swarm: code cleanup, move to ethersphere/swarm (#19661)

committed by Péter Szilágyi
parent 15f24ff189
commit 42b81f94ad

@@ -1,35 +0,0 @@
# Core team members

Viktor Trón - @zelig
Louis Holbrook - @nolash
Lewis Marshall - @lmars
Anton Evangelatov - @nonsense
Janoš Guljaš - @janos
Balint Gabor - @gbalint
Elad Nachmias - @justelad
Daniel A. Nagy - @nagydani
Aron Fischer - @homotopycolimit
Fabio Barone - @holisticode
Zahoor Mohamed - @jmozah
Zsolt Felföldi - @zsfelfoldi

# External contributors

Kiel Barry
Gary Rong
Jared Wasinger
Leon Stanko
Javier Peletier [epiclabs.io]
Bartek Borkowski [tungsten-labs.com]
Shane Howley [mainframe.com]
Doug Leonard [mainframe.com]
Ivan Daniluk [status.im]
Felix Lange [EF]
Martin Holst Swende [EF]
Guillaume Ballet [EF]
ligi [EF]
Christopher Dro [blick-labs.com]
Sergii Bomko [ledgerleopard.com]
Domino Valdano
Rafael Matias
Coogan Brennan
25 swarm/OWNERS

@@ -1,25 +0,0 @@
# Ownership by go packages

swarm
├── api ─────────────────── ethersphere
├── bmt ─────────────────── @zelig
├── dev ─────────────────── @lmars
├── fuse ────────────────── @jmozah, @holisticode
├── grafana_dashboards ──── @nonsense
├── metrics ─────────────── @nonsense, @holisticode
├── network ─────────────── ethersphere
│   ├── bitvector ───────── @zelig, @janos, @gbalint
│   ├── priorityqueue ───── @zelig, @janos, @gbalint
│   ├── simulations ─────── @zelig
│   └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad
│       ├── intervals ───── @janos
│       └── testing ─────── @zelig
├── pot ─────────────────── @zelig
├── pss ─────────────────── @nolash, @zelig, @nonsense
├── services ────────────── @zelig
├── state ───────────────── @justelad
├── storage ─────────────── ethersphere
│   ├── encryption ──────── @gbalint, @zelig, @nagydani
│   ├── mock ────────────── @janos
│   └── feed ────────────── @nolash, @jpeletier
└── testutil ────────────── @lmars
243 swarm/README.md

@@ -1,244 +1,7 @@
## Swarm

[https://swarm.ethereum.org](https://swarm.ethereum.org)

Swarm is a distributed storage platform and content distribution service, a native base layer service of the ethereum web3 stack. The primary objective of Swarm is to provide a decentralized and redundant store for dapp code and data as well as blockchain and state data. Swarm is also set out to provide various base layer services for web3, including node-to-node messaging, media streaming, decentralised database services and scalable state-channel infrastructure for decentralised service economies.
[Travis CI](https://travis-ci.org/ethereum/go-ethereum)
[Gitter](https://gitter.im/ethersphere/orange-lounge?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)

## Table of Contents

* [Building the source](#building-the-source)
* [Running Swarm](#running-swarm)
* [Documentation](#documentation)
* [Developers Guide](#developers-guide)
  * [Go Environment](#go-environment)
  * [Vendored Dependencies](#vendored-dependencies)
  * [Testing](#testing)
  * [Profiling Swarm](#profiling-swarm)
  * [Metrics and Instrumentation in Swarm](#metrics-and-instrumentation-in-swarm)
* [Public Gateways](#public-gateways)
* [Swarm Dapps](#swarm-dapps)
* [Contributing](#contributing)
* [License](#license)
## Building the source

Building Swarm requires Go (version 1.10 or later).

```
go get -d github.com/ethereum/go-ethereum

go install github.com/ethereum/go-ethereum/cmd/swarm
```
## Running Swarm

Going through all the possible command line flags is out of scope here, but we've enumerated a few common parameter combos to get you up to speed quickly on how you can run your own Swarm node.

To run Swarm you need an Ethereum account. You can create a new account by running the following command:

```
geth account new
```

You will be prompted for a password:

```
Your new account is locked with a password. Please give a password. Do not forget this password.
Passphrase:
Repeat passphrase:
```

Once you have specified the password, the output will be the Ethereum address representing that account. For example:

```
Address: {2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1}
```

Using this account, connect to Swarm with:

```
swarm --bzzaccount <your-account-here>

# in our example
swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1
```
### Verifying that your local Swarm node is running

When running, Swarm is accessible through an HTTP API on port 8500.

Confirm that it is up and running by pointing your browser to http://localhost:8500
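For a quick check from the command line, you can also exercise the HTTP API directly. This is a minimal sketch; the `<returned-hash>` placeholder stands for whatever hash the upload call returns:

```
# upload a small file; the response body is the resulting swarm hash
curl -H "Content-Type: text/plain" --data-binary "some-data" http://localhost:8500/bzz:/

# fetch the content back using that hash
curl http://localhost:8500/bzz:/<returned-hash>/
```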
### Ethereum Name Service resolution

The Ethereum Name Service is the Ethereum equivalent of DNS in the classic web. In order to use ENS to resolve names to Swarm content hashes (e.g. `bzz://theswarm.eth`), `swarm` has to connect to a `geth` instance which is synced with the Ethereum mainnet. This is done using the `--ens-api` flag.

```
swarm --bzzaccount <your-account-here> \
      --ens-api "$HOME/.ethereum/geth.ipc"

# in our example
swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1 \
      --ens-api "$HOME/.ethereum/geth.ipc"
```

For more information on usage, features or command line flags, please consult the Documentation.
## Documentation

Swarm documentation can be found at [https://swarm-guide.readthedocs.io](https://swarm-guide.readthedocs.io).
## Developers Guide

### Go Environment

We assume that you have Go v1.10 installed and `GOPATH` set.

You must have your working copy under `$GOPATH/src/github.com/ethereum/go-ethereum`.

Most likely you will be working from your own fork of `go-ethereum`, say `github.com/nirname/go-ethereum`. Clone or move your fork into the right place:

```
git clone git@github.com:nirname/go-ethereum.git $GOPATH/src/github.com/ethereum/go-ethereum
```
### Vendored Dependencies

All dependencies are tracked in the `vendor` directory. We use `govendor` to manage them.

If you want to add a new dependency, run `govendor fetch <import-path>`, then commit the result.

If you want to update all dependencies to their latest upstream version, run `govendor fetch +v`.
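For example, a typical session might look like the following (the import path is purely illustrative):

```
# add a single new dependency to vendor/
govendor fetch github.com/example/somelib

# update all vendored dependencies to their latest upstream versions
govendor fetch +v
```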
### Testing

This section explains how to run unit, integration, and end-to-end tests in your development sandbox.

Testing one library:

```
go test -v -cpu 4 ./swarm/api
```

Note: using the options `-cpu` (number of cores allowed) and `-v` (verbose logging even when there is no error) is recommended.

Testing only some methods:

```
go test -v -cpu 4 ./eth -run TestMethod
```

Note: all tests whose names have the prefix `TestMethod` will be run, so if you have both `TestMethod` and `TestMethod1`, both will run.

Running benchmarks:

```
go test -v -cpu 4 -bench . -run BenchmarkJoin
```
### Profiling Swarm

This section explains how to use the Go `pprof` profiler with Swarm.

If `swarm` is started with the `--pprof` option, a debugging HTTP server is made available on port 6060.

You can bring up http://localhost:6060/debug/pprof to see the heap, running goroutines etc.

Opening the full goroutine stack dump (http://localhost:6060/debug/pprof/goroutine?debug=2) generates a trace that is useful for debugging.
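The same endpoints can also be consumed with the standard `go tool pprof` workflow, for instance:

```
# interactive 30-second CPU profile of the running node
go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30

# inspect the current heap profile
go tool pprof http://localhost:6060/debug/pprof/heap
```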
### Metrics and Instrumentation in Swarm

This section explains how to visualize and use existing Swarm metrics, and how to instrument Swarm with a new metric.

The Swarm metrics system is based on the `go-metrics` library.

The most common types of measurements we use in Swarm are `counters` and `resetting timers`. Consult the `go-metrics` documentation for a full reference of the available types.

```
// incrementing a counter
metrics.GetOrRegisterCounter("network.stream.received_chunks", nil).Inc(1)

// measuring latency with a resetting timer
start := time.Now()
t := metrics.GetOrRegisterResettingTimer("http.request.GET.time", nil)
...
t.UpdateSince(start)
```
#### Visualizing metrics

Swarm supports an InfluxDB exporter. Consult the help section to learn about the command line arguments used to configure it:

```
swarm --help | grep metrics
```

We use Grafana and InfluxDB to visualise metrics reported by Swarm. We keep our Grafana dashboards under version control at `./swarm/grafana_dashboards`. You could use them or design your own.

We have built a tool to help with the automatic start of Grafana and InfluxDB and the provisioning of dashboards at https://github.com/nonsense/stateth, which requires that you have Docker installed.

Once you have `stateth` installed and Docker running locally, you have to:

1. Run `stateth` and keep it running in the background:
```
stateth --rm --grafana-dashboards-folder $GOPATH/src/github.com/ethereum/go-ethereum/swarm/grafana_dashboards --influxdb-database metrics
```

2. Run `swarm` with at least the following params:
```
--metrics \
--metrics.influxdb.export \
--metrics.influxdb.endpoint "http://localhost:8086" \
--metrics.influxdb.username "admin" \
--metrics.influxdb.password "admin" \
--metrics.influxdb.database "metrics"
```

3. Open Grafana at http://localhost:3000 and view the dashboards to gain insight into Swarm.
## Public Gateways

Swarm offers a local HTTP proxy API that Dapps can use to interact with Swarm. The Ethereum Foundation is hosting a public gateway, which allows free access so that people can try Swarm without running their own node.

The Swarm public gateways are temporary and users should not rely on their existence for production services.

The Swarm public gateway can be found at https://swarm-gateways.net and is always running the latest `stable` Swarm release.

## Swarm Dapps

You can find a few reference Swarm decentralised applications at: https://swarm-gateways.net/bzz:/swarmapps.eth

Their source code can be found at: https://github.com/ethersphere/swarm-dapps
## Contributing

Thank you for considering helping out with the source code! We welcome contributions from anyone on the internet, and are grateful for even the smallest of fixes!

If you'd like to contribute to Swarm, please fork, fix, commit and send a pull request for the maintainers to review and merge into the main code base. If you wish to submit more complex changes, though, please check with the core devs first on [our Swarm gitter channel](https://gitter.im/ethersphere/orange-lounge) to ensure those changes are in line with the general philosophy of the project and/or get some early feedback, which can make both your efforts and our review and merge procedures much lighter and quicker.

Please make sure your contributions adhere to our coding guidelines:

* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. use [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Follow the [code review guidelines](https://github.com/ethereum/go-ethereum/wiki/Code-Review-Guidelines).
* Commit messages should be prefixed with the package(s) they modify.
  * E.g. "swarm/fuse: ignore default manifest entry"
## License

The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the [GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), also included in our repository in the `COPYING.LESSER` file.

The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also included in our repository in the `COPYING` file.

The README above is removed by this commit; the replacement file reads:

# Swarm

https://swarm.ethereum.org

**Note**: The codebase has been moved to [ethersphere/swarm](https://github.com/ethersphere/swarm)
538 swarm/api/act.go

@@ -1,538 +0,0 @@
```go
package api

import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/ecies"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/sctx"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"golang.org/x/crypto/scrypt"
	"golang.org/x/crypto/sha3"
	cli "gopkg.in/urfave/cli.v1"
)

var (
	ErrDecrypt                = errors.New("cant decrypt - forbidden")
	ErrUnknownAccessType      = errors.New("unknown access type (or not implemented)")
	ErrDecryptDomainForbidden = errors.New("decryption request domain forbidden - can only decrypt on localhost")
	AllowedDecryptDomains     = []string{
		"localhost",
		"127.0.0.1",
	}
)

const EmptyCredentials = ""

type AccessEntry struct {
	Type      AccessType
	Publisher string
	Salt      []byte
	Act       string
	KdfParams *KdfParams
}

type DecryptFunc func(*ManifestEntry) error

func (a *AccessEntry) MarshalJSON() (out []byte, err error) {
	return json.Marshal(struct {
		Type      AccessType `json:"type,omitempty"`
		Publisher string     `json:"publisher,omitempty"`
		Salt      string     `json:"salt,omitempty"`
		Act       string     `json:"act,omitempty"`
		KdfParams *KdfParams `json:"kdf_params,omitempty"`
	}{
		Type:      a.Type,
		Publisher: a.Publisher,
		Salt:      hex.EncodeToString(a.Salt),
		Act:       a.Act,
		KdfParams: a.KdfParams,
	})
}

func (a *AccessEntry) UnmarshalJSON(value []byte) error {
	v := struct {
		Type      AccessType `json:"type,omitempty"`
		Publisher string     `json:"publisher,omitempty"`
		Salt      string     `json:"salt,omitempty"`
		Act       string     `json:"act,omitempty"`
		KdfParams *KdfParams `json:"kdf_params,omitempty"`
	}{}

	err := json.Unmarshal(value, &v)
	if err != nil {
		return err
	}
	a.Act = v.Act
	a.KdfParams = v.KdfParams
	a.Publisher = v.Publisher
	a.Salt, err = hex.DecodeString(v.Salt)
	if err != nil {
		return err
	}
	if len(a.Salt) != 32 {
		return errors.New("salt should be 32 bytes long")
	}
	a.Type = v.Type
	return nil
}

type KdfParams struct {
	N int `json:"n"`
	P int `json:"p"`
	R int `json:"r"`
}

type AccessType string

const AccessTypePass = AccessType("pass")
const AccessTypePK = AccessType("pk")
const AccessTypeACT = AccessType("act")

// NewAccessEntryPassword creates a manifest AccessEntry in order to create an ACT protected by a password
func NewAccessEntryPassword(salt []byte, kdfParams *KdfParams) (*AccessEntry, error) {
	if len(salt) != 32 {
		return nil, fmt.Errorf("salt should be 32 bytes long")
	}
	return &AccessEntry{
		Type:      AccessTypePass,
		Salt:      salt,
		KdfParams: kdfParams,
	}, nil
}

// NewAccessEntryPK creates a manifest AccessEntry in order to create an ACT protected by a pair of Elliptic Curve keys
func NewAccessEntryPK(publisher string, salt []byte) (*AccessEntry, error) {
	if len(publisher) != 66 {
		return nil, fmt.Errorf("publisher should be 66 characters long, got %d", len(publisher))
	}
	if len(salt) != 32 {
		return nil, fmt.Errorf("salt should be 32 bytes long")
	}
	return &AccessEntry{
		Type:      AccessTypePK,
		Publisher: publisher,
		Salt:      salt,
	}, nil
}

// NewAccessEntryACT creates a manifest AccessEntry in order to create an ACT protected by a combination of EC keys and passwords
func NewAccessEntryACT(publisher string, salt []byte, act string) (*AccessEntry, error) {
	if len(salt) != 32 {
		return nil, fmt.Errorf("salt should be 32 bytes long")
	}
	if len(publisher) != 66 {
		return nil, fmt.Errorf("publisher should be 66 characters long")
	}

	return &AccessEntry{
		Type:      AccessTypeACT,
		Publisher: publisher,
		Salt:      salt,
		Act:       act,
		KdfParams: DefaultKdfParams,
	}, nil
}

// NOOPDecrypt is a generic decrypt function that is passed into the API in places where real ACT decryption capabilities are
// either unwanted, or alternatively, cannot be implemented in the immediate scope
func NOOPDecrypt(*ManifestEntry) error {
	return nil
}

var DefaultKdfParams = NewKdfParams(262144, 1, 8)

// NewKdfParams returns a KdfParams struct with the given scrypt params
func NewKdfParams(n, p, r int) *KdfParams {
	return &KdfParams{
		N: n,
		P: p,
		R: r,
	}
}

// NewSessionKeyPassword creates a session key based on a shared secret (password) and the given salt
// and kdf parameters in the access entry
func NewSessionKeyPassword(password string, accessEntry *AccessEntry) ([]byte, error) {
	if accessEntry.Type != AccessTypePass && accessEntry.Type != AccessTypeACT {
		return nil, errors.New("incorrect access entry type")
	}
	return sessionKeyPassword(password, accessEntry.Salt, accessEntry.KdfParams)
}

func sessionKeyPassword(password string, salt []byte, kdfParams *KdfParams) ([]byte, error) {
	return scrypt.Key(
		[]byte(password),
		salt,
		kdfParams.N,
		kdfParams.R,
		kdfParams.P,
		32,
	)
}

// NewSessionKeyPK creates a new ACT Session Key using an ECDH shared secret for the given key pair and the given salt value
func NewSessionKeyPK(private *ecdsa.PrivateKey, public *ecdsa.PublicKey, salt []byte) ([]byte, error) {
	granteePubEcies := ecies.ImportECDSAPublic(public)
	privateKey := ecies.ImportECDSA(private)

	bytes, err := privateKey.GenerateShared(granteePubEcies, 16, 16)
	if err != nil {
		return nil, err
	}
	bytes = append(salt, bytes...)
	sessionKey := crypto.Keccak256(bytes)
	return sessionKey, nil
}

// doDecrypt returns a DecryptFunc that resolves protected manifest entries, handling the
// password ("pass"), public key ("pk") and ACT ("act") access schemes.
func (a *API) doDecrypt(ctx context.Context, credentials string, pk *ecdsa.PrivateKey) DecryptFunc {
	return func(m *ManifestEntry) error {
		if m.Access == nil {
			return nil
		}

		allowed := false
		requestDomain := sctx.GetHost(ctx)
		for _, v := range AllowedDecryptDomains {
			if strings.Contains(requestDomain, v) {
				allowed = true
			}
		}

		if !allowed {
			return ErrDecryptDomainForbidden
		}

		switch m.Access.Type {
		case "pass":
			if credentials != "" {
				key, err := NewSessionKeyPassword(credentials, m.Access)
				if err != nil {
					return err
				}

				ref, err := hex.DecodeString(m.Hash)
				if err != nil {
					return err
				}

				enc := NewRefEncryption(len(ref) - 8)
				decodedRef, err := enc.Decrypt(ref, key)
				if err != nil {
					return ErrDecrypt
				}

				m.Hash = hex.EncodeToString(decodedRef)
				m.Access = nil
				return nil
			}
			return ErrDecrypt
		case "pk":
			publisherBytes, err := hex.DecodeString(m.Access.Publisher)
			if err != nil {
				return ErrDecrypt
			}
			publisher, err := crypto.DecompressPubkey(publisherBytes)
			if err != nil {
				return ErrDecrypt
			}
			key, err := NewSessionKeyPK(pk, publisher, m.Access.Salt)
			if err != nil {
				return ErrDecrypt
			}
			ref, err := hex.DecodeString(m.Hash)
			if err != nil {
				return err
			}

			enc := NewRefEncryption(len(ref) - 8)
			decodedRef, err := enc.Decrypt(ref, key)
			if err != nil {
				return ErrDecrypt
			}

			m.Hash = hex.EncodeToString(decodedRef)
			m.Access = nil
			return nil
		case "act":
			var (
				sessionKey []byte
				err        error
			)

			publisherBytes, err := hex.DecodeString(m.Access.Publisher)
			if err != nil {
				return ErrDecrypt
			}
			publisher, err := crypto.DecompressPubkey(publisherBytes)
			if err != nil {
				return ErrDecrypt
			}

			sessionKey, err = NewSessionKeyPK(pk, publisher, m.Access.Salt)
			if err != nil {
				return ErrDecrypt
			}

			found, ciphertext, decryptionKey, err := a.getACTDecryptionKey(ctx, storage.Address(common.Hex2Bytes(m.Access.Act)), sessionKey)
			if err != nil {
				return err
			}
			if !found {
				// try to fall back to password
				if credentials != "" {
					sessionKey, err = NewSessionKeyPassword(credentials, m.Access)
					if err != nil {
						return err
					}
					found, ciphertext, decryptionKey, err = a.getACTDecryptionKey(ctx, storage.Address(common.Hex2Bytes(m.Access.Act)), sessionKey)
					if err != nil {
						return err
					}
					if !found {
						return ErrDecrypt
					}
				} else {
					return ErrDecrypt
				}
			}
			enc := NewRefEncryption(len(ciphertext) - 8)
			decodedRef, err := enc.Decrypt(ciphertext, decryptionKey)
			if err != nil {
				return ErrDecrypt
			}

			ref, err := hex.DecodeString(m.Hash)
			if err != nil {
				return err
			}

			enc = NewRefEncryption(len(ref) - 8)
			decodedMainRef, err := enc.Decrypt(ref, decodedRef)
			if err != nil {
				return ErrDecrypt
			}
			m.Hash = hex.EncodeToString(decodedMainRef)
			m.Access = nil
			return nil
		}
		return ErrUnknownAccessType
	}
}

// getACTDecryptionKey derives the manifest lookup key (keccak256(sessionKey || 0x00)) and the
// access key decryption key (keccak256(sessionKey || 0x01)) for the given session key, then
// searches the ACT manifest for an entry under the lookup key.
func (a *API) getACTDecryptionKey(ctx context.Context, actManifestAddress storage.Address, sessionKey []byte) (found bool, ciphertext, decryptionKey []byte, err error) {
	hasher := sha3.NewLegacyKeccak256()
	hasher.Write(append(sessionKey, 0))
	lookupKey := hasher.Sum(nil)
	hasher.Reset()

	hasher.Write(append(sessionKey, 1))
	accessKeyDecryptionKey := hasher.Sum(nil)
	hasher.Reset()

	lk := hex.EncodeToString(lookupKey)
	list, err := a.GetManifestList(ctx, NOOPDecrypt, actManifestAddress, lk)
	if err != nil {
		return false, nil, nil, err
	}
	for _, v := range list.Entries {
		if v.Path == lk {
			cipherTextBytes, err := hex.DecodeString(v.Hash)
			if err != nil {
				return false, nil, nil, err
			}
			return true, cipherTextBytes, accessKeyDecryptionKey, nil
		}
	}
	return false, nil, nil, nil
}

func GenerateAccessControlManifest(ctx *cli.Context, ref string, accessKey []byte, ae *AccessEntry) (*Manifest, error) {
	refBytes, err := hex.DecodeString(ref)
	if err != nil {
		return nil, err
	}
	// encrypt ref with accessKey
	enc := NewRefEncryption(len(refBytes))
	encrypted, err := enc.Encrypt(refBytes, accessKey)
	if err != nil {
		return nil, err
	}

	m := &Manifest{
		Entries: []ManifestEntry{
			{
				Hash:        hex.EncodeToString(encrypted),
				ContentType: ManifestType,
				ModTime:     time.Now(),
				Access:      ae,
			},
		},
	}

	return m, nil
}

// DoPK is a helper function to the CLI API that handles the entire business logic for
// creating a session key and access entry given the cli context, ec keys and salt
func DoPK(ctx *cli.Context, privateKey *ecdsa.PrivateKey, granteePublicKey string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) {
	if granteePublicKey == "" {
		return nil, nil, errors.New("need a grantee Public Key")
	}
	b, err := hex.DecodeString(granteePublicKey)
	if err != nil {
		log.Error("error decoding grantee public key", "err", err)
		return nil, nil, err
	}

	granteePub, err := crypto.DecompressPubkey(b)
	if err != nil {
		log.Error("error decompressing grantee public key", "err", err)
		return nil, nil, err
	}

	sessionKey, err = NewSessionKeyPK(privateKey, granteePub, salt)
	if err != nil {
		log.Error("error getting session key", "err", err)
		return nil, nil, err
	}

	ae, err = NewAccessEntryPK(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt)
	if err != nil {
		log.Error("error generating access entry", "err", err)
		return nil, nil, err
	}

	return sessionKey, ae, nil
}

// DoACT is a helper function to the CLI API that handles the entire business logic for
// creating an access key, access entry and ACT manifest (including uploading it) given the cli context, ec keys, password grantees and salt
func DoACT(ctx *cli.Context, privateKey *ecdsa.PrivateKey, salt []byte, grantees []string, encryptPasswords []string) (accessKey []byte, ae *AccessEntry, actManifest *Manifest, err error) {
	if len(grantees) == 0 && len(encryptPasswords) == 0 {
		return nil, nil, nil, errors.New("did not get any grantee public keys or any encryption passwords")
	}

	publisherPub := hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey))
	grantees = append(grantees, publisherPub)

	accessKey = make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, salt); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}
	if _, err := io.ReadFull(rand.Reader, accessKey); err != nil {
		panic("reading from crypto/rand failed: " + err.Error())
	}

	lookupPathEncryptedAccessKeyMap := make(map[string]string)
	i := 0
	for _, v := range grantees {
		i++
		if v == "" {
			return nil, nil, nil, errors.New("need a grantee Public Key")
		}
		b, err := hex.DecodeString(v)
		if err != nil {
			log.Error("error decoding grantee public key", "err", err)
			return nil, nil, nil, err
		}

		granteePub, err := crypto.DecompressPubkey(b)
		if err != nil {
			log.Error("error decompressing grantee public key", "err", err)
			return nil, nil, nil, err
		}
		sessionKey, err := NewSessionKeyPK(privateKey, granteePub, salt)
		if err != nil {
			return nil, nil, nil, err
		}

		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(append(sessionKey, 0))
		lookupKey := hasher.Sum(nil)

		hasher.Reset()
		hasher.Write(append(sessionKey, 1))

		accessKeyEncryptionKey := hasher.Sum(nil)

		enc := NewRefEncryption(len(accessKey))
		encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey)
		if err != nil {
			return nil, nil, nil, err
		}
		lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey)
	}

	for _, pass := range encryptPasswords {
		sessionKey, err := sessionKeyPassword(pass, salt, DefaultKdfParams)
		if err != nil {
			return nil, nil, nil, err
		}
		hasher := sha3.NewLegacyKeccak256()
		hasher.Write(append(sessionKey, 0))
		lookupKey := hasher.Sum(nil)

		hasher.Reset()
		hasher.Write(append(sessionKey, 1))

		accessKeyEncryptionKey := hasher.Sum(nil)

		enc := NewRefEncryption(len(accessKey))
		encryptedAccessKey, err := enc.Encrypt(accessKey, accessKeyEncryptionKey)
		if err != nil {
			return nil, nil, nil, err
		}
		lookupPathEncryptedAccessKeyMap[hex.EncodeToString(lookupKey)] = hex.EncodeToString(encryptedAccessKey)
	}

	m := &Manifest{
		Entries: []ManifestEntry{},
	}

	for k, v := range lookupPathEncryptedAccessKeyMap {
		m.Entries = append(m.Entries, ManifestEntry{
			Path:        k,
			Hash:        v,
			ContentType: "text/plain",
		})
	}

	ae, err = NewAccessEntryACT(hex.EncodeToString(crypto.CompressPubkey(&privateKey.PublicKey)), salt, "")
	if err != nil {
		return nil, nil, nil, err
	}

	return accessKey, ae, m, nil
}

// DoPassword is a helper function to the CLI API that handles the entire business logic for
// creating a session key and an access entry given the cli context, password and salt.
// By default, DefaultKdfParams are used as the scrypt params
func DoPassword(ctx *cli.Context, password string, salt []byte) (sessionKey []byte, ae *AccessEntry, err error) {
	ae, err = NewAccessEntryPassword(salt, DefaultKdfParams)
	if err != nil {
		return nil, nil, err
	}

	sessionKey, err = NewSessionKeyPassword(password, ae)
	if err != nil {
		return nil, nil, err
	}
	return sessionKey, ae, nil
}
```
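The lookup scheme used by `getACTDecryptionKey` above derives two independent values from each grantee's session key. The following self-contained sketch reproduces just that derivation with a placeholder session key (in the real code the key comes from ECDH or scrypt):

```go
package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	sessionKey := make([]byte, 32) // stand-in for a real ECDH- or scrypt-derived session key

	h := sha3.NewLegacyKeccak256()
	h.Write(append(sessionKey, 0)) // keccak256(sessionKey || 0x00) -> manifest lookup key
	lookupKey := h.Sum(nil)
	h.Reset()

	h.Write(append(sessionKey, 1)) // keccak256(sessionKey || 0x01) -> access key decryption key
	accessKeyDecryptionKey := h.Sum(nil)

	fmt.Println("lookup key:    ", hex.EncodeToString(lookupKey))
	fmt.Println("decryption key:", hex.EncodeToString(accessKeyDecryptionKey))
}
```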
993 swarm/api/api.go

@@ -1,993 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package api
|
||||
|
||||
//go:generate mimegen --types=./../../cmd/swarm/mimegen/mime.types --package=api --out=gen_mime.go
|
||||
//go:generate gofmt -s -w gen_mime.go
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"bytes"
|
||||
"mime"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/contracts/ens"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/spancontext"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
var (
|
||||
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
|
||||
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
|
||||
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
|
||||
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
|
||||
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
|
||||
apiManifestUpdateCount = metrics.NewRegisteredCounter("api.manifestupdate.count", nil)
|
||||
apiManifestUpdateFail = metrics.NewRegisteredCounter("api.manifestupdate.fail", nil)
|
||||
apiManifestListCount = metrics.NewRegisteredCounter("api.manifestlist.count", nil)
|
||||
apiManifestListFail = metrics.NewRegisteredCounter("api.manifestlist.fail", nil)
|
||||
apiDeleteCount = metrics.NewRegisteredCounter("api.delete.count", nil)
|
||||
apiDeleteFail = metrics.NewRegisteredCounter("api.delete.fail", nil)
|
||||
apiGetTarCount = metrics.NewRegisteredCounter("api.gettar.count", nil)
|
||||
apiGetTarFail = metrics.NewRegisteredCounter("api.gettar.fail", nil)
|
||||
apiUploadTarCount = metrics.NewRegisteredCounter("api.uploadtar.count", nil)
|
||||
apiUploadTarFail = metrics.NewRegisteredCounter("api.uploadtar.fail", nil)
|
||||
apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil)
|
||||
apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil)
|
||||
apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil)
|
||||
apiAddFileFail = metrics.NewRegisteredCounter("api.addfile.fail", nil)
|
||||
apiRmFileCount = metrics.NewRegisteredCounter("api.removefile.count", nil)
|
||||
apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil)
|
||||
apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil)
|
||||
apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil)
|
||||
apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil)
|
||||
)
|
||||
|
||||
// Resolver interface resolve a domain name to a hash using ENS
|
||||
type Resolver interface {
|
||||
Resolve(string) (common.Hash, error)
|
||||
}
|
||||
|
||||
// ResolveValidator is used to validate the contained Resolver
|
||||
type ResolveValidator interface {
|
||||
Resolver
|
||||
Owner(node [32]byte) (common.Address, error)
|
||||
HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
|
||||
}
|
||||
|
||||
// NoResolverError is returned by MultiResolver.Resolve if no resolver
|
||||
// can be found for the address.
|
||||
type NoResolverError struct {
|
||||
TLD string
|
||||
}
|
||||
|
||||
// NewNoResolverError creates a NoResolverError for the given top level domain
|
||||
func NewNoResolverError(tld string) *NoResolverError {
|
||||
return &NoResolverError{TLD: tld}
|
||||
}
|
||||
|
||||
// Error NoResolverError implements error
|
||||
func (e *NoResolverError) Error() string {
|
||||
if e.TLD == "" {
|
||||
return "no ENS resolver"
|
||||
}
|
||||
return fmt.Sprintf("no ENS endpoint configured to resolve .%s TLD names", e.TLD)
|
||||
}
|
||||
|
||||
// MultiResolver is used to resolve URL addresses based on their TLDs.
|
||||
// Each TLD can have multiple resolvers, and the resolution from the
|
||||
// first one in the sequence will be returned.
|
||||
type MultiResolver struct {
|
||||
resolvers map[string][]ResolveValidator
|
||||
nameHash func(string) common.Hash
|
||||
}
|
||||
|
||||
// MultiResolverOption sets options for MultiResolver and is used as
|
||||
// arguments for its constructor.
|
||||
type MultiResolverOption func(*MultiResolver)
|
||||
|
||||
// MultiResolverOptionWithResolver adds a Resolver to a list of resolvers
|
||||
// for a specific TLD. If TLD is an empty string, the resolver will be added
|
||||
// to the list of default resolver, the ones that will be used for resolution
|
||||
// of addresses which do not have their TLD resolver specified.
|
||||
func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption {
|
||||
return func(m *MultiResolver) {
|
||||
m.resolvers[tld] = append(m.resolvers[tld], r)
|
||||
}
|
||||
}
|
||||
|
||||
// NewMultiResolver creates a new instance of MultiResolver.
|
||||
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
|
||||
m = &MultiResolver{
|
||||
resolvers: make(map[string][]ResolveValidator),
|
||||
nameHash: ens.EnsNode,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// Resolve resolves address by choosing a Resolver by TLD.
|
||||
// If there are more default Resolvers, or for a specific TLD,
|
||||
// the Hash from the first one which does not return error
|
||||
// will be returned.
|
||||
func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
|
||||
rs, err := m.getResolveValidator(addr)
|
||||
if err != nil {
|
||||
return h, err
|
||||
}
|
||||
for _, r := range rs {
|
||||
h, err = r.Resolve(addr)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
|
||||
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
|
||||
rs := m.resolvers[""]
|
||||
tld := path.Ext(name)
|
||||
if tld != "" {
|
||||
tld = tld[1:]
|
||||
rstld, ok := m.resolvers[tld]
|
||||
if ok {
|
||||
return rstld, nil
|
||||
}
|
||||
}
|
||||
if len(rs) == 0 {
|
||||
return rs, NewNoResolverError(tld)
|
||||
}
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
/*
|
||||
API implements webserver/file system related content storage and retrieval
|
||||
on top of the FileStore
|
||||
it is the public interface of the FileStore which is included in the ethereum stack
|
||||
*/
|
||||
type API struct {
|
||||
feed *feed.Handler
|
||||
fileStore *storage.FileStore
|
||||
dns Resolver
|
||||
Tags *chunk.Tags
|
||||
Decryptor func(context.Context, string) DecryptFunc
|
||||
}
|
||||
|
||||
// NewAPI the api constructor initialises a new API instance.
|
||||
func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey, tags *chunk.Tags) (self *API) {
|
||||
self = &API{
|
||||
fileStore: fileStore,
|
||||
dns: dns,
|
||||
feed: feedHandler,
|
||||
Tags: tags,
|
||||
Decryptor: func(ctx context.Context, credentials string) DecryptFunc {
|
||||
return self.doDecrypt(ctx, credentials, pk)
|
||||
},
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Retrieve FileStore reader API
|
||||
func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
|
||||
return a.fileStore.Retrieve(ctx, addr)
|
||||
}
|
||||
|
||||
// Store wraps the Store API call of the embedded FileStore
|
||||
func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(ctx context.Context) error, err error) {
|
||||
log.Debug("api.store", "size", size)
|
||||
return a.fileStore.Store(ctx, data, size, toEncrypt)
|
||||
}
|
||||
|
||||
// Resolve a name into a content-addressed hash
|
||||
// where address could be an ENS name, or a content addressed hash
|
||||
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
|
||||
// if DNS is not configured, return an error
|
||||
if a.dns == nil {
|
||||
if hashMatcher.MatchString(address) {
|
||||
return common.Hex2Bytes(address), nil
|
||||
}
|
||||
apiResolveFail.Inc(1)
|
||||
return nil, fmt.Errorf("no DNS to resolve name: %q", address)
|
||||
}
|
||||
// try and resolve the address
|
||||
resolved, err := a.dns.Resolve(address)
|
||||
if err != nil {
|
||||
if hashMatcher.MatchString(address) {
|
||||
return common.Hex2Bytes(address), nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return resolved[:], nil
|
||||
}
|
||||
|
||||
// Resolve resolves a URI to an Address using the MultiResolver.
|
||||
func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (storage.Address, error) {
|
||||
apiResolveCount.Inc(1)
|
||||
log.Trace("resolving", "uri", uri.Addr)
|
||||
|
||||
var sp opentracing.Span
|
||||
ctx, sp = spancontext.StartSpan(
|
||||
ctx,
|
||||
"api.resolve")
|
||||
defer sp.Finish()
|
||||
|
||||
// if the URI is immutable, check if the address looks like a hash
|
||||
if uri.Immutable() {
|
||||
key := uri.Address()
|
||||
if key == nil {
|
||||
return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr)
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
addr, err := a.Resolve(ctx, uri.Addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if uri.Path == "" {
|
||||
return addr, nil
|
||||
}
|
||||
walker, err := a.NewManifestWalker(ctx, addr, a.Decryptor(ctx, credentials), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var entry *ManifestEntry
|
||||
walker.Walk(func(e *ManifestEntry) error {
|
||||
// if the entry matches the path, set entry and stop
|
||||
// the walk
|
||||
if e.Path == uri.Path {
|
||||
entry = e
|
||||
// return an error to cancel the walk
|
||||
return errors.New("found")
|
||||
}
|
||||
// ignore non-manifest files
|
||||
if e.ContentType != ManifestType {
|
||||
return nil
|
||||
}
|
||||
// if the manifest's path is a prefix of the
|
||||
// requested path, recurse into it by returning
|
||||
// nil and continuing the walk
|
||||
if strings.HasPrefix(uri.Path, e.Path) {
|
||||
return nil
|
||||
}
|
||||
return ErrSkipManifest
|
||||
})
|
||||
if entry == nil {
|
||||
return nil, errors.New("not found")
|
||||
}
|
||||
addr = storage.Address(common.Hex2Bytes(entry.Hash))
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// Get uses iterative manifest retrieval and prefix matching
|
||||
// to resolve basePath to content using FileStore retrieve
|
||||
// it returns a section reader, mimeType, status, the key of the actual content and an error
|
||||
func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) {
|
||||
log.Debug("api.get", "key", manifestAddr, "path", path)
|
||||
apiGetCount.Inc(1)
|
||||
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt)
|
||||
if err != nil {
|
||||
apiGetNotFound.Inc(1)
|
||||
status = http.StatusNotFound
|
||||
return nil, "", http.StatusNotFound, nil, err
|
||||
}
|
||||
|
||||
log.Debug("trie getting entry", "key", manifestAddr, "path", path)
|
||||
entry, _ := trie.getEntry(path)
|
||||
|
||||
if entry != nil {
|
||||
log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash)
|
||||
|
||||
if entry.ContentType == ManifestType {
|
||||
log.Debug("entry is manifest", "key", manifestAddr, "new key", entry.Hash)
|
||||
adr, err := hex.DecodeString(entry.Hash)
|
||||
if err != nil {
|
||||
return nil, "", 0, nil, err
|
||||
}
|
||||
return a.Get(ctx, decrypt, adr, entry.Path)
|
||||
}
|
||||
|
||||
// we need to do some extra work if this is a Swarm feed manifest
|
||||
if entry.ContentType == FeedContentType {
|
||||
if entry.Feed == nil {
|
||||
return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest")
|
||||
}
|
||||
_, err := a.feed.Lookup(ctx, feed.NewQueryLatest(entry.Feed, lookup.NoClue))
|
||||
if err != nil {
|
||||
apiGetNotFound.Inc(1)
|
||||
status = http.StatusNotFound
|
||||
log.Debug(fmt.Sprintf("get feed update content error: %v", err))
|
||||
return reader, mimeType, status, nil, err
|
||||
}
|
||||
// get the data of the update
|
||||
_, contentAddr, err := a.feed.GetContent(entry.Feed)
|
||||
if err != nil {
|
||||
apiGetNotFound.Inc(1)
|
||||
status = http.StatusNotFound
|
||||
log.Warn(fmt.Sprintf("get feed update content error: %v", err))
|
||||
return reader, mimeType, status, nil, err
|
||||
}
|
||||
|
||||
// extract content hash
|
||||
if len(contentAddr) != storage.AddressLength {
|
||||
apiGetInvalid.Inc(1)
|
||||
status = http.StatusUnprocessableEntity
|
||||
errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr))
|
||||
log.Warn(errorMessage)
|
||||
return reader, mimeType, status, nil, errors.New(errorMessage)
|
||||
}
|
||||
manifestAddr = storage.Address(contentAddr)
|
||||
log.Trace("feed update contains swarm hash", "key", manifestAddr)
|
||||
|
||||
// get the manifest the swarm hash points to
|
||||
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
|
||||
if err != nil {
|
||||
apiGetNotFound.Inc(1)
|
||||
status = http.StatusNotFound
|
||||
log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err))
|
||||
return reader, mimeType, status, nil, err
|
||||
}
|
||||
|
||||
// finally, get the manifest entry
|
||||
// it will always be the entry on path ""
|
||||
entry, _ = trie.getEntry(path)
|
||||
if entry == nil {
|
||||
status = http.StatusNotFound
|
||||
apiGetNotFound.Inc(1)
|
||||
err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path)
|
||||
log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path)
|
||||
return reader, mimeType, status, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// regardless of feed update manifests or normal manifests we will converge at this point
|
||||
// get the key the manifest entry points to and serve it if it's unambiguous
|
||||
contentAddr = common.Hex2Bytes(entry.Hash)
|
||||
status = entry.Status
|
||||
if status == http.StatusMultipleChoices {
|
||||
apiGetHTTP300.Inc(1)
|
||||
return nil, entry.ContentType, status, contentAddr, err
|
||||
}
|
||||
mimeType = entry.ContentType
|
||||
log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType)
|
||||
reader, _ = a.fileStore.Retrieve(ctx, contentAddr)
|
||||
} else {
|
||||
// no entry found
|
||||
status = http.StatusNotFound
|
||||
apiGetNotFound.Inc(1)
|
||||
err = fmt.Errorf("Not found: could not find resource '%s'", path)
|
||||
log.Trace("manifest entry not found", "key", contentAddr, "path", path)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *API) Delete(ctx context.Context, addr string, path string) (storage.Address, error) {
|
||||
apiDeleteCount.Inc(1)
|
||||
uri, err := Parse("bzz:/" + addr)
|
||||
if err != nil {
|
||||
apiDeleteFail.Inc(1)
|
||||
return nil, err
|
||||
}
|
||||
key, err := a.ResolveURI(ctx, uri, EmptyCredentials)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newKey, err := a.UpdateManifest(ctx, key, func(mw *ManifestWriter) error {
|
||||
log.Debug(fmt.Sprintf("removing %s from manifest %s", path, key.Log()))
|
||||
return mw.RemoveEntry(path)
|
||||
})
|
||||
if err != nil {
|
||||
apiDeleteFail.Inc(1)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newKey, nil
|
||||
}
|
||||
|
||||
// GetDirectoryTar fetches a requested directory as a tarstream
|
||||
// it returns an io.Reader and an error. Do not forget to Close() the returned ReadCloser
|
||||
func (a *API) GetDirectoryTar(ctx context.Context, decrypt DecryptFunc, uri *URI) (io.ReadCloser, error) {
|
||||
apiGetTarCount.Inc(1)
|
||||
addr, err := a.Resolve(ctx, uri.Addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
walker, err := a.NewManifestWalker(ctx, addr, decrypt, nil)
|
||||
if err != nil {
|
||||
apiGetTarFail.Inc(1)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
piper, pipew := io.Pipe()
|
||||
|
||||
tw := tar.NewWriter(pipew)
|
||||
|
||||
go func() {
|
||||
err := walker.Walk(func(entry *ManifestEntry) error {
|
||||
// ignore manifests (walk will recurse into them)
|
||||
if entry.ContentType == ManifestType {
|
||||
return nil
|
||||
}
|
||||
|
||||
// retrieve the entry's key and size
|
||||
reader, _ := a.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash)))
|
||||
size, err := reader.Size(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// write a tar header for the entry
|
||||
hdr := &tar.Header{
|
||||
Name: entry.Path,
|
||||
Mode: entry.Mode,
|
||||
Size: size,
|
||||
ModTime: entry.ModTime,
|
||||
Xattrs: map[string]string{
|
||||
"user.swarm.content-type": entry.ContentType,
|
||||
},
|
||||
}
|
||||
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// copy the file into the tar stream
|
||||
n, err := io.Copy(tw, io.LimitReader(reader, hdr.Size))
|
||||
if err != nil {
|
||||
return err
|
||||
} else if n != size {
|
||||
return fmt.Errorf("error writing %s: expected %d bytes but sent %d", entry.Path, size, n)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
// close tar writer before closing pipew
|
||||
// to flush remaining data to pipew
|
||||
// regardless of error value
|
||||
tw.Close()
|
||||
if err != nil {
|
||||
apiGetTarFail.Inc(1)
|
||||
pipew.CloseWithError(err)
|
||||
} else {
|
||||
pipew.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
return piper, nil
|
||||
}
|
||||
|
||||
// GetManifestList lists the manifest entries for the specified address and prefix
|
||||
// and returns it as a ManifestList
|
||||
func (a *API) GetManifestList(ctx context.Context, decryptor DecryptFunc, addr storage.Address, prefix string) (list ManifestList, err error) {
|
||||
apiManifestListCount.Inc(1)
|
||||
walker, err := a.NewManifestWalker(ctx, addr, decryptor, nil)
|
||||
if err != nil {
|
||||
apiManifestListFail.Inc(1)
|
||||
return ManifestList{}, err
|
||||
}
|
||||
|
||||
err = walker.Walk(func(entry *ManifestEntry) error {
|
||||
// handle non-manifest files
|
||||
if entry.ContentType != ManifestType {
|
||||
// ignore the file if it doesn't have the specified prefix
|
||||
if !strings.HasPrefix(entry.Path, prefix) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// if the path after the prefix contains a slash, add a
|
||||
// common prefix to the list, otherwise add the entry
|
||||
suffix := strings.TrimPrefix(entry.Path, prefix)
|
||||
if index := strings.Index(suffix, "/"); index > -1 {
|
||||
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1])
|
||||
return nil
|
||||
}
|
||||
if entry.Path == "" {
|
||||
entry.Path = "/"
|
||||
}
|
||||
list.Entries = append(list.Entries, entry)
|
||||
return nil
|
||||
}
|
||||
|
||||
// if the manifest's path is a prefix of the specified prefix
|
||||
// then just recurse into the manifest by returning nil and
|
||||
// continuing the walk
|
||||
if strings.HasPrefix(prefix, entry.Path) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// if the manifest's path has the specified prefix, then if the
|
||||
// path after the prefix contains a slash, add a common prefix
|
||||
// to the list and skip the manifest, otherwise recurse into
|
||||
// the manifest by returning nil and continuing the walk
|
||||
if strings.HasPrefix(entry.Path, prefix) {
|
||||
suffix := strings.TrimPrefix(entry.Path, prefix)
|
||||
if index := strings.Index(suffix, "/"); index > -1 {
|
||||
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1])
|
||||
return ErrSkipManifest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// the manifest neither has the prefix or needs recursing in to
|
||||
// so just skip it
|
||||
return ErrSkipManifest
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
apiManifestListFail.Inc(1)
|
||||
return ManifestList{}, err
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
||||
|
||||
func (a *API) UpdateManifest(ctx context.Context, addr storage.Address, update func(mw *ManifestWriter) error) (storage.Address, error) {
|
||||
apiManifestUpdateCount.Inc(1)
|
||||
mw, err := a.NewManifestWriter(ctx, addr, nil)
|
||||
if err != nil {
|
||||
apiManifestUpdateFail.Inc(1)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := update(mw); err != nil {
|
||||
apiManifestUpdateFail.Inc(1)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addr, err = mw.Store()
|
||||
if err != nil {
|
||||
apiManifestUpdateFail.Inc(1)
|
||||
return nil, err
|
||||
}
|
||||
log.Debug(fmt.Sprintf("generated manifest %s", addr))
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// Modify loads manifest and checks the content hash before recalculating and storing the manifest.
|
func (a *API) Modify(ctx context.Context, addr storage.Address, path, contentHash, contentType string) (storage.Address, error) {
    apiModifyCount.Inc(1)
    quitC := make(chan bool)
    trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
    if err != nil {
        apiModifyFail.Inc(1)
        return nil, err
    }
    if contentHash != "" {
        entry := newManifestTrieEntry(&ManifestEntry{
            Path:        path,
            ContentType: contentType,
        }, nil)
        entry.Hash = contentHash
        trie.addEntry(entry, quitC)
    } else {
        trie.deleteEntry(path, quitC)
    }

    if err := trie.recalcAndStore(); err != nil {
        apiModifyFail.Inc(1)
        return nil, err
    }
    return trie.ref, nil
}

// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm.
func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
    apiAddFileCount.Inc(1)

    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
        apiAddFileFail.Inc(1)
        return nil, "", err
    }
    mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials)
    if err != nil {
        apiAddFileFail.Inc(1)
        return nil, "", err
    }

    // trim the root dir we added
    if path[:1] == "/" {
        path = path[1:]
    }

    entry := &ManifestEntry{
        Path:        filepath.Join(path, fname),
        ContentType: mime.TypeByExtension(filepath.Ext(fname)),
        Mode:        0700,
        Size:        int64(len(content)),
        ModTime:     time.Now(),
    }

    mw, err := a.NewManifestWriter(ctx, mkey, nil)
    if err != nil {
        apiAddFileFail.Inc(1)
        return nil, "", err
    }

    fkey, err := mw.AddEntry(ctx, bytes.NewReader(content), entry)
    if err != nil {
        apiAddFileFail.Inc(1)
        return nil, "", err
    }

    newMkey, err := mw.Store()
    if err != nil {
        apiAddFileFail.Inc(1)
        return nil, "", err
    }

    return fkey, newMkey.String(), nil
}

func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestPath, defaultPath string, mw *ManifestWriter) (storage.Address, error) {
    apiUploadTarCount.Inc(1)
    var contentKey storage.Address
    tr := tar.NewReader(bodyReader)
    defer bodyReader.Close()
    var defaultPathFound bool
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            break
        } else if err != nil {
            apiUploadTarFail.Inc(1)
            return nil, fmt.Errorf("error reading tar stream: %s", err)
        }

        // only store regular files
        if !hdr.FileInfo().Mode().IsRegular() {
            continue
        }

        // add the entry under the path from the request
        manifestPath := path.Join(manifestPath, hdr.Name)
        contentType := hdr.Xattrs["user.swarm.content-type"]
        if contentType == "" {
            contentType = mime.TypeByExtension(filepath.Ext(hdr.Name))
        }
        entry := &ManifestEntry{
            Path:        manifestPath,
            ContentType: contentType,
            Mode:        hdr.Mode,
            Size:        hdr.Size,
            ModTime:     hdr.ModTime,
        }
        contentKey, err = mw.AddEntry(ctx, tr, entry)
        if err != nil {
            apiUploadTarFail.Inc(1)
            return nil, fmt.Errorf("error adding manifest entry from tar stream: %s", err)
        }
        if hdr.Name == defaultPath {
            contentType := hdr.Xattrs["user.swarm.content-type"]
            if contentType == "" {
                contentType = mime.TypeByExtension(filepath.Ext(hdr.Name))
            }

            entry := &ManifestEntry{
                Hash:        contentKey.Hex(),
                Path:        "", // default entry
                ContentType: contentType,
                Mode:        hdr.Mode,
                Size:        hdr.Size,
                ModTime:     hdr.ModTime,
            }
            contentKey, err = mw.AddEntry(ctx, nil, entry)
            if err != nil {
                apiUploadTarFail.Inc(1)
                return nil, fmt.Errorf("error adding default manifest entry from tar stream: %s", err)
            }
            defaultPathFound = true
        }
    }
    if defaultPath != "" && !defaultPathFound {
        return contentKey, fmt.Errorf("default path %q not found", defaultPath)
    }
    return contentKey, nil
}
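
// Illustrative sketch (not part of the original file): UploadTar reads each
// entry's content type from the "user.swarm.content-type" tar xattr and only
// falls back to the file extension when it is absent, so a client producing
// the tar stream can pin a MIME type per file. Assuming tw is a *tar.Writer
// wrapped around the request body and body holds the file contents:
//
//    hdr := &tar.Header{
//        Name:    "data/config",
//        Mode:    0644,
//        Size:    int64(len(body)),
//        ModTime: time.Now(),
//        Xattrs: map[string]string{
//            "user.swarm.content-type": "application/json",
//        },
//    }
//    if err := tw.WriteHeader(hdr); err != nil {
//        return err
//    }
//    _, err := tw.Write(body)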

// RemoveFile removes a file entry in a manifest.
func (a *API) RemoveFile(ctx context.Context, mhash string, path string, fname string, nameresolver bool) (string, error) {
    apiRmFileCount.Inc(1)

    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
        apiRmFileFail.Inc(1)
        return "", err
    }
    mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials)
    if err != nil {
        apiRmFileFail.Inc(1)
        return "", err
    }

    // trim the root dir we added
    if path[:1] == "/" {
        path = path[1:]
    }

    mw, err := a.NewManifestWriter(ctx, mkey, nil)
    if err != nil {
        apiRmFileFail.Inc(1)
        return "", err
    }

    err = mw.RemoveEntry(filepath.Join(path, fname))
    if err != nil {
        apiRmFileFail.Inc(1)
        return "", err
    }

    newMkey, err := mw.Store()
    if err != nil {
        apiRmFileFail.Inc(1)
        return "", err
    }

    return newMkey.String(), nil
}

// AppendFile removes the old manifest entry, appends the file entry to a new manifest and adds it to Swarm.
func (a *API) AppendFile(ctx context.Context, mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) {
    apiAppendFileCount.Inc(1)

    buffSize := offset + addSize
    if buffSize < existingSize {
        buffSize = existingSize
    }

    buf := make([]byte, buffSize)

    oldReader, _ := a.Retrieve(ctx, oldAddr)
    io.ReadAtLeast(oldReader, buf, int(offset))

    newReader := bytes.NewReader(content)
    io.ReadAtLeast(newReader, buf[offset:], int(addSize))

    if buffSize < existingSize {
        io.ReadAtLeast(oldReader, buf[addSize:], int(buffSize))
    }

    combinedReader := bytes.NewReader(buf)
    totalSize := int64(len(buf))

    // TODO(jmozah): to append using pyramid chunker when it is ready
    //oldReader := a.Retrieve(oldKey)
    //newReader := bytes.NewReader(content)
    //combinedReader := io.MultiReader(oldReader, newReader)

    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
        apiAppendFileFail.Inc(1)
        return nil, "", err
    }
    mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials)
    if err != nil {
        apiAppendFileFail.Inc(1)
        return nil, "", err
    }

    // trim the root dir we added
    if path[:1] == "/" {
        path = path[1:]
    }

    mw, err := a.NewManifestWriter(ctx, mkey, nil)
    if err != nil {
        apiAppendFileFail.Inc(1)
        return nil, "", err
    }

    err = mw.RemoveEntry(filepath.Join(path, fname))
    if err != nil {
        apiAppendFileFail.Inc(1)
        return nil, "", err
    }

    entry := &ManifestEntry{
        Path:        filepath.Join(path, fname),
        ContentType: mime.TypeByExtension(filepath.Ext(fname)),
        Mode:        0700,
        Size:        totalSize,
        ModTime:     time.Now(),
    }

    fkey, err := mw.AddEntry(ctx, io.Reader(combinedReader), entry)
    if err != nil {
        apiAppendFileFail.Inc(1)
        return nil, "", err
    }

    newMkey, err := mw.Store()
    if err != nil {
        apiAppendFileFail.Inc(1)
        return nil, "", err
    }

    return fkey, newMkey.String(), nil
}

// BuildDirectoryTree used by swarmfs_unix
func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) {
    uri, err := Parse("bzz:/" + mhash)
    if err != nil {
        return nil, nil, err
    }
    addr, err = a.Resolve(ctx, uri.Addr)
    if err != nil {
        return nil, nil, err
    }

    quitC := make(chan bool)
    rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
    if err != nil {
        return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err)
    }

    manifestEntryMap = map[string]*manifestTrieEntry{}
    err = rootTrie.listWithPrefix(uri.Path, quitC, func(entry *manifestTrieEntry, suffix string) {
        manifestEntryMap[suffix] = entry
    })

    if err != nil {
        return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err)
    }
    return addr, manifestEntryMap, nil
}

// FeedsLookup finds Swarm feeds updates at specific points in time, or the latest update
func (a *API) FeedsLookup(ctx context.Context, query *feed.Query) ([]byte, error) {
    _, err := a.feed.Lookup(ctx, query)
    if err != nil {
        return nil, err
    }
    var data []byte
    _, data, err = a.feed.GetContent(&query.Feed)
    if err != nil {
        return nil, err
    }
    return data, nil
}

// FeedsNewRequest creates a Request object to update a specific feed
func (a *API) FeedsNewRequest(ctx context.Context, feed *feed.Feed) (*feed.Request, error) {
    return a.feed.NewRequest(ctx, feed)
}

// FeedsUpdate publishes a new update on the given feed
func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.Address, error) {
    return a.feed.Update(ctx, request)
}

// ErrCannotLoadFeedManifest is returned when looking up a feed manifest fails
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")

// ErrNotAFeedManifest is returned when the address provided returned something other than a valid manifest
var ErrNotAFeedManifest = errors.New("Not a feed manifest")

// ResolveFeedManifest retrieves the Swarm feed manifest for the given address, and returns the referenced Feed.
func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feed.Feed, error) {
    trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt)
    if err != nil {
        return nil, ErrCannotLoadFeedManifest
    }

    entry, _ := trie.getEntry("")
    if entry.ContentType != FeedContentType {
        return nil, ErrNotAFeedManifest
    }

    return entry.Feed, nil
}

// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Swarm feed
var ErrCannotResolveFeedURI = errors.New("Cannot resolve Feed URI")

// ErrCannotResolveFeed is returned when values provided are not enough or invalid to recreate a
// feed out of them.
var ErrCannotResolveFeed = errors.New("Cannot resolve Feed")

// ResolveFeed attempts to extract feed information out of the manifest, if provided.
// If not, it attempts to extract the feed out of a set of key-value pairs.
func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feed.Values) (*feed.Feed, error) {
    var fd *feed.Feed
    var err error
    if uri.Addr != "" {
        // resolve the content key.
        manifestAddr := uri.Address()
        if manifestAddr == nil {
            manifestAddr, err = a.Resolve(ctx, uri.Addr)
            if err != nil {
                return nil, ErrCannotResolveFeedURI
            }
        }

        // get the Swarm feed from the manifest
        fd, err = a.ResolveFeedManifest(ctx, manifestAddr)
        if err != nil {
            return nil, err
        }
        log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", fd.Hex())
    } else {
        var f feed.Feed
        if err := f.FromValues(values); err != nil {
            return nil, ErrCannotResolveFeed
        }
        fd = &f
    }
    return fd, nil
}

// MimeOctetStream is the default value of the http Content-Type header
const MimeOctetStream = "application/octet-stream"

// DetectContentType detects the content type by file extension, or falls back to content sniffing
func DetectContentType(fileName string, f io.ReadSeeker) (string, error) {
    ctype := mime.TypeByExtension(filepath.Ext(fileName))
    if ctype != "" {
        return ctype, nil
    }

    // save the current position so we can rewind after probing the beginning of the file
    currentPosition, err := f.Seek(0, io.SeekCurrent)
    if err != nil {
        return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err)
    }

    // read a chunk to decide between utf-8 text and binary
    var buf [512]byte
    n, _ := f.Read(buf[:])
    ctype = http.DetectContentType(buf[:n])

    _, err = f.Seek(currentPosition, io.SeekStart) // rewind to output whole file
    if err != nil {
        return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err)
    }

    return ctype, nil
}
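
// Illustrative usage sketch (not part of the original file): DetectContentType
// rewinds the reader to its previous offset after sniffing, so the same handle
// can still be uploaded in full afterwards. "report.bin" is a hypothetical
// file name with no registered extension:
//
//    f, err := os.Open("report.bin")
//    if err != nil {
//        return err
//    }
//    defer f.Close()
//    ctype, err := DetectContentType(f.Name(), f) // sniffed from the first 512 bytes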
@ -1,576 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
    "bytes"
    "context"
    crand "crypto/rand"
    "errors"
    "flag"
    "fmt"
    "io"
    "io/ioutil"
    "math/big"
    "os"
    "strings"
    "testing"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/swarm/chunk"
    "github.com/ethereum/go-ethereum/swarm/sctx"
    "github.com/ethereum/go-ethereum/swarm/storage"
    "github.com/ethereum/go-ethereum/swarm/testutil"
)

func init() {
    loglevel := flag.Int("loglevel", 2, "loglevel")
    flag.Parse()
    log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
}

func testAPI(t *testing.T, f func(*API, *chunk.Tags, bool)) {
    for _, v := range []bool{true, false} {
        datadir, err := ioutil.TempDir("", "bzz-test")
        if err != nil {
            t.Fatalf("unable to create temp dir: %v", err)
        }
        defer os.RemoveAll(datadir)
        tags := chunk.NewTags()
        fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32), tags)
        if err != nil {
            t.Fatalf("unable to create file store: %v", err)
        }
        api := NewAPI(fileStore, nil, nil, nil, tags)
        f(api, tags, v)
    }
}

type testResponse struct {
    reader storage.LazySectionReader
    *Response
}

type Response struct {
    MimeType string
    Status   int
    Size     int64
    Content  string
}

func checkResponse(t *testing.T, resp *testResponse, exp *Response) {
    if resp.MimeType != exp.MimeType {
        t.Errorf("incorrect mimeType. expected '%s', got '%s'", exp.MimeType, resp.MimeType)
    }
    if resp.Status != exp.Status {
        t.Errorf("incorrect status. expected '%d', got '%d'", exp.Status, resp.Status)
    }
    if resp.Size != exp.Size {
        t.Errorf("incorrect size. expected '%d', got '%d'", exp.Size, resp.Size)
    }
    if resp.reader != nil {
        content := make([]byte, resp.Size)
        read, _ := resp.reader.Read(content)
        if int64(read) != exp.Size {
            t.Errorf("incorrect content length. expected '%d', got '%d'", exp.Size, read)
        }
        resp.Content = string(content)
    }
    if resp.Content != exp.Content {
        t.Errorf("incorrect content. expected '%s', got '%s'", exp.Content, resp.Content)
    }
}

func expResponse(content string, mimeType string, status int) *Response {
    log.Trace(fmt.Sprintf("expected content (%v): %v ", len(content), content))
    return &Response{mimeType, status, int64(len(content)), content}
}

func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
    addr := storage.Address(common.Hex2Bytes(bzzhash))
    reader, mimeType, status, _, err := api.Get(context.TODO(), NOOPDecrypt, addr, path)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    quitC := make(chan bool)
    size, err := reader.Size(context.TODO(), quitC)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    log.Trace(fmt.Sprintf("reader size: %v ", size))
    s := make([]byte, size)
    _, err = reader.Read(s)
    if err != io.EOF {
        t.Fatalf("unexpected error: %v", err)
    }
    reader.Seek(0, 0)
    return &testResponse{reader, &Response{mimeType, status, size, string(s)}}
}

func TestApiPut(t *testing.T) {
    testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
        content := "hello"
        exp := expResponse(content, "text/plain", 0)
        ctx := context.TODO()
        addr, wait, err := putString(ctx, api, content, exp.MimeType, toEncrypt)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        err = wait(ctx)
        if err != nil {
            t.Fatalf("unexpected error: %v", err)
        }
        resp := testGet(t, api, addr.Hex(), "")
        checkResponse(t, resp, exp)
        tag := tags.All()[0]
        testutil.CheckTag(t, tag, 2, 2, 0, 2) // 1 chunk data, 1 chunk manifest
    })
}

// TestApiTagLarge tests that the number of chunks counted is larger for a larger input
func TestApiTagLarge(t *testing.T) {
    const contentLength = 4096 * 4095
    testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
        randomContentReader := io.LimitReader(crand.Reader, int64(contentLength))
        tag, err := api.Tags.New("unnamed-tag", 0)
        if err != nil {
            t.Fatal(err)
        }
        ctx := sctx.SetTag(context.Background(), tag.Uid)
        key, waitContent, err := api.Store(ctx, randomContentReader, int64(contentLength), toEncrypt)
        if err != nil {
            t.Fatal(err)
        }
        err = waitContent(ctx)
        if err != nil {
            t.Fatal(err)
        }
        tag.DoneSplit(key)

        if toEncrypt {
            tag := tags.All()[0]
            expect := int64(4095 + 64 + 1)
            testutil.CheckTag(t, tag, expect, expect, 0, expect)
        } else {
            tag := tags.All()[0]
            expect := int64(4095 + 32 + 1)
            testutil.CheckTag(t, tag, expect, expect, 0, expect)
        }
    })
}

// testResolveValidator implements the Resolver interface and either returns the
// given hash if it is set, or returns a "name not found" error
type testResolveValidator struct {
    hash *common.Hash
}

func newTestResolveValidator(addr string) *testResolveValidator {
    r := &testResolveValidator{}
    if addr != "" {
        hash := common.HexToHash(addr)
        r.hash = &hash
    }
    return r
}

func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) {
    if t.hash == nil {
        return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr)
    }
    return *t.hash, nil
}

func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) {
    return
}
func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) {
    return
}

// TestAPIResolve tests resolving URIs which can either contain content hashes
// or ENS names
func TestAPIResolve(t *testing.T) {
    ensAddr := "swarm.eth"
    hashAddr := "1111111111111111111111111111111111111111111111111111111111111111"
    resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222"
    doesResolve := newTestResolveValidator(resolvedAddr)
    doesntResolve := newTestResolveValidator("")

    type test struct {
        desc      string
        dns       Resolver
        addr      string
        immutable bool
        result    string
        expectErr error
    }

    tests := []*test{
        {
            desc:   "DNS not configured, hash address, returns hash address",
            dns:    nil,
            addr:   hashAddr,
            result: hashAddr,
        },
        {
            desc:      "DNS not configured, ENS address, returns error",
            dns:       nil,
            addr:      ensAddr,
            expectErr: errors.New(`no DNS to resolve name: "swarm.eth"`),
        },
        {
            desc:   "DNS configured, hash address, hash resolves, returns resolved address",
            dns:    doesResolve,
            addr:   hashAddr,
            result: resolvedAddr,
        },
        {
            desc:      "DNS configured, immutable hash address, hash resolves, returns hash address",
            dns:       doesResolve,
            addr:      hashAddr,
            immutable: true,
            result:    hashAddr,
        },
        {
            desc:   "DNS configured, hash address, hash doesn't resolve, returns hash address",
            dns:    doesntResolve,
            addr:   hashAddr,
            result: hashAddr,
        },
        {
            desc:   "DNS configured, ENS address, name resolves, returns resolved address",
            dns:    doesResolve,
            addr:   ensAddr,
            result: resolvedAddr,
        },
        {
            desc:      "DNS configured, immutable ENS address, name resolves, returns error",
            dns:       doesResolve,
            addr:      ensAddr,
            immutable: true,
            expectErr: errors.New(`immutable address not a content hash: "swarm.eth"`),
        },
        {
            desc:      "DNS configured, ENS address, name doesn't resolve, returns error",
            dns:       doesntResolve,
            addr:      ensAddr,
            expectErr: errors.New(`DNS name not found: "swarm.eth"`),
        },
    }
    for _, x := range tests {
        t.Run(x.desc, func(t *testing.T) {
            api := &API{dns: x.dns}
            uri := &URI{Addr: x.addr, Scheme: "bzz"}
            if x.immutable {
                uri.Scheme = "bzz-immutable"
            }
            res, err := api.ResolveURI(context.TODO(), uri, "")
            if err == nil {
                if x.expectErr != nil {
                    t.Fatalf("expected error %q, got result %q", x.expectErr, res)
                }
                if res.String() != x.result {
                    t.Fatalf("expected result %q, got %q", x.result, res)
                }
            } else {
                if x.expectErr == nil {
                    t.Fatalf("expected no error, got %q", err)
                }
                if err.Error() != x.expectErr.Error() {
                    t.Fatalf("expected error %q, got %q", x.expectErr, err)
                }
            }
        })
    }
}

func TestMultiResolver(t *testing.T) {
    doesntResolve := newTestResolveValidator("")

    ethAddr := "swarm.eth"
    ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222"
    ethResolve := newTestResolveValidator(ethHash)

    testAddr := "swarm.test"
    testHash := "0x1111111111111111111111111111111111111111111111111111111111111111"
    testResolve := newTestResolveValidator(testHash)

    tests := []struct {
        desc   string
        r      Resolver
        addr   string
        result string
        err    error
    }{
        {
            desc: "No resolvers, returns error",
            r:    NewMultiResolver(),
            err:  NewNoResolverError(""),
        },
        {
            desc:   "One default resolver, returns resolved address",
            r:      NewMultiResolver(MultiResolverOptionWithResolver(ethResolve, "")),
            addr:   ethAddr,
            result: ethHash,
        },
        {
            desc: "Two default resolvers, returns resolved address",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(ethResolve, ""),
                MultiResolverOptionWithResolver(ethResolve, ""),
            ),
            addr:   ethAddr,
            result: ethHash,
        },
        {
            desc: "Two default resolvers, first doesn't resolve, returns resolved address",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(doesntResolve, ""),
                MultiResolverOptionWithResolver(ethResolve, ""),
            ),
            addr:   ethAddr,
            result: ethHash,
        },
        {
            desc: "Default resolver doesn't resolve, TLD resolver resolves, returns resolved address",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(doesntResolve, ""),
                MultiResolverOptionWithResolver(ethResolve, "eth"),
            ),
            addr:   ethAddr,
            result: ethHash,
        },
        {
            desc: "Three TLD resolvers, third resolves, returns resolved address",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(doesntResolve, "eth"),
                MultiResolverOptionWithResolver(doesntResolve, "eth"),
                MultiResolverOptionWithResolver(ethResolve, "eth"),
            ),
            addr:   ethAddr,
            result: ethHash,
        },
        {
            desc: "Default resolver doesn't resolve, one TLD resolver resolves, returns resolved address",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(doesntResolve, ""),
                MultiResolverOptionWithResolver(ethResolve, "eth"),
            ),
            addr:   ethAddr,
            result: ethHash,
        },
        {
            desc: "One default and one TLD resolver, neither resolves, returns error",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(doesntResolve, ""),
                MultiResolverOptionWithResolver(doesntResolve, "eth"),
            ),
            addr:   ethAddr,
            result: ethHash,
            err:    errors.New(`DNS name not found: "swarm.eth"`),
        },
        {
            desc: "Two TLD resolvers, both resolve, returns resolved address",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(ethResolve, "eth"),
                MultiResolverOptionWithResolver(testResolve, "test"),
            ),
            addr:   testAddr,
            result: testHash,
        },
        {
            desc: "One TLD resolver, no default resolver, returns error for different TLD",
            r: NewMultiResolver(
                MultiResolverOptionWithResolver(ethResolve, "eth"),
            ),
            addr: testAddr,
            err:  NewNoResolverError("test"),
        },
    }
    for _, x := range tests {
        t.Run(x.desc, func(t *testing.T) {
            res, err := x.r.Resolve(x.addr)
            if err == nil {
                if x.err != nil {
                    t.Fatalf("expected error %q, got result %q", x.err, res.Hex())
                }
                if res.Hex() != x.result {
                    t.Fatalf("expected result %q, got %q", x.result, res.Hex())
                }
            } else {
                if x.err == nil {
                    t.Fatalf("expected no error, got %q", err)
                }
                if err.Error() != x.err.Error() {
                    t.Fatalf("expected error %q, got %q", x.err, err)
                }
            }
        })
    }
}

func TestDecryptOriginForbidden(t *testing.T) {
    ctx := context.TODO()
    ctx = sctx.SetHost(ctx, "swarm-gateways.net")

    me := &ManifestEntry{
        Access: &AccessEntry{Type: AccessTypePass},
    }

    api := NewAPI(nil, nil, nil, nil, chunk.NewTags())

    f := api.Decryptor(ctx, "")
    err := f(me)
    if err != ErrDecryptDomainForbidden {
        t.Fatalf("should fail with ErrDecryptDomainForbidden, got %v", err)
    }
}

func TestDecryptOrigin(t *testing.T) {
    for _, v := range []struct {
        host        string
        expectError error
    }{
        {
            host:        "localhost",
            expectError: ErrDecrypt,
        },
        {
            host:        "127.0.0.1",
            expectError: ErrDecrypt,
        },
        {
            host:        "swarm-gateways.net",
            expectError: ErrDecryptDomainForbidden,
        },
    } {
        ctx := context.TODO()
        ctx = sctx.SetHost(ctx, v.host)

        me := &ManifestEntry{
            Access: &AccessEntry{Type: AccessTypePass},
        }

        api := NewAPI(nil, nil, nil, nil, chunk.NewTags())

        f := api.Decryptor(ctx, "")
        err := f(me)
        if err != v.expectError {
            t.Fatalf("should fail with %v, got %v", v.expectError, err)
        }
    }
}

func TestDetectContentType(t *testing.T) {
    for _, tc := range []struct {
        file                string
        content             string
        expectedContentType string
    }{
        {
            file:                "file-with-correct-css.css",
            content:             "body {background-color: orange}",
            expectedContentType: "text/css; charset=utf-8",
        },
        {
            file:                "empty-file.css",
            content:             "",
            expectedContentType: "text/css; charset=utf-8",
        },
        {
            file:                "empty-file.pdf",
            content:             "",
            expectedContentType: "application/pdf",
        },
        {
            file:                "empty-file.md",
            content:             "",
            expectedContentType: "text/markdown; charset=utf-8",
        },
        {
            file:                "empty-file-with-unknown-content.strangeext",
            content:             "",
            expectedContentType: "text/plain; charset=utf-8",
        },
        {
            file:                "file-with-unknown-extension-and-content.strangeext",
            content:             "Lorem Ipsum",
            expectedContentType: "text/plain; charset=utf-8",
        },
        {
            file:                "file-no-extension",
            content:             "Lorem Ipsum",
            expectedContentType: "text/plain; charset=utf-8",
        },
        {
            file:                "file-no-extension-no-content",
            content:             "",
            expectedContentType: "text/plain; charset=utf-8",
        },
        {
            file:                "css-file-with-html-inside.css",
            content:             "<!doctype html><html><head></head><body></body></html>",
            expectedContentType: "text/css; charset=utf-8",
        },
    } {
        t.Run(tc.file, func(t *testing.T) {
            detected, err := DetectContentType(tc.file, bytes.NewReader([]byte(tc.content)))
            if err != nil {
                t.Fatal(err)
            }

            if detected != tc.expectedContentType {
                t.Fatalf("File: %s, Expected mime type %s, got %s", tc.file, tc.expectedContentType, detected)
            }
        })
    }
}

// putString provides singleton manifest creation on top of api.API
func putString(ctx context.Context, a *API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
    r := strings.NewReader(content)
    tag, err := a.Tags.New("unnamed-tag", 0)
    if err != nil {
        return nil, nil, err
    }
    log.Trace("created new tag", "uid", tag.Uid)

    cCtx := sctx.SetTag(ctx, tag.Uid)
    key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt)
    if err != nil {
        return nil, nil, err
    }
    manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
    r = strings.NewReader(manifest)
    key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt)
    if err != nil {
        return nil, nil, err
    }
    tag.DoneSplit(key)
    return key, func(ctx context.Context) error {
        err := waitContent(ctx)
        if err != nil {
            return err
        }
        return waitManifest(ctx)
    }, nil
}
@ -1,829 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package client

import (
    "archive/tar"
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "mime/multipart"
    "net/http"
    "net/http/httptrace"
    "net/textproto"
    "net/url"
    "os"
    "path/filepath"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/swarm/api"
    swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
    "github.com/ethereum/go-ethereum/swarm/spancontext"
    "github.com/ethereum/go-ethereum/swarm/storage/feed"
    "github.com/pborman/uuid"
)

var (
    ErrUnauthorized = errors.New("unauthorized")
)

// NewClient creates a new swarm client pointing at the given gateway URL
func NewClient(gateway string) *Client {
    return &Client{
        Gateway: gateway,
    }
}

// Client wraps interaction with a swarm HTTP gateway.
type Client struct {
    Gateway string
}

// UploadRaw uploads raw data to swarm and returns the resulting hash. If toEncrypt is true it
// uploads encrypted data.
func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) {
    if size <= 0 {
        return "", errors.New("data size must be greater than zero")
    }
    addr := ""
    if toEncrypt {
        addr = "encrypt"
    }
    req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r)
    if err != nil {
        return "", err
    }
    req.ContentLength = size
    req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("raw_upload_%d", time.Now().Unix()))

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return "", err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    data, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return "", err
    }
    return string(data), nil
}

// DownloadRaw downloads raw data from swarm, returning a ReadCloser and a bool
// indicating whether the content was encrypted
func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) {
    uri := c.Gateway + "/bzz-raw:/" + hash
    res, err := http.DefaultClient.Get(uri)
    if err != nil {
        return nil, false, err
    }
    if res.StatusCode != http.StatusOK {
        res.Body.Close()
        return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    isEncrypted := (res.Header.Get("X-Decrypted") == "true")
    return res.Body, isEncrypted, nil
}
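
// Illustrative usage sketch (not part of the original file): round-tripping
// raw data through a gateway. The gateway URL is an assumption (a local
// node's default HTTP interface):
//
//    client := NewClient("http://localhost:8500")
//    data := []byte("hello swarm")
//    hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), false)
//    if err != nil {
//        return err
//    }
//    body, isEncrypted, err := client.DownloadRaw(hash) // isEncrypted is false here
//    if err != nil {
//        return err
//    }
//    defer body.Close()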

// File represents a file in a swarm manifest and is used for uploading and
// downloading content to and from swarm
type File struct {
    io.ReadCloser
    api.ManifestEntry
    Tag string
}

// Open opens a local file which can then be passed to client.Upload to upload
// it to swarm
func Open(path string) (*File, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    stat, err := f.Stat()
    if err != nil {
        f.Close()
        return nil, err
    }

    contentType, err := api.DetectContentType(f.Name(), f)
    if err != nil {
        f.Close()
        return nil, err
    }

    return &File{
        ReadCloser: f,
        ManifestEntry: api.ManifestEntry{
            ContentType: contentType,
            Mode:        int64(stat.Mode()),
            Size:        stat.Size(),
            ModTime:     stat.ModTime(),
        },
        Tag: filepath.Base(path),
    }, nil
}

// Upload uploads a file to swarm and either adds it to an existing manifest
// (if the manifest argument is non-empty) or creates a new manifest containing
// the file, returning the resulting manifest hash (the file will then be
// available at bzz:/<hash>/<path>)
func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) {
    if file.Size <= 0 {
        return "", errors.New("file size must be greater than zero")
    }
    return c.TarUpload(manifest, &FileUploader{file}, "", toEncrypt)
}

// Download downloads a file with the given path from the swarm manifest with
// the given hash (i.e. it gets bzz:/<hash>/<path>)
func (c *Client) Download(hash, path string) (*File, error) {
    uri := c.Gateway + "/bzz:/" + hash + "/" + path
    res, err := http.DefaultClient.Get(uri)
    if err != nil {
        return nil, err
    }
    if res.StatusCode != http.StatusOK {
        res.Body.Close()
        return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    return &File{
        ReadCloser: res.Body,
        ManifestEntry: api.ManifestEntry{
            ContentType: res.Header.Get("Content-Type"),
            Size:        res.ContentLength,
        },
    }, nil
}

// UploadDirectory uploads a directory tree to swarm and either adds the files
// to an existing manifest (if the manifest argument is non-empty) or creates a
// new manifest, returning the resulting manifest hash (files from the
// directory will then be available at bzz:/<hash>/path/to/file), with
// the file specified in defaultPath being uploaded to the root of the manifest
// (i.e. bzz:/<hash>/)
func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) {
    stat, err := os.Stat(dir)
    if err != nil {
        return "", err
    } else if !stat.IsDir() {
        return "", fmt.Errorf("not a directory: %s", dir)
    }
    if defaultPath != "" {
        if _, err := os.Stat(filepath.Join(dir, defaultPath)); err != nil {
            if os.IsNotExist(err) {
                return "", fmt.Errorf("the default path %q was not found in the upload directory %q", defaultPath, dir)
            }
            return "", fmt.Errorf("default path: %v", err)
        }
    }
    return c.TarUpload(manifest, &DirectoryUploader{dir}, defaultPath, toEncrypt)
}
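
// Illustrative usage sketch (not part of the original file): uploading a local
// file into a fresh manifest and fetching it back. The file path is
// hypothetical; Path controls where the entry sits inside the manifest:
//
//    file, err := Open("/tmp/hello.txt")
//    if err != nil {
//        return err
//    }
//    file.Path = "hello.txt"
//    hash, err := client.Upload(file, "", false) // "" creates a new manifest
//    if err != nil {
//        return err
//    }
//    f, err := client.Download(hash, "hello.txt") // i.e. bzz:/<hash>/hello.txt
//    if err != nil {
//        return err
//    }
//    defer f.Close()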

// DownloadDirectory downloads the files contained in a swarm manifest under
// the given path into a local directory (existing files will be overwritten)
func (c *Client) DownloadDirectory(hash, path, destDir, credentials string) error {
    stat, err := os.Stat(destDir)
    if err != nil {
        return err
    } else if !stat.IsDir() {
        return fmt.Errorf("not a directory: %s", destDir)
    }

    uri := c.Gateway + "/bzz:/" + hash + "/" + path
    req, err := http.NewRequest("GET", uri, nil)
    if err != nil {
        return err
    }
    if credentials != "" {
        req.SetBasicAuth("", credentials)
    }
    req.Header.Set("Accept", "application/x-tar")
    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    switch res.StatusCode {
    case http.StatusOK:
    case http.StatusUnauthorized:
        return ErrUnauthorized
    default:
        return fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    tr := tar.NewReader(res.Body)
    for {
        hdr, err := tr.Next()
        if err == io.EOF {
            return nil
        } else if err != nil {
            return err
        }
        // ignore the default path file
        if hdr.Name == "" {
            continue
        }

        dstPath := filepath.Join(destDir, filepath.Clean(strings.TrimPrefix(hdr.Name, path)))
        if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil {
            return err
        }
        var mode os.FileMode = 0644
        if hdr.Mode > 0 {
            mode = os.FileMode(hdr.Mode)
        }
        dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
        if err != nil {
            return err
        }
        n, err := io.Copy(dst, tr)
        dst.Close()
        if err != nil {
            return err
        } else if n != hdr.Size {
            return fmt.Errorf("expected %s to be %d bytes but got %d", hdr.Name, hdr.Size, n)
        }
    }
}

// DownloadFile downloads a single file into the destination directory. If the
// manifest entry does not specify a file name, it falls back to the hash of
// the file as the filename
func (c *Client) DownloadFile(hash, path, dest, credentials string) error {
    hasDestinationFilename := false
    if stat, err := os.Stat(dest); err == nil {
        hasDestinationFilename = !stat.IsDir()
    } else {
        if os.IsNotExist(err) {
            // does not exist - should be created
            hasDestinationFilename = true
        } else {
            return fmt.Errorf("could not stat path: %v", err)
        }
    }

    manifestList, err := c.List(hash, path, credentials)
    if err != nil {
        return err
    }

    switch len(manifestList.Entries) {
    case 0:
        return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct")
    case 1:
        //continue
    default:
        return fmt.Errorf("got too many matches for this path")
    }

    uri := c.Gateway + "/bzz:/" + hash + "/" + path
    req, err := http.NewRequest("GET", uri, nil)
    if err != nil {
        return err
    }
    if credentials != "" {
        req.SetBasicAuth("", credentials)
    }
    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    switch res.StatusCode {
    case http.StatusOK:
    case http.StatusUnauthorized:
        return ErrUnauthorized
    default:
        return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode)
    }
    filename := ""
    if hasDestinationFilename {
        filename = dest
    } else {
        // try to infer the filename from the requested path
        re := regexp.MustCompile("[^/]+$") // everything after the last slash

        if results := re.FindAllString(path, -1); len(results) > 0 {
            filename = results[len(results)-1]
        } else {
            if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" {
                filename = entry.Path
            } else {
                // assume hash as name if there's nothing from the command line
                filename = hash
            }
        }
        filename = filepath.Join(dest, filename)
    }
    filePath, err := filepath.Abs(filename)
    if err != nil {
        return err
    }

    if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil {
        return err
    }

    dst, err := os.Create(filename)
    if err != nil {
        return err
    }
    defer dst.Close()

    _, err = io.Copy(dst, res.Body)
    return err
}
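
// Illustrative usage sketch (not part of the original file): mirroring a whole
// manifest to disk and then fetching a single entry. Destination paths are
// hypothetical:
//
//    if err := client.DownloadDirectory(hash, "", "/tmp/site", ""); err != nil {
//        return err
//    }
//    if err := client.DownloadFile(hash, "index.html", "/tmp/index.html", ""); err != nil {
//        return err
//    }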

// UploadManifest uploads the given manifest to swarm
func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) {
    data, err := json.Marshal(m)
    if err != nil {
        return "", err
    }
    return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
}

// DownloadManifest downloads a swarm manifest
func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) {
    res, isEncrypted, err := c.DownloadRaw(hash)
    if err != nil {
        return nil, isEncrypted, err
    }
    defer res.Close()
    var manifest api.Manifest
    if err := json.NewDecoder(res).Decode(&manifest); err != nil {
        return nil, isEncrypted, err
    }
    return &manifest, isEncrypted, nil
}

// List lists files in a swarm manifest which have the given prefix, grouping
// common prefixes using "/" as a delimiter.
//
// For example, if the manifest represents the following directory structure:
//
// file1.txt
// file2.txt
// dir1/file3.txt
// dir1/dir2/file4.txt
//
// Then:
//
// - a prefix of "" would return [dir1/, file1.txt, file2.txt]
// - a prefix of "file" would return [file1.txt, file2.txt]
// - a prefix of "dir1/" would return [dir1/dir2/, dir1/file3.txt]
//
// where entries ending with "/" are common prefixes.
func (c *Client) List(hash, prefix, credentials string) (*api.ManifestList, error) {
    req, err := http.NewRequest(http.MethodGet, c.Gateway+"/bzz-list:/"+hash+"/"+prefix, nil)
    if err != nil {
        return nil, err
    }
    if credentials != "" {
        req.SetBasicAuth("", credentials)
    }
    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()
    switch res.StatusCode {
    case http.StatusOK:
    case http.StatusUnauthorized:
        return nil, ErrUnauthorized
    default:
        return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    var list api.ManifestList
    if err := json.NewDecoder(res.Body).Decode(&list); err != nil {
        return nil, err
    }
    return &list, nil
}
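
// Illustrative usage sketch (not part of the original file): walking one level
// of the example manifest above using the prefix/common-prefix semantics:
//
//    list, err := client.List(hash, "dir1/", "")
//    if err != nil {
//        return err
//    }
//    for _, prefix := range list.CommonPrefixes {
//        fmt.Println("dir: ", prefix) // e.g. "dir1/dir2/"
//    }
//    for _, entry := range list.Entries {
//        fmt.Println("file:", entry.Path) // e.g. "dir1/file3.txt"
//    }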

// Uploader uploads files to swarm using a provided UploadFn
type Uploader interface {
    Upload(UploadFn) error
    Tag() string
}

// UploaderFunc is an adapter to allow the use of an ordinary function as an Uploader
type UploaderFunc func(UploadFn) error

func (u UploaderFunc) Upload(upload UploadFn) error {
    return u(upload)
}

func (u UploaderFunc) Tag() string {
    return fmt.Sprintf("multipart_upload_%d", time.Now().Unix())
}

// DirectoryUploader implements Uploader
var _ Uploader = &DirectoryUploader{}

// DirectoryUploader uploads all files in a directory, optionally uploading
// a file to the default path
type DirectoryUploader struct {
    Dir string
}

func (d *DirectoryUploader) Tag() string {
    return filepath.Base(d.Dir)
}

// Upload performs the upload of the directory and default path
func (d *DirectoryUploader) Upload(upload UploadFn) error {
    return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if f.IsDir() {
            return nil
        }
        file, err := Open(path)
        if err != nil {
            return err
        }
        relPath, err := filepath.Rel(d.Dir, path)
        if err != nil {
            return err
        }
        file.Path = filepath.ToSlash(relPath)
        return upload(file)
    })
}

var _ Uploader = &FileUploader{}

// FileUploader uploads a single file
type FileUploader struct {
    File *File
}

func (f *FileUploader) Tag() string {
    return f.File.Tag
}

// Upload performs the upload of the file
func (f *FileUploader) Upload(upload UploadFn) error {
    return upload(f.File)
}

// UploadFn is the type of function passed to an Uploader to perform the upload
// of a single file (for example, a directory uploader would call a provided
// UploadFn for each file in the directory tree)
type UploadFn func(file *File) error
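
// Illustrative sketch (not part of the original file): Uploader can be
// implemented for sources other than the filesystem. This in-memory uploader
// streams a single generated file through the provided UploadFn:
//
//    var inMemory UploaderFunc = func(upload UploadFn) error {
//        body := []byte("generated on the fly")
//        return upload(&File{
//            ReadCloser: ioutil.NopCloser(bytes.NewReader(body)),
//            ManifestEntry: api.ManifestEntry{
//                Path:        "generated.txt",
//                ContentType: "text/plain",
//                Mode:        0644,
//                Size:        int64(len(body)),
//                ModTime:     time.Now(),
//            },
//        })
//    }
//    hash, err := client.TarUpload("", inMemory, "", false)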

// TarUpload uses the given Uploader to upload files to swarm as a tar stream,
// returning the resulting manifest hash
func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) {
    ctx, sp := spancontext.StartSpan(context.Background(), "api.client.tarupload")
    defer sp.Finish()

    var tn time.Time

    reqR, reqW := io.Pipe()
    defer reqR.Close()
    addr := hash

    // If there is a hash already (a manifest), then that manifest will determine if the upload has
    // to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if
    // there is encryption or not.
    if hash == "" && toEncrypt {
        // This is the built-in address for the encrypted upload endpoint
        addr = "encrypt"
    }
    req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR)
    if err != nil {
        return "", err
    }

    trace := GetClientTrace("swarm api client - upload tar", "api.client.uploadtar", uuid.New()[:8], &tn)

    req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
    transport := http.DefaultTransport

    req.Header.Set("Content-Type", "application/x-tar")
    if defaultPath != "" {
        q := req.URL.Query()
        q.Set("defaultpath", defaultPath)
        req.URL.RawQuery = q.Encode()
    }

    tag := uploader.Tag()
    if tag == "" {
        tag = "unnamed_tag_" + fmt.Sprintf("%d", time.Now().Unix())
    }
    log.Trace("setting upload tag", "tag", tag)

    req.Header.Set(swarmhttp.SwarmTagHeaderName, tag)

    // use 'Expect: 100-continue' so we don't send the request body if
    // the server refuses the request
    req.Header.Set("Expect", "100-continue")

    tw := tar.NewWriter(reqW)

    // define an UploadFn which adds files to the tar stream
    uploadFn := func(file *File) error {
        hdr := &tar.Header{
            Name:    file.Path,
            Mode:    file.Mode,
            Size:    file.Size,
            ModTime: file.ModTime,
            Xattrs: map[string]string{
                "user.swarm.content-type": file.ContentType,
            },
        }
        if err := tw.WriteHeader(hdr); err != nil {
            return err
        }
        _, err = io.Copy(tw, file)
        return err
    }

    // run the upload in a goroutine so we can send the request headers and
    // wait for a '100 Continue' response before sending the tar stream
    go func() {
        err := uploader.Upload(uploadFn)
        if err == nil {
            err = tw.Close()
        }
        reqW.CloseWithError(err)
    }()
    tn = time.Now()
    res, err := transport.RoundTrip(req)
    if err != nil {
        return "", err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    data, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return "", err
    }
    return string(data), nil
}

// MultipartUpload uses the given Uploader to upload files to swarm as a
// multipart form, returning the resulting manifest hash
func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) {
    reqR, reqW := io.Pipe()
    defer reqR.Close()
    req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR)
    if err != nil {
        return "", err
    }

    // use 'Expect: 100-continue' so we don't send the request body if
    // the server refuses the request
    req.Header.Set("Expect", "100-continue")

    mw := multipart.NewWriter(reqW)
    req.Header.Set("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mw.Boundary()))
    req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("multipart_upload_%d", time.Now().Unix()))

    // define an UploadFn which adds files to the multipart form
    uploadFn := func(file *File) error {
        hdr := make(textproto.MIMEHeader)
        hdr.Set("Content-Disposition", fmt.Sprintf("form-data; name=%q", file.Path))
        hdr.Set("Content-Type", file.ContentType)
        hdr.Set("Content-Length", strconv.FormatInt(file.Size, 10))
        w, err := mw.CreatePart(hdr)
        if err != nil {
            return err
        }
        _, err = io.Copy(w, file)
        return err
    }

    // run the upload in a goroutine so we can send the request headers and
    // wait for a '100 Continue' response before sending the multipart form
    go func() {
        err := uploader.Upload(uploadFn)
        if err == nil {
            err = mw.Close()
        }
        reqW.CloseWithError(err)
    }()

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return "", err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
    }
    data, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return "", err
    }
    return string(data), nil
}

// ErrNoFeedUpdatesFound is returned when Swarm cannot find updates of the given feed
var ErrNoFeedUpdatesFound = errors.New("No updates found for this feed")

// CreateFeedWithManifest creates a feed manifest, initializing it with the provided
// data.
// Returns the resulting feed manifest address, which you can include in an ENS
// Resolver (setContent) or use to reference future updates (Client.UpdateFeed)
func (c *Client) CreateFeedWithManifest(request *feed.Request) (string, error) {
    responseStream, err := c.updateFeed(request, true)
    if err != nil {
        return "", err
    }
    defer responseStream.Close()

    body, err := ioutil.ReadAll(responseStream)
    if err != nil {
        return "", err
    }

    var manifestAddress string
    if err = json.Unmarshal(body, &manifestAddress); err != nil {
        return "", err
    }
    return manifestAddress, nil
}

// UpdateFeed allows you to set a new version of your content
func (c *Client) UpdateFeed(request *feed.Request) error {
    _, err := c.updateFeed(request, false)
    return err
}

func (c *Client) updateFeed(request *feed.Request, createManifest bool) (io.ReadCloser, error) {
    URL, err := url.Parse(c.Gateway)
    if err != nil {
        return nil, err
    }
    URL.Path = "/bzz-feed:/"
    values := URL.Query()
    body := request.AppendValues(values)
    if createManifest {
        values.Set("manifest", "1")
    }
    URL.RawQuery = values.Encode()

    req, err := http.NewRequest("POST", URL.String(), bytes.NewBuffer(body))
    if err != nil {
        return nil, err
    }

    res, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }

    return res.Body, nil
}

// QueryFeed returns a byte stream with the raw content of the feed update.
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
// points to that address
func (c *Client) QueryFeed(query *feed.Query, manifestAddressOrDomain string) (io.ReadCloser, error) {
    return c.queryFeed(query, manifestAddressOrDomain, false)
}

// queryFeed returns a byte stream with the raw content of the feed update.
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
// points to that address.
// meta set to true will instruct the node to return feed metainformation instead
func (c *Client) queryFeed(query *feed.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) {
    URL, err := url.Parse(c.Gateway)
    if err != nil {
        return nil, err
    }
    URL.Path = "/bzz-feed:/" + manifestAddressOrDomain
    values := URL.Query()
    if query != nil {
        query.AppendValues(values) // adds query parameters
    }
    if meta {
        values.Set("meta", "1")
    }
    URL.RawQuery = values.Encode()
    res, err := http.Get(URL.String())
    if err != nil {
        return nil, err
    }

    if res.StatusCode != http.StatusOK {
        if res.StatusCode == http.StatusNotFound {
            return nil, ErrNoFeedUpdatesFound
        }
        errorMessageBytes, err := ioutil.ReadAll(res.Body)
        var errorMessage string
        if err != nil {
            errorMessage = "cannot retrieve error message: " + err.Error()
        } else {
            errorMessage = string(errorMessageBytes)
        }
        return nil, fmt.Errorf("Error retrieving feed updates: %s", errorMessage)
    }

    return res.Body, nil
}

// GetFeedRequest returns a structure that describes the referenced feed status.
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
// points to that address
func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain string) (*feed.Request, error) {
    responseStream, err := c.queryFeed(query, manifestAddressOrDomain, true)
    if err != nil {
        return nil, err
    }
    defer responseStream.Close()

    body, err := ioutil.ReadAll(responseStream)
    if err != nil {
        return nil, err
    }

    var metadata feed.Request
    if err := metadata.UnmarshalJSON(body); err != nil {
        return nil, err
    }
    return &metadata, nil
}
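
// Illustrative sketch (not part of the original file): the usual update cycle
// for a feed with this client. The signer here is an assumption based on the
// swarm/storage/feed package (e.g. feed.NewGenericSigner over a private key);
// construction of query and manifestAddressOrDomain is elided:
//
//    request, err := client.GetFeedRequest(query, manifestAddressOrDomain)
//    if err != nil {
//        return err
//    }
//    request.SetData([]byte("new content"))
//    if err := request.Sign(signer); err != nil {
//        return err
//    }
//    err = client.UpdateFeed(request)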
|
||||
|
||||
func GetClientTrace(traceMsg, metricPrefix, ruid string, tn *time.Time) *httptrace.ClientTrace {
|
||||
trace := &httptrace.ClientTrace{
|
||||
GetConn: func(_ string) {
|
||||
log.Trace(traceMsg+" - http get", "event", "GetConn", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".getconn", nil).Update(time.Since(*tn))
|
||||
},
|
||||
GotConn: func(_ httptrace.GotConnInfo) {
|
||||
log.Trace(traceMsg+" - http get", "event", "GotConn", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".gotconn", nil).Update(time.Since(*tn))
|
||||
},
|
||||
PutIdleConn: func(err error) {
|
||||
log.Trace(traceMsg+" - http get", "event", "PutIdleConn", "ruid", ruid, "err", err)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".putidle", nil).Update(time.Since(*tn))
|
||||
},
|
||||
GotFirstResponseByte: func() {
|
||||
log.Trace(traceMsg+" - http get", "event", "GotFirstResponseByte", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".firstbyte", nil).Update(time.Since(*tn))
|
||||
},
|
||||
Got100Continue: func() {
|
||||
log.Trace(traceMsg, "event", "Got100Continue", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".got100continue", nil).Update(time.Since(*tn))
|
||||
},
|
||||
DNSStart: func(_ httptrace.DNSStartInfo) {
|
||||
log.Trace(traceMsg, "event", "DNSStart", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsstart", nil).Update(time.Since(*tn))
|
||||
},
|
||||
DNSDone: func(_ httptrace.DNSDoneInfo) {
|
||||
log.Trace(traceMsg, "event", "DNSDone", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsdone", nil).Update(time.Since(*tn))
|
||||
},
|
||||
ConnectStart: func(network, addr string) {
|
||||
log.Trace(traceMsg, "event", "ConnectStart", "ruid", ruid, "network", network, "addr", addr)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".connectstart", nil).Update(time.Since(*tn))
|
||||
},
|
||||
ConnectDone: func(network, addr string, err error) {
|
||||
log.Trace(traceMsg, "event", "ConnectDone", "ruid", ruid, "network", network, "addr", addr, "err", err)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".connectdone", nil).Update(time.Since(*tn))
|
||||
},
|
||||
WroteHeaders: func() {
|
||||
log.Trace(traceMsg, "event", "WroteHeaders(request)", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".wroteheaders", nil).Update(time.Since(*tn))
|
||||
},
|
||||
Wait100Continue: func() {
|
||||
log.Trace(traceMsg, "event", "Wait100Continue", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".wait100continue", nil).Update(time.Since(*tn))
|
||||
},
|
||||
WroteRequest: func(_ httptrace.WroteRequestInfo) {
|
||||
log.Trace(traceMsg, "event", "WroteRequest", "ruid", ruid)
|
||||
metrics.GetOrRegisterResettingTimer(metricPrefix+".wroterequest", nil).Update(time.Since(*tn))
|
||||
},
|
||||
}
|
||||
return trace
|
||||
}
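A minimal usage sketch (not part of the deleted file) showing how a trace built by GetClientTrace can be attached to an outgoing request via the standard library's httptrace package; the message, metric prefix and ruid are placeholder values:

package main

import (
	"net/http"
	"net/http/httptrace"
	"time"

	"github.com/ethereum/go-ethereum/swarm/api/client"
)

func main() {
	tn := time.Now()
	// "fetch", "swarm.client.fetch" and "abcd1234" are made-up example values
	trace := client.GetClientTrace("fetch", "swarm.client.fetch", "abcd1234", &tn)

	req, err := http.NewRequest("GET", "http://localhost:8500/", nil)
	if err != nil {
		panic(err)
	}
	// attach the trace hooks to the request's context
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	res.Body.Close()
}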
|
@ -1,608 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package client

import (
	"bytes"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/feed"
	"github.com/ethereum/go-ethereum/swarm/storage/feed/lookup"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}

// TestClientUploadDownloadRaw tests uploading and downloading raw data to swarm
func TestClientUploadDownloadRaw(t *testing.T) {
	testClientUploadDownloadRaw(false, t)
}

func TestClientUploadDownloadRawEncrypted(t *testing.T) {
	if testutil.RaceEnabled {
		t.Skip("flaky with -race on Travis")
		// See: https://github.com/ethersphere/go-ethereum/issues/1254
	}

	testClientUploadDownloadRaw(true, t)
}

func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	client := NewClient(srv.URL)

	// upload some raw data
	data := []byte("foo123")
	hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
	if err != nil {
		t.Fatal(err)
	}

	// check the tag was created successfully
	tag := srv.Tags.All()[0]
	testutil.CheckTag(t, tag, 1, 1, 0, 1)

	// check we can download the same data
	res, isEncrypted, err := client.DownloadRaw(hash)
	if err != nil {
		t.Fatal(err)
	}
	if isEncrypted != toEncrypt {
		t.Fatalf("Expected encryption status %v got %v", toEncrypt, isEncrypted)
	}
	defer res.Close()
	gotData, err := ioutil.ReadAll(res)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(gotData, data) {
		t.Fatalf("expected downloaded data to be %q, got %q", data, gotData)
	}
}

// TestClientUploadDownloadFiles tests uploading and downloading files to swarm
// manifests
func TestClientUploadDownloadFiles(t *testing.T) {
	testClientUploadDownloadFiles(false, t)
}

func TestClientUploadDownloadFilesEncrypted(t *testing.T) {
	testClientUploadDownloadFiles(true, t)
}

func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) {
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	client := NewClient(srv.URL)
	upload := func(manifest, path string, data []byte) string {
		file := &File{
			ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
			ManifestEntry: api.ManifestEntry{
				Path:        path,
				ContentType: "text/plain",
				Size:        int64(len(data)),
			},
		}
		hash, err := client.Upload(file, manifest, toEncrypt)
		if err != nil {
			t.Fatal(err)
		}
		return hash
	}
	checkDownload := func(manifest, path string, expected []byte) {
		file, err := client.Download(manifest, path)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		if file.Size != int64(len(expected)) {
			t.Fatalf("expected downloaded file to be %d bytes, got %d", len(expected), file.Size)
		}
		if file.ContentType != "text/plain" {
			t.Fatalf("expected downloaded file to have type %q, got %q", "text/plain", file.ContentType)
		}
		data, err := ioutil.ReadAll(file)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, expected) {
			t.Fatalf("expected downloaded data to be %q, got %q", expected, data)
		}
	}

	// upload a file to the root of a manifest
	rootData := []byte("some-data")
	rootHash := upload("", "", rootData)

	// check we can download the root file
	checkDownload(rootHash, "", rootData)

	// upload another file to the same manifest
	otherData := []byte("some-other-data")
	newHash := upload(rootHash, "some/other/path", otherData)

	// check we can download both files from the new manifest
	checkDownload(newHash, "", rootData)
	checkDownload(newHash, "some/other/path", otherData)

	// replace the root file with different data
	newHash = upload(newHash, "", otherData)

	// check both files have the other data
	checkDownload(newHash, "", otherData)
	checkDownload(newHash, "some/other/path", otherData)
}

var testDirFiles = []string{
	"file1.txt",
	"file2.txt",
	"dir1/file3.txt",
	"dir1/file4.txt",
	"dir2/file5.txt",
	"dir2/dir3/file6.txt",
	"dir2/dir4/file7.txt",
	"dir2/dir4/file8.txt",
}

func newTestDirectory(t *testing.T) string {
	dir, err := ioutil.TempDir("", "swarm-client-test")
	if err != nil {
		t.Fatal(err)
	}

	for _, file := range testDirFiles {
		path := filepath.Join(dir, file)
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			os.RemoveAll(dir)
			t.Fatalf("error creating dir for %s: %s", path, err)
		}
		if err := ioutil.WriteFile(path, []byte(file), 0644); err != nil {
			os.RemoveAll(dir)
			t.Fatalf("error writing file %s: %s", path, err)
		}
	}

	return dir
}

// TestClientUploadDownloadDirectory tests uploading and downloading a
// directory of files to a swarm manifest
func TestClientUploadDownloadDirectory(t *testing.T) {
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	dir := newTestDirectory(t)
	defer os.RemoveAll(dir)

	// upload the directory
	client := NewClient(srv.URL)
	defaultPath := testDirFiles[0]
	hash, err := client.UploadDirectory(dir, defaultPath, "", false)
	if err != nil {
		t.Fatalf("error uploading directory: %s", err)
	}

	// check the tag was created successfully
	tag := srv.Tags.All()[0]
	testutil.CheckTag(t, tag, 9, 9, 0, 9)

	// check we can download the individual files
	checkDownloadFile := func(path string, expected []byte) {
		file, err := client.Download(hash, path)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		data, err := ioutil.ReadAll(file)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, expected) {
			t.Fatalf("expected data to be %q, got %q", expected, data)
		}
	}
	for _, file := range testDirFiles {
		checkDownloadFile(file, []byte(file))
	}

	// check we can download the default path
	checkDownloadFile("", []byte(testDirFiles[0]))

	// check we can download the directory
	tmp, err := ioutil.TempDir("", "swarm-client-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmp)
	if err := client.DownloadDirectory(hash, "", tmp, ""); err != nil {
		t.Fatal(err)
	}
	for _, file := range testDirFiles {
		data, err := ioutil.ReadFile(filepath.Join(tmp, file))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(data, []byte(file)) {
			t.Fatalf("expected data to be %q, got %q", file, data)
		}
	}
}

// TestClientFileList tests listing files in a swarm manifest
func TestClientFileList(t *testing.T) {
	testClientFileList(false, t)
}

func TestClientFileListEncrypted(t *testing.T) {
	testClientFileList(true, t)
}

func testClientFileList(toEncrypt bool, t *testing.T) {
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	dir := newTestDirectory(t)
	defer os.RemoveAll(dir)

	client := NewClient(srv.URL)
	hash, err := client.UploadDirectory(dir, "", "", toEncrypt)
	if err != nil {
		t.Fatalf("error uploading directory: %s", err)
	}

	ls := func(prefix string) []string {
		list, err := client.List(hash, prefix, "")
		if err != nil {
			t.Fatal(err)
		}
		paths := make([]string, 0, len(list.CommonPrefixes)+len(list.Entries))
		paths = append(paths, list.CommonPrefixes...)
		for _, entry := range list.Entries {
			paths = append(paths, entry.Path)
		}
		sort.Strings(paths)
		return paths
	}

	tests := map[string][]string{
		"":                    {"dir1/", "dir2/", "file1.txt", "file2.txt"},
		"file":                {"file1.txt", "file2.txt"},
		"file1":               {"file1.txt"},
		"file2.txt":           {"file2.txt"},
		"file12":              {},
		"dir":                 {"dir1/", "dir2/"},
		"dir1":                {"dir1/"},
		"dir1/":               {"dir1/file3.txt", "dir1/file4.txt"},
		"dir1/file":           {"dir1/file3.txt", "dir1/file4.txt"},
		"dir1/file3.txt":      {"dir1/file3.txt"},
		"dir1/file34":         {},
		"dir2/":               {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
		"dir2/file":           {"dir2/file5.txt"},
		"dir2/dir":            {"dir2/dir3/", "dir2/dir4/"},
		"dir2/dir3/":          {"dir2/dir3/file6.txt"},
		"dir2/dir4/":          {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
		"dir2/dir4/file":      {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
		"dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"},
		"dir2/dir4/file78":    {},
	}
	for prefix, expected := range tests {
		actual := ls(prefix)
		if !reflect.DeepEqual(actual, expected) {
			t.Fatalf("expected prefix %q to return %v, got %v", prefix, expected, actual)
		}
	}
}

// TestClientMultipartUpload tests uploading files to swarm using a multipart
// upload
func TestClientMultipartUpload(t *testing.T) {
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	defer srv.Close()

	// define an uploader which uploads testDirFiles with some data
	// note: this test should result in SEEN chunks. assert accordingly
	data := []byte("some-data")
	uploader := UploaderFunc(func(upload UploadFn) error {
		for _, name := range testDirFiles {
			file := &File{
				ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
				ManifestEntry: api.ManifestEntry{
					Path:        name,
					ContentType: "text/plain",
					Size:        int64(len(data)),
				},
			}
			if err := upload(file); err != nil {
				return err
			}
		}
		return nil
	})

	// upload the files as a multipart upload
	client := NewClient(srv.URL)
	hash, err := client.MultipartUpload("", uploader)
	if err != nil {
		t.Fatal(err)
	}

	// check the tag was created successfully
	tag := srv.Tags.All()[0]
	testutil.CheckTag(t, tag, 9, 9, 7, 9)

	// check we can download the individual files
	checkDownloadFile := func(path string) {
		file, err := client.Download(hash, path)
		if err != nil {
			t.Fatal(err)
		}
		defer file.Close()
		gotData, err := ioutil.ReadAll(file)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(gotData, data) {
			t.Fatalf("expected data to be %q, got %q", data, gotData)
		}
	}
	for _, file := range testDirFiles {
		checkDownloadFile(file)
	}
}

func newTestSigner() (*feed.GenericSigner, error) {
	privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	if err != nil {
		return nil, err
	}
	return feed.NewGenericSigner(privKey), nil
}

// Test the transparent resolving of feed updates with bzz:// scheme
//
// First upload data to bzz:, and store the Swarm hash to the resulting manifest in a feed update.
// This effectively uses a feed to store a pointer to content rather than the content itself
// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data
// and raw retrieve of that hash should return the data
func TestClientBzzWithFeed(t *testing.T) {

	signer, _ := newTestSigner()

	// Initialize a Swarm test server
	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	swarmClient := NewClient(srv.URL)
	defer srv.Close()

	// put together some data for our test:
	dataBytes := []byte(`
	//
	// Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
	// So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
	//
	// MANIFEST HASH --> DATA
	//
	// Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
	// we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
	//
	// FEED MANIFEST HASH --> MANIFEST HASH --> DATA
	//
	// Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
	// stays constant, we have effectively created a fixed address to changing content. (Applause)
	//
	// FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2)
	//
	`)

	// Create a virtual File out of memory containing the above data
	f := &File{
		ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
		ManifestEntry: api.ManifestEntry{
			ContentType: "text/plain",
			Mode:        0660,
			Size:        int64(len(dataBytes)),
		},
	}

	// upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
	manifestAddressHex, err := swarmClient.Upload(f, "", false)
	if err != nil {
		t.Fatalf("Error creating manifest: %s", err)
	}

	// convert the hex-encoded manifest hash to a 32-byte slice
	manifestAddress := common.FromHex(manifestAddressHex)

	if len(manifestAddress) != storage.AddressLength {
		t.Fatalf("Something went wrong. Got a hash of an unexpected length. Expected %d bytes. Got %d", storage.AddressLength, len(manifestAddress))
	}

	// Now create a **feed manifest**. For that, we need a topic:
	topic, _ := feed.NewTopic("interesting topic indeed", nil)

	// Build a feed request to update data
	request := feed.NewFirstRequest(topic)

	// Put the 32-byte address of the manifest into the feed update
	request.SetData(manifestAddress)

	// Sign the update
	if err := request.Sign(signer); err != nil {
		t.Fatalf("Error signing update: %s", err)
	}

	// Publish the update and at the same time request a **feed manifest** to be created
	feedManifestAddressHex, err := swarmClient.CreateFeedWithManifest(request)
	if err != nil {
		t.Fatalf("Error creating feed manifest: %s", err)
	}

	// Check we have received the exact **feed manifest** to be expected
	// given the topic and user signing the updates:
	correctFeedManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
	if feedManifestAddressHex != correctFeedManifestAddrHex {
		t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctFeedManifestAddrHex, feedManifestAddressHex)
	}

	// Check we get a not found error when trying to get feed updates with a made-up manifest
	_, err = swarmClient.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
	if err != ErrNoFeedUpdatesFound {
		t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err)
	}

	// If we query the feed directly we should get **manifest hash** back:
	reader, err := swarmClient.QueryFeed(nil, correctFeedManifestAddrHex)
	if err != nil {
		t.Fatalf("Error retrieving feed updates: %s", err)
	}
	defer reader.Close()
	gotData, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}

	// Check that indeed the **manifest hash** is retrieved
	if !bytes.Equal(manifestAddress, gotData) {
		t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
	}

	// Now the final test we were looking for: Use bzz://<feed-manifest> and that should resolve all manifests
	// and return the original data directly:
	f, err = swarmClient.Download(feedManifestAddressHex, "")
	if err != nil {
		t.Fatal(err)
	}
	gotData, err = ioutil.ReadAll(f)
	if err != nil {
		t.Fatal(err)
	}

	// Check that we get back the original data:
	if !bytes.Equal(dataBytes, gotData) {
		t.Fatalf("Expected: %v, got %v", dataBytes, gotData)
	}
}

// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client.
func TestClientCreateUpdateFeed(t *testing.T) {

	signer, _ := newTestSigner()

	srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
	client := NewClient(srv.URL)
	defer srv.Close()

	// set raw data for the feed update
	databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...")

	// our feed topic name
	topic, _ := feed.NewTopic("El Quijote", nil)
	createRequest := feed.NewFirstRequest(topic)

	createRequest.SetData(databytes)
	if err := createRequest.Sign(signer); err != nil {
		t.Fatalf("Error signing update: %s", err)
	}

	feedManifestHash, err := client.CreateFeedWithManifest(createRequest)
	if err != nil {
		t.Fatal(err)
	}

	correctManifestAddrHex := "0e9b645ebc3da167b1d56399adc3276f7a08229301b72a03336be0e7d4b71882"
	if feedManifestHash != correctManifestAddrHex {
		t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash)
	}

	reader, err := client.QueryFeed(nil, correctManifestAddrHex)
	if err != nil {
		t.Fatalf("Error retrieving feed updates: %s", err)
	}
	defer reader.Close()
	gotData, err := ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(databytes, gotData) {
		t.Fatalf("Expected: %v, got %v", databytes, gotData)
	}

	// define different data
	databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...")

	updateRequest, err := client.GetFeedRequest(nil, correctManifestAddrHex)
	if err != nil {
		t.Fatalf("Error retrieving update request template: %s", err)
	}

	updateRequest.SetData(databytes)
	if err := updateRequest.Sign(signer); err != nil {
		t.Fatalf("Error signing update: %s", err)
	}

	if err = client.UpdateFeed(updateRequest); err != nil {
		t.Fatalf("Error updating feed: %s", err)
	}

	reader, err = client.QueryFeed(nil, correctManifestAddrHex)
	if err != nil {
		t.Fatalf("Error retrieving feed updates: %s", err)
	}
	defer reader.Close()
	gotData, err = ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(databytes, gotData) {
		t.Fatalf("Expected: %v, got %v", databytes, gotData)
	}

	// now try retrieving feed updates without a manifest

	fd := &feed.Feed{
		Topic: topic,
		User:  signer.Address(),
	}

	lookupParams := feed.NewQueryLatest(fd, lookup.NoClue)
	reader, err = client.QueryFeed(lookupParams, "")
	if err != nil {
		t.Fatalf("Error retrieving feed updates: %s", err)
	}
	defer reader.Close()
	gotData, err = ioutil.ReadAll(reader)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(databytes, gotData) {
		t.Fatalf("Expected: %v, got %v", databytes, gotData)
	}
}
@ -1,174 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"crypto/ecdsa"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/contracts/ens"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/pss"
	"github.com/ethereum/go-ethereum/swarm/services/swap"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	DefaultHTTPListenAddr = "127.0.0.1"
	DefaultHTTPPort       = "8500"
)

// separate bzz directories
// allow several bzz nodes running in parallel
type Config struct {
	// serialised/persisted fields
	*storage.FileStoreParams

	// LocalStore
	ChunkDbPath   string
	DbCapacity    uint64
	CacheCapacity uint
	BaseKey       []byte

	*network.HiveParams
	Swap                 *swap.LocalProfile
	Pss                  *pss.PssParams
	Contract             common.Address
	EnsRoot              common.Address
	EnsAPIs              []string
	Path                 string
	ListenAddr           string
	Port                 string
	PublicKey            string
	BzzKey               string
	Enode                *enode.Node `toml:"-"`
	NetworkID            uint64
	SwapEnabled          bool
	SyncEnabled          bool
	SyncingSkipCheck     bool
	DeliverySkipCheck    bool
	MaxStreamPeerServers int
	LightNodeEnabled     bool
	BootnodeMode         bool
	SyncUpdateDelay      time.Duration
	SwapAPI              string
	Cors                 string
	BzzAccount           string
	GlobalStoreAPI       string
	privateKey           *ecdsa.PrivateKey
}

// NewConfig creates a default config with all parameters set to defaults
func NewConfig() (c *Config) {

	c = &Config{
		FileStoreParams:      storage.NewFileStoreParams(),
		HiveParams:           network.NewHiveParams(),
		Swap:                 swap.NewDefaultSwapParams(),
		Pss:                  pss.NewPssParams(),
		ListenAddr:           DefaultHTTPListenAddr,
		Port:                 DefaultHTTPPort,
		Path:                 node.DefaultDataDir(),
		EnsAPIs:              nil,
		EnsRoot:              ens.TestNetAddress,
		NetworkID:            network.DefaultNetworkID,
		SwapEnabled:          false,
		SyncEnabled:          true,
		SyncingSkipCheck:     false,
		MaxStreamPeerServers: 10000,
		DeliverySkipCheck:    true,
		SyncUpdateDelay:      15 * time.Second,
		SwapAPI:              "",
	}

	return
}

// Init finalizes config params that need to be set after the complete
// config building phase is completed (e.g. due to overriding flags)
func (c *Config) Init(prvKey *ecdsa.PrivateKey, nodeKey *ecdsa.PrivateKey) error {

	// create swarm dir and record key
	err := c.createAndSetPath(c.Path, prvKey)
	if err != nil {
		return fmt.Errorf("Error creating root swarm data directory: %v", err)
	}
	c.setKey(prvKey)

	// create the new enode record
	// signed with the ephemeral node key
	enodeParams := &network.EnodeParams{
		PrivateKey: prvKey,
		EnodeKey:   nodeKey,
		Lightnode:  c.LightNodeEnabled,
		Bootnode:   c.BootnodeMode,
	}
	c.Enode, err = network.NewEnode(enodeParams)
	if err != nil {
		return fmt.Errorf("Error creating enode: %v", err)
	}

	// initialize components that depend on the swarm instance's private key
	if c.SwapEnabled {
		c.Swap.Init(c.Contract, prvKey)
	}

	c.privateKey = prvKey
	c.ChunkDbPath = filepath.Join(c.Path, "chunks")
	c.BaseKey = common.FromHex(c.BzzKey)

	c.Pss = c.Pss.WithPrivateKey(c.privateKey)
	return nil
}

func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) {
	if c.privateKey != nil {
		privKey = c.privateKey
		c.privateKey = nil
	}
	return privKey
}

func (c *Config) setKey(prvKey *ecdsa.PrivateKey) {
	bzzkeybytes := network.PrivateKeyToBzzKey(prvKey)
	pubkey := crypto.FromECDSAPub(&prvKey.PublicKey)
	pubkeyhex := hexutil.Encode(pubkey)
	keyhex := hexutil.Encode(bzzkeybytes)

	c.privateKey = prvKey
	c.PublicKey = pubkeyhex
	c.BzzKey = keyhex
}

func (c *Config) createAndSetPath(datadirPath string, prvKey *ecdsa.PrivateKey) error {
	address := crypto.PubkeyToAddress(prvKey.PublicKey)
	bzzdirPath := filepath.Join(datadirPath, "bzz-"+common.Bytes2Hex(address.Bytes()))
	err := os.MkdirAll(bzzdirPath, os.ModePerm)
	if err != nil {
		return err
	}
	c.Path = bzzdirPath
	return nil
}
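A minimal wiring sketch (assumed usage, not taken from the diff): create the defaults, override what you need, then call Init with the two keys. The data directory and port below are hypothetical example values.

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	// two fresh keys: one for the swarm account, one ephemeral enode key
	swarmKey, _ := crypto.GenerateKey()
	nodeKey, _ := crypto.GenerateKey()

	cfg := api.NewConfig()
	cfg.Path = "/tmp/swarm-example" // hypothetical data directory
	cfg.Port = "8501"               // override the 8500 default

	// Init derives BzzKey/PublicKey, creates the bzz-<addr> directory
	// under Path and builds the enode record
	if err := cfg.Init(swarmKey, nodeKey); err != nil {
		log.Fatal(err)
	}
	log.Println("bzz key:", cfg.BzzKey)
}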
@ -1,66 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func TestConfig(t *testing.T) {

	var hexprvkey = "65138b2aa745041b372153550584587da326ab440576b2a1191dd95cee30039c"
	var hexnodekey = "75138b2aa745041b372153550584587da326ab440576b2a1191dd95cee30039c"

	prvkey, err := crypto.HexToECDSA(hexprvkey)
	if err != nil {
		t.Fatalf("failed to load private key: %v", err)
	}
	nodekey, err := crypto.HexToECDSA(hexnodekey)
	if err != nil {
		t.Fatalf("failed to load node key: %v", err)
	}

	one := NewConfig()
	two := NewConfig()

	if equal := reflect.DeepEqual(one, two); !equal {
		t.Fatal("Two default configs are not equal")
	}

	err = one.Init(prvkey, nodekey)
	if err != nil {
		t.Fatal(err)
	}

	// the init function should set the following fields
	if one.BzzKey == "" {
		t.Fatal("Expected BzzKey to be set")
	}
	if one.PublicKey == "" {
		t.Fatal("Expected PublicKey to be set")
	}
	if one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled {
		t.Fatal("Failed to correctly initialize SwapParams")
	}
	if one.ChunkDbPath == one.Path {
		t.Fatal("Failed to correctly initialize StoreParams")
	}
}
@ -1,78 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"encoding/binary"
	"errors"

	"github.com/ethereum/go-ethereum/swarm/storage/encryption"
	"golang.org/x/crypto/sha3"
)

// RefEncryption encrypts and decrypts fixed-size references, prefixing
// them with an 8-byte encrypted span that encodes the reference size.
type RefEncryption struct {
	refSize int
	span    []byte
}

func NewRefEncryption(refSize int) *RefEncryption {
	span := make([]byte, 8)
	binary.LittleEndian.PutUint64(span, uint64(refSize))
	return &RefEncryption{
		refSize: refSize,
		span:    span,
	}
}

// Encrypt returns the encrypted span followed by the encrypted reference.
func (re *RefEncryption) Encrypt(ref []byte, key []byte) ([]byte, error) {
	spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
	encryptedSpan, err := spanEncryption.Encrypt(re.span)
	if err != nil {
		return nil, err
	}
	dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
	encryptedData, err := dataEncryption.Encrypt(ref)
	if err != nil {
		return nil, err
	}
	encryptedRef := make([]byte, len(ref)+8)
	copy(encryptedRef[:8], encryptedSpan)
	copy(encryptedRef[8:], encryptedData)

	return encryptedRef, nil
}

// Decrypt reverses Encrypt, validating the decrypted span against the
// length of the remaining payload before decrypting the reference.
func (re *RefEncryption) Decrypt(ref []byte, key []byte) ([]byte, error) {
	spanEncryption := encryption.New(key, 0, uint32(re.refSize/32), sha3.NewLegacyKeccak256)
	decryptedSpan, err := spanEncryption.Decrypt(ref[:8])
	if err != nil {
		return nil, err
	}

	size := binary.LittleEndian.Uint64(decryptedSpan)
	if size != uint64(len(ref)-8) {
		return nil, errors.New("invalid span in encrypted reference")
	}

	dataEncryption := encryption.New(key, re.refSize, 0, sha3.NewLegacyKeccak256)
	decryptedRef, err := dataEncryption.Decrypt(ref[8:])
	if err != nil {
		return nil, err
	}

	return decryptedRef, nil
}
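A round-trip sketch (illustrative, not from the original file; the 32-byte key size and 64-byte reference length are assumptions chosen for the example):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	key := make([]byte, 32) // hypothetical symmetric key
	rand.Read(key)

	ref := make([]byte, 64) // e.g. a 64-byte reference (hash + decryption key)
	rand.Read(ref)

	re := api.NewRefEncryption(len(ref))
	enc, err := re.Encrypt(ref, key) // 8-byte encrypted span || encrypted ref
	if err != nil {
		panic(err)
	}
	dec, err := re.Decrypt(enc, key) // recovers the original ref
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(ref, dec))
}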
@ -1,292 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const maxParallelFiles = 5

type FileSystem struct {
	api *API
}

func NewFileSystem(api *API) *FileSystem {
	return &FileSystem{api}
}

// Upload replicates a local directory as a manifest file and uploads it
// using the FileStore.
// This function waits for the chunks to be stored.
// TODO: localpath should point to a manifest
//
// DEPRECATED: Use the HTTP API instead
func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error) {
	var list []*manifestTrieEntry
	localpath, err := filepath.Abs(filepath.Clean(lpath))
	if err != nil {
		return "", err
	}

	f, err := os.Open(localpath)
	if err != nil {
		return "", err
	}
	stat, err := f.Stat()
	if err != nil {
		return "", err
	}

	var start int
	if stat.IsDir() {
		start = len(localpath)
		log.Debug(fmt.Sprintf("uploading '%s'", localpath))
		err = filepath.Walk(localpath, func(path string, info os.FileInfo, err error) error {
			if (err == nil) && !info.IsDir() {
				if len(path) <= start {
					return fmt.Errorf("Path is too short")
				}
				if path[:start] != localpath {
					return fmt.Errorf("Path prefix of '%s' does not match localpath '%s'", path, localpath)
				}
				entry := newManifestTrieEntry(&ManifestEntry{Path: filepath.ToSlash(path)}, nil)
				list = append(list, entry)
			}
			return err
		})
		if err != nil {
			return "", err
		}
	} else {
		dir := filepath.Dir(localpath)
		start = len(dir)
		if len(localpath) <= start {
			return "", fmt.Errorf("Path is too short")
		}
		if localpath[:start] != dir {
			return "", fmt.Errorf("Path prefix of '%s' does not match dir '%s'", localpath, dir)
		}
		entry := newManifestTrieEntry(&ManifestEntry{Path: filepath.ToSlash(localpath)}, nil)
		list = append(list, entry)
	}

	errors := make([]error, len(list))
	sem := make(chan bool, maxParallelFiles)
	defer close(sem)

	for i, entry := range list {
		sem <- true
		go func(i int, entry *manifestTrieEntry) {
			defer func() { <-sem }()

			f, err := os.Open(entry.Path)
			if err != nil {
				errors[i] = err
				return
			}
			defer f.Close()

			stat, err := f.Stat()
			if err != nil {
				errors[i] = err
				return
			}

			var hash storage.Address
			var wait func(context.Context) error
			ctx := context.TODO()
			hash, wait, err = fs.api.fileStore.Store(ctx, f, stat.Size(), toEncrypt)
			if err != nil {
				errors[i] = err
				return
			}
			if hash != nil {
				list[i].Hash = hash.Hex()
			}
			if err := wait(ctx); err != nil {
				errors[i] = err
				return
			}

			list[i].ContentType, err = DetectContentType(f.Name(), f)
			if err != nil {
				errors[i] = err
				return
			}

		}(i, entry)
	}
	for i := 0; i < cap(sem); i++ {
		sem <- true
	}

	trie := &manifestTrie{
		fileStore: fs.api.fileStore,
	}
	quitC := make(chan bool)
	for i, entry := range list {
		if errors[i] != nil {
			return "", errors[i]
		}
		entry.Path = RegularSlashes(entry.Path[start:])
		if entry.Path == index {
			ientry := newManifestTrieEntry(&ManifestEntry{
				ContentType: entry.ContentType,
			}, nil)
			ientry.Hash = entry.Hash
			trie.addEntry(ientry, quitC)
		}
		trie.addEntry(entry, quitC)
	}

	err2 := trie.recalcAndStore()
	var hs string
	if err2 == nil {
		hs = trie.ref.Hex()
	}
	return hs, err2
}

// Download replicates the manifest basePath structure on the local filesystem
// under localpath
//
// DEPRECATED: Use the HTTP API instead
func (fs *FileSystem) Download(bzzpath, localpath string) error {
	lpath, err := filepath.Abs(filepath.Clean(localpath))
	if err != nil {
		return err
	}
	err = os.MkdirAll(lpath, os.ModePerm)
	if err != nil {
		return err
	}

	// resolving host and port
	uri, err := Parse(path.Join("bzz:/", bzzpath))
	if err != nil {
		return err
	}
	addr, err := fs.api.Resolve(context.TODO(), uri.Addr)
	if err != nil {
		return err
	}
	path := uri.Path

	if len(path) > 0 {
		path += "/"
	}

	quitC := make(chan bool)
	trie, err := loadManifest(context.TODO(), fs.api.fileStore, addr, quitC, NOOPDecrypt)
	if err != nil {
		log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err))
		return err
	}

	type downloadListEntry struct {
		addr storage.Address
		path string
	}

	var list []*downloadListEntry
	var mde error

	prevPath := lpath
	err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) {
		log.Trace(fmt.Sprintf("fs.Download: %#v", entry))

		addr = common.Hex2Bytes(entry.Hash)
		path := lpath + "/" + suffix
		dir := filepath.Dir(path)
		if dir != prevPath {
			mde = os.MkdirAll(dir, os.ModePerm)
			prevPath = dir
		}
		if (mde == nil) && (path != dir+"/") {
			list = append(list, &downloadListEntry{addr: addr, path: path})
		}
	})
	if err != nil {
		return err
	}

	wg := sync.WaitGroup{}
	errC := make(chan error)
	done := make(chan bool, maxParallelFiles)
	for i, entry := range list {
		select {
		case done <- true:
			wg.Add(1)
		case <-quitC:
			return fmt.Errorf("aborted")
		}
		go func(i int, entry *downloadListEntry) {
			defer wg.Done()
			err := retrieveToFile(quitC, fs.api.fileStore, entry.addr, entry.path)
			if err != nil {
				select {
				case errC <- err:
				case <-quitC:
				}
				return
			}
			<-done
		}(i, entry)
	}
	go func() {
		wg.Wait()
		close(errC)
	}()
	select {
	case err = <-errC:
		return err
	case <-quitC:
		return fmt.Errorf("aborted")
	}
}

func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage.Address, path string) error {
	f, err := os.Create(path) // TODO: basePath separators
	if err != nil {
		return err
	}
	reader, _ := fileStore.Retrieve(context.TODO(), addr)
	writer := bufio.NewWriter(f)
	size, err := reader.Size(context.TODO(), quitC)
	if err != nil {
		return err
	}
	if _, err = io.CopyN(writer, reader, size); err != nil {
		return err
	}
	if err := writer.Flush(); err != nil {
		return err
	}
	return f.Close()
}
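A sketch of the deprecated convenience API (illustrative only; swarmAPI is assumed to be an already-initialized *api.API, and the paths are made-up example values):

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/swarm/api"
)

// mirror uploads a local directory and replicates it back; constructing
// the *api.API instance is outside the scope of this sketch.
func mirror(swarmAPI *api.API) {
	fs := api.NewFileSystem(swarmAPI)

	// upload a directory, with index.html as the manifest's default entry
	hash, err := fs.Upload("/tmp/site", "index.html", false)
	if err != nil {
		log.Fatal(err)
	}

	// replicate the manifest tree back onto the local filesystem
	if err := fs.Download(hash, "/tmp/site-copy"); err != nil {
		log.Fatal(err)
	}
}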
@ -1,200 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bytes"
	"context"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")

func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
	testAPI(t, func(api *API, _ *chunk.Tags, toEncrypt bool) {
		f(NewFileSystem(api), toEncrypt)
	})
}

func readPath(t *testing.T, parts ...string) string {
	file := filepath.Join(parts...)
	content, err := ioutil.ReadFile(file)

	if err != nil {
		t.Fatalf("unexpected error reading '%v': %v", file, err)
	}
	return string(content)
}

func TestApiDirUpload0(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash, "index.html")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		content = readPath(t, "testdata", "test0", "index.css")
		resp = testGet(t, api, bzzhash, "index.css")
		exp = expResponse(content, "text/css; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		addr := storage.Address(common.Hex2Bytes(bzzhash))
		_, _, _, _, err = api.Get(context.TODO(), NOOPDecrypt, addr, "")
		if err == nil {
			t.Fatalf("expected error: %v", err)
		}

		downloadDir := filepath.Join(testDownloadDir, "test0")
		defer os.RemoveAll(downloadDir)
		err = fs.Download(bzzhash, downloadDir)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		newbzzhash, err := fs.Upload(downloadDir, "", toEncrypt)
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		// TODO: currently the hash is not deterministic in the encrypted case
		if !toEncrypt && bzzhash != newbzzhash {
			t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash)
		}
	})
}

func TestApiDirUploadModify(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		addr := storage.Address(common.Hex2Bytes(bzzhash))
		addr, err = api.Modify(context.TODO(), addr, "index.html", "", "")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		index, err := ioutil.ReadFile(filepath.Join("testdata", "test0", "index.html"))
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		ctx := context.TODO()
		hash, wait, err := api.Store(ctx, bytes.NewReader(index), int64(len(index)), toEncrypt)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		err = wait(ctx)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		addr, err = api.Modify(context.TODO(), addr, "index2.html", hash.Hex(), "text/html; charset=utf-8")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		addr, err = api.Modify(context.TODO(), addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}
		bzzhash = addr.Hex()

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash, "index2.html")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		resp = testGet(t, api, bzzhash, "img/logo.png")
		exp = expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		content = readPath(t, "testdata", "test0", "index.css")
		resp = testGet(t, api, bzzhash, "index.css")
		exp = expResponse(content, "text/css; charset=utf-8", 0)
		checkResponse(t, resp, exp)

		_, _, _, _, err = api.Get(context.TODO(), nil, addr, "")
		if err == nil {
			t.Errorf("expected error: %v", err)
		}
	})
}

func TestApiDirUploadWithRootFile(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html", toEncrypt)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash, "")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)
	})
}

func TestApiFileUpload(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "", toEncrypt)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash, "index.html")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)
	})
}

func TestApiFileUploadWithRootFile(t *testing.T) {
	testFileSystem(t, func(fs *FileSystem, toEncrypt bool) {
		api := fs.api
		bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html", toEncrypt)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return
		}

		content := readPath(t, "testdata", "test0", "index.html")
		resp := testGet(t, api, bzzhash, "")
		exp := expResponse(content, "text/html; charset=utf-8", 0)
		checkResponse(t, resp, exp)
	})
}
File diff suppressed because it is too large
@ -1,162 +0,0 @@
package http

import (
	"fmt"
	"net/http"
	"runtime/debug"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/sctx"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	"github.com/pborman/uuid"
)

// Adapt chains h (the main request handler) to the given adapters (middleware handlers).
// Please note that the order of execution for `adapters` is FIFO (adapters[0] will be executed first)
func Adapt(h http.Handler, adapters ...Adapter) http.Handler {
	for i := range adapters {
		adapter := adapters[len(adapters)-1-i]
		h = adapter(h)
	}
	return h
}

type Adapter func(http.Handler) http.Handler

func SetRequestID(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r = r.WithContext(SetRUID(r.Context(), uuid.New()[:8]))
		metrics.GetOrRegisterCounter(fmt.Sprintf("http.request.%s", r.Method), nil).Inc(1)
		log.Info("created ruid for request", "ruid", GetRUID(r.Context()), "method", r.Method, "url", r.RequestURI)

		h.ServeHTTP(w, r)
	})
}

func SetRequestHost(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r = r.WithContext(sctx.SetHost(r.Context(), r.Host))
		log.Info("setting request host", "ruid", GetRUID(r.Context()), "host", sctx.GetHost(r.Context()))

		h.ServeHTTP(w, r)
	})
}

func ParseURI(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		uri, err := api.Parse(strings.TrimLeft(r.URL.Path, "/"))
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			respondError(w, r, fmt.Sprintf("invalid URI %q", r.URL.Path), http.StatusBadRequest)
			return
		}
		if uri.Addr != "" && strings.HasPrefix(uri.Addr, "0x") {
			uri.Addr = strings.TrimPrefix(uri.Addr, "0x")

			msg := fmt.Sprintf(`The requested hash seems to be prefixed with '0x'. You will be redirected to the correct URL within 5 seconds.<br/>
			Please click <a href='%[1]s'>here</a> if your browser does not redirect you within 5 seconds.<script>setTimeout("location.href='%[1]s';",5000);</script>`, "/"+uri.String())
			w.WriteHeader(http.StatusNotFound)
			w.Write([]byte(msg))
			return
		}

		ctx := r.Context()
		r = r.WithContext(SetURI(ctx, uri))
		log.Debug("parsed request path", "ruid", GetRUID(r.Context()), "method", r.Method, "uri.Addr", uri.Addr, "uri.Path", uri.Path, "uri.Scheme", uri.Scheme)

		h.ServeHTTP(w, r)
	})
}

func InitLoggingResponseWriter(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		tn := time.Now()

		writer := newLoggingResponseWriter(w)
		h.ServeHTTP(writer, r)

		ts := time.Since(tn)
		log.Info("request served", "ruid", GetRUID(r.Context()), "code", writer.statusCode, "time", ts)
		metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.time", r.Method), nil).Update(ts)
		metrics.GetOrRegisterResettingTimer(fmt.Sprintf("http.request.%s.%d.time", r.Method, writer.statusCode), nil).Update(ts)
	})
}

// InitUploadTag creates a new tag for an upload to the local HTTP proxy.
// If a tag is not named using the SwarmTagHeaderName, a fallback name will be used.
// When the Content-Length header is set, an ETA on chunking will be available since the
// number of chunks to be split is known in advance (not including enclosing manifest chunks).
// The tag can later be accessed using the appropriate identifier in the request context.
func InitUploadTag(h http.Handler, tags *chunk.Tags) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var (
			tagName        string
			err            error
			estimatedTotal int64 = 0
			contentType    = r.Header.Get("Content-Type")
			headerTag      = r.Header.Get(SwarmTagHeaderName)
		)
		if headerTag != "" {
			tagName = headerTag
			log.Trace("got tag name from http header", "tagName", tagName)
		} else {
			tagName = fmt.Sprintf("unnamed_tag_%d", time.Now().Unix())
		}

		if !strings.Contains(contentType, "multipart") && r.ContentLength > 0 {
			log.Trace("calculating tag size", "contentType", contentType, "contentLength", r.ContentLength)
			uri := GetURI(r.Context())
			if uri != nil {
				log.Debug("got uri from context")
				if uri.Addr == "encrypt" {
					estimatedTotal = calculateNumberOfChunks(r.ContentLength, true)
				} else {
					estimatedTotal = calculateNumberOfChunks(r.ContentLength, false)
				}
			}
		}

		log.Trace("creating tag", "tagName", tagName, "estimatedTotal", estimatedTotal)

		t, err := tags.New(tagName, estimatedTotal)
		if err != nil {
			log.Error("error creating tag", "err", err, "tagName", tagName)
		}

		log.Trace("setting tag id to context", "uid", t.Uid)
		ctx := sctx.SetTag(r.Context(), t.Uid)

		h.ServeHTTP(w, r.WithContext(ctx))
	})
}

func InstrumentOpenTracing(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		uri := GetURI(r.Context())
		if uri == nil || r.Method == "" || (uri != nil && uri.Scheme == "") {
			h.ServeHTTP(w, r) // soft fail
			return
		}
		spanName := fmt.Sprintf("http.%s.%s", r.Method, uri.Scheme)
		ctx, sp := spancontext.StartSpan(r.Context(), spanName)

		defer sp.Finish()
		h.ServeHTTP(w, r.WithContext(ctx))
	})
}

func RecoverPanic(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil {
				log.Error("panic recovery!", "stack trace", string(debug.Stack()), "url", r.URL.String(), "headers", r.Header)
			}
		}()
		h.ServeHTTP(w, r)
	})
}
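A minimal composition sketch (illustrative wiring, not the server's actual setup code): because Adapt applies adapters FIFO, RecoverPanic below is the outermost wrapper and runs first; mux stands in for the main bzz handler.

package main

import (
	"net/http"

	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

func main() {
	// mux stands in for the server's main bzz handler
	mux := http.NewServeMux()

	handler := swarmhttp.Adapt(mux,
		swarmhttp.RecoverPanic,  // outermost: catches panics from everything below
		swarmhttp.SetRequestID,  // assigns a ruid before later handlers log
		swarmhttp.SetRequestHost,
		swarmhttp.InitLoggingResponseWriter,
		swarmhttp.ParseURI,
		swarmhttp.InstrumentOpenTracing,
	)
	http.ListenAndServe("127.0.0.1:8500", handler)
}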
@ -1,132 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/swarm/api"
|
||||
)
|
||||
|
||||
var (
|
||||
htmlCounter = metrics.NewRegisteredCounter("api.http.errorpage.html.count", nil)
|
||||
jsonCounter = metrics.NewRegisteredCounter("api.http.errorpage.json.count", nil)
|
||||
plaintextCounter = metrics.NewRegisteredCounter("api.http.errorpage.plaintext.count", nil)
|
||||
)
|
||||
|
||||
type ResponseParams struct {
|
||||
Msg template.HTML
|
||||
Code int
|
||||
Timestamp string
|
||||
template *template.Template
|
||||
Details template.HTML
|
||||
}
|
||||
|
||||
// ShowMultipleChoices is used when a user requests a resource in a manifest that yields
// ambiguous results. It returns an HTML page with a clickable link for each manifest entry
// that matches the ambiguous part of the request URI.
// For example, if the user requests bzz:/<hash>/read and that manifest contains entries
// "readme.md" and "readinglist.txt", an HTML page is returned with these two links.
// This only applies if the manifest has no default entry.
|
||||
func ShowMultipleChoices(w http.ResponseWriter, r *http.Request, list api.ManifestList) {
|
||||
log.Debug("ShowMultipleChoices", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
|
||||
msg := ""
|
||||
if list.Entries == nil {
|
||||
respondError(w, r, "Could not resolve", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
requestUri := strings.TrimPrefix(r.RequestURI, "/")
|
||||
|
||||
uri, err := api.Parse(requestUri)
|
||||
if err != nil {
	respondError(w, r, "Bad Request", http.StatusBadRequest)
	return
}
|
||||
|
||||
uri.Scheme = "bzz-list"
|
||||
msg += fmt.Sprintf("Disambiguation:<br/>Your request may refer to multiple choices.<br/>Click <a class=\"orange\" href='/%[1]s'>here</a> if your browser does not redirect you within 5 seconds.<script>setTimeout(\"location.href='/%[1]s';\",5000);</script><br/>", uri.String())
|
||||
respondTemplate(w, r, "error", msg, http.StatusMultipleChoices)
|
||||
}
|
||||
|
||||
func respondTemplate(w http.ResponseWriter, r *http.Request, templateName, msg string, code int) {
|
||||
log.Debug("respondTemplate", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()))
|
||||
respond(w, r, &ResponseParams{
|
||||
Code: code,
|
||||
Msg: template.HTML(msg),
|
||||
Timestamp: time.Now().Format(time.RFC1123),
|
||||
template: TemplatesMap[templateName],
|
||||
})
|
||||
}
|
||||
|
||||
func respondError(w http.ResponseWriter, r *http.Request, msg string, code int) {
|
||||
log.Info("respondError", "ruid", GetRUID(r.Context()), "uri", GetURI(r.Context()), "code", code, "msg", msg)
|
||||
respondTemplate(w, r, "error", msg, code)
|
||||
}
|
||||
|
||||
func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
|
||||
w.WriteHeader(params.Code)
|
||||
|
||||
if params.Code >= 400 {
|
||||
w.Header().Del("Cache-Control")
|
||||
w.Header().Del("ETag")
|
||||
}
|
||||
|
||||
acceptHeader := r.Header.Get("Accept")
|
||||
// this cannot be in a switch since an Accept header can have multiple values: "Accept: */*, text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8"
|
||||
if strings.Contains(acceptHeader, "application/json") {
|
||||
if err := respondJSON(w, r, params); err != nil {
|
||||
respondError(w, r, "Internal server error", http.StatusInternalServerError)
|
||||
}
|
||||
} else if strings.Contains(acceptHeader, "text/html") {
|
||||
respondHTML(w, r, params)
|
||||
} else {
|
||||
respondPlaintext(w, r, params) // returns nice errors for curl
|
||||
}
|
||||
}
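// Illustrative only (assumed gateway URL): the Accept header drives which of the
// three responders below is used, e.g. to get machine-readable errors from a Go
// client or curl:
//
//	req, _ := http.NewRequest("GET", "http://localhost:8500/bzz:/nonexistent/", nil)
//	req.Header.Set("Accept", "application/json") // routes to respondJSON
//	resp, _ := http.DefaultClient.Do(req)        // text/html => respondHTML, otherwise plaintext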
|
||||
|
||||
func respondHTML(w http.ResponseWriter, r *http.Request, params *ResponseParams) {
|
||||
htmlCounter.Inc(1)
|
||||
log.Info("respondHTML", "ruid", GetRUID(r.Context()), "code", params.Code)
|
||||
err := params.template.Execute(w, params)
|
||||
if err != nil {
|
||||
log.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func respondJSON(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
|
||||
jsonCounter.Inc(1)
|
||||
log.Info("respondJSON", "ruid", GetRUID(r.Context()), "code", params.Code)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
return json.NewEncoder(w).Encode(params)
|
||||
}
|
||||
|
||||
func respondPlaintext(w http.ResponseWriter, r *http.Request, params *ResponseParams) error {
|
||||
plaintextCounter.Inc(1)
|
||||
log.Info("respondPlaintext", "ruid", GetRUID(r.Context()), "code", params.Code)
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
strToWrite := "Code: " + fmt.Sprintf("%d", params.Code) + "\n"
|
||||
strToWrite += "Message: " + string(params.Msg) + "\n"
|
||||
strToWrite += "Timestamp: " + params.Timestamp + "\n"
|
||||
_, err := w.Write([]byte(strToWrite))
|
||||
return err
|
||||
}
|
@ -1,170 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
func TestError(t *testing.T) {
|
||||
srv := NewTestSwarmServer(t, serverFunc, nil)
|
||||
defer srv.Close()
|
||||
|
||||
var resp *http.Response
|
||||
var respbody []byte
|
||||
|
||||
url := srv.URL + "/this_should_fail_as_no_bzz_protocol_present"
|
||||
resp, err := http.Get(url)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respbody, err = ioutil.ReadAll(resp.Body)
|
||||
|
||||
if resp.StatusCode != 404 && !strings.Contains(string(respbody), "Invalid URI \"/this_should_fail_as_no_bzz_protocol_present\": unknown scheme") {
	t.Fatalf("Response body does not match, expected: %v, to contain: %v; received code %d, expected code: %d", string(respbody), "Invalid bzz URI: unknown scheme", resp.StatusCode, 404)
|
||||
}
|
||||
|
||||
_, err = html.Parse(strings.NewReader(string(respbody)))
|
||||
if err != nil {
|
||||
t.Fatalf("HTML validation failed for error page returned!")
|
||||
}
|
||||
}
|
||||
|
||||
func Test404Page(t *testing.T) {
|
||||
srv := NewTestSwarmServer(t, serverFunc, nil)
|
||||
defer srv.Close()
|
||||
|
||||
var resp *http.Response
|
||||
var respbody []byte
|
||||
|
||||
url := srv.URL + "/bzz:/1234567890123456789012345678901234567890123456789012345678901234"
|
||||
resp, err := http.Get(url)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respbody, err = ioutil.ReadAll(resp.Body)
|
||||
|
||||
if resp.StatusCode != 404 || !strings.Contains(string(respbody), "404") {
|
||||
t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
_, err = html.Parse(strings.NewReader(string(respbody)))
|
||||
if err != nil {
|
||||
t.Fatalf("HTML validation failed for error page returned!")
|
||||
}
|
||||
}
|
||||
|
||||
func Test500Page(t *testing.T) {
|
||||
srv := NewTestSwarmServer(t, serverFunc, nil)
|
||||
defer srv.Close()
|
||||
|
||||
var resp *http.Response
|
||||
var respbody []byte
|
||||
|
||||
url := srv.URL + "/bzz:/thisShouldFailWith500Code"
|
||||
resp, err := http.Get(url)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respbody, err = ioutil.ReadAll(resp.Body)
|
||||
|
||||
if resp.StatusCode != 404 {
|
||||
t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
_, err = html.Parse(strings.NewReader(string(respbody)))
|
||||
if err != nil {
|
||||
t.Fatalf("HTML validation failed for error page returned!")
|
||||
}
|
||||
}
|
||||
func Test500PageWith0xHashPrefix(t *testing.T) {
|
||||
srv := NewTestSwarmServer(t, serverFunc, nil)
|
||||
defer srv.Close()
|
||||
|
||||
var resp *http.Response
|
||||
var respbody []byte
|
||||
|
||||
url := srv.URL + "/bzz:/0xthisShouldFailWith500CodeAndAHelpfulMessage"
|
||||
resp, err := http.Get(url)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respbody, err = ioutil.ReadAll(resp.Body)
|
||||
|
||||
if resp.StatusCode != 404 {
|
||||
t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
if !strings.Contains(string(respbody), "The requested hash seems to be prefixed with") {
|
||||
t.Fatalf("Did not receive the expected error message")
|
||||
}
|
||||
|
||||
_, err = html.Parse(strings.NewReader(string(respbody)))
|
||||
if err != nil {
|
||||
t.Fatalf("HTML validation failed for error page returned!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestJsonResponse(t *testing.T) {
|
||||
srv := NewTestSwarmServer(t, serverFunc, nil)
|
||||
defer srv.Close()
|
||||
|
||||
var resp *http.Response
|
||||
var respbody []byte
|
||||
|
||||
url := srv.URL + "/bzz:/thisShouldFailWith500Code/"
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
req.Header.Set("Accept", "application/json")
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
t.Fatalf("Request failed: %v", err)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
respbody, err = ioutil.ReadAll(resp.Body)
|
||||
|
||||
if resp.StatusCode != 404 {
|
||||
t.Fatalf("Invalid Status Code received, expected 404, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
if !isJSON(string(respbody)) {
|
||||
t.Fatalf("Expected response to be JSON, received invalid JSON: %s", string(respbody))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func isJSON(s string) bool {
|
||||
var js map[string]interface{}
|
||||
return json.Unmarshal([]byte(s), &js) == nil
|
||||
}
|
@ -1,66 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
)
|
||||
|
||||
/*
|
||||
http roundtripper to register for bzz url scheme
|
||||
see https://github.com/ethereum/go-ethereum/issues/2040
|
||||
Usage:
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common/httpclient"
|
||||
"github.com/ethereum/go-ethereum/swarm/api/http"
|
||||
)
|
||||
client := httpclient.New()
|
||||
// for (private) swarm proxy running locally
|
||||
client.RegisterScheme("bzz", &http.RoundTripper{Port: port})
|
||||
client.RegisterScheme("bzz-immutable", &http.RoundTripper{Port: port})
|
||||
client.RegisterScheme("bzz-raw", &http.RoundTripper{Port: port})
|
||||
|
||||
The port you give the Roundtripper is the port the swarm proxy is listening on.
|
||||
If Host is left empty, localhost is assumed.
|
||||
|
||||
Using a public gateway, the above few lines give you the leanest
bzz-scheme-aware read-only HTTP client. You really only ever need this
if you need Go-native Swarm access to bzz addresses.
|
||||
*/
|
||||
|
||||
type RoundTripper struct {
|
||||
Host string
|
||||
Port string
|
||||
}
|
||||
|
||||
func (rt *RoundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	host := rt.Host
	if len(host) == 0 {
		host = "localhost"
	}
	url := fmt.Sprintf("http://%s:%s/%s:/%s/%s", host, rt.Port, req.Proto, req.URL.Host, req.URL.Path)
|
||||
log.Info(fmt.Sprintf("roundtripper: proxying request '%s' to '%s'", req.RequestURI, url))
|
||||
reqProxy, err := http.NewRequest(req.Method, url, req.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return http.DefaultClient.Do(reqProxy)
|
||||
}
|
@ -1,69 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRoundTripper(t *testing.T) {
|
||||
serveMux := http.NewServeMux()
|
||||
serveMux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == "GET" {
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
http.ServeContent(w, r, "", time.Unix(0, 0), strings.NewReader(r.RequestURI))
|
||||
} else {
|
||||
http.Error(w, "Method "+r.Method+" is not supported.", http.StatusMethodNotAllowed)
|
||||
}
|
||||
})
|
||||
|
||||
srv := httptest.NewServer(serveMux)
|
||||
defer srv.Close()
|
||||
|
||||
host, port, _ := net.SplitHostPort(srv.Listener.Addr().String())
|
||||
rt := &RoundTripper{Host: host, Port: port}
|
||||
trans := &http.Transport{}
|
||||
trans.RegisterProtocol("bzz", rt)
|
||||
client := &http.Client{Transport: trans}
|
||||
resp, err := client.Get("bzz://test.com/path")
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
content, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
t.Errorf("expected no error, got %v", err)
|
||||
return
|
||||
}
|
||||
if string(content) != "/HTTP/1.1:/test.com/path" {
|
||||
t.Errorf("incorrect response from http server: expected '%v', got '%v'", "/HTTP/1.1:/test.com/path", string(content))
|
||||
}
|
||||
|
||||
}
|
@ -1,34 +0,0 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/api"
|
||||
"github.com/ethereum/go-ethereum/swarm/sctx"
|
||||
)
|
||||
|
||||
type uriKey struct{}
|
||||
|
||||
func GetRUID(ctx context.Context) string {
|
||||
v, ok := ctx.Value(sctx.HTTPRequestIDKey{}).(string)
|
||||
if ok {
|
||||
return v
|
||||
}
|
||||
return "xxxxxxxx"
|
||||
}
|
||||
|
||||
func SetRUID(ctx context.Context, ruid string) context.Context {
|
||||
return context.WithValue(ctx, sctx.HTTPRequestIDKey{}, ruid)
|
||||
}
|
||||
|
||||
func GetURI(ctx context.Context) *api.URI {
|
||||
v, ok := ctx.Value(uriKey{}).(*api.URI)
|
||||
if ok {
|
||||
return v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetURI(ctx context.Context, uri *api.URI) context.Context {
|
||||
return context.WithValue(ctx, uriKey{}, uri)
|
||||
}
|
@ -1,937 +0,0 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
/*
|
||||
A simple http server interface to Swarm
|
||||
*/
|
||||
package http
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/swarm/api"
|
||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||
"github.com/ethereum/go-ethereum/swarm/log"
|
||||
"github.com/ethereum/go-ethereum/swarm/sctx"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
var (
|
||||
postRawCount = metrics.NewRegisteredCounter("api.http.post.raw.count", nil)
|
||||
postRawFail = metrics.NewRegisteredCounter("api.http.post.raw.fail", nil)
|
||||
postFilesCount = metrics.NewRegisteredCounter("api.http.post.files.count", nil)
|
||||
postFilesFail = metrics.NewRegisteredCounter("api.http.post.files.fail", nil)
|
||||
deleteCount = metrics.NewRegisteredCounter("api.http.delete.count", nil)
|
||||
deleteFail = metrics.NewRegisteredCounter("api.http.delete.fail", nil)
|
||||
getCount = metrics.NewRegisteredCounter("api.http.get.count", nil)
|
||||
getFail = metrics.NewRegisteredCounter("api.http.get.fail", nil)
|
||||
getFileCount = metrics.NewRegisteredCounter("api.http.get.file.count", nil)
|
||||
getFileNotFound = metrics.NewRegisteredCounter("api.http.get.file.notfound", nil)
|
||||
getFileFail = metrics.NewRegisteredCounter("api.http.get.file.fail", nil)
|
||||
getListCount = metrics.NewRegisteredCounter("api.http.get.list.count", nil)
|
||||
getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil)
|
||||
)
|
||||
|
||||
const SwarmTagHeaderName = "x-swarm-tag"
|
||||
|
||||
type methodHandler map[string]http.Handler
|
||||
|
||||
func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
|
||||
v, ok := m[r.Method]
|
||||
if ok {
|
||||
v.ServeHTTP(rw, r)
|
||||
return
|
||||
}
|
||||
rw.WriteHeader(http.StatusMethodNotAllowed)
|
||||
}
|
||||
|
||||
func NewServer(api *api.API, corsString string) *Server {
|
||||
var allowedOrigins []string
|
||||
for _, domain := range strings.Split(corsString, ",") {
|
||||
allowedOrigins = append(allowedOrigins, strings.TrimSpace(domain))
|
||||
}
|
||||
c := cors.New(cors.Options{
|
||||
AllowedOrigins: allowedOrigins,
|
||||
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodPatch, http.MethodPut},
|
||||
MaxAge: 600,
|
||||
AllowedHeaders: []string{"*"},
|
||||
})
|
||||
|
||||
server := &Server{api: api}
|
||||
|
||||
defaultMiddlewares := []Adapter{
|
||||
RecoverPanic,
|
||||
SetRequestID,
|
||||
SetRequestHost,
|
||||
InitLoggingResponseWriter,
|
||||
ParseURI,
|
||||
InstrumentOpenTracing,
|
||||
}
|
||||
|
||||
tagAdapter := Adapter(func(h http.Handler) http.Handler {
|
||||
return InitUploadTag(h, api.Tags)
|
||||
})
|
||||
|
||||
defaultPostMiddlewares := append(defaultMiddlewares, tagAdapter)
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.Handle("/bzz:/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleBzzGet),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
"POST": Adapt(
|
||||
http.HandlerFunc(server.HandlePostFiles),
|
||||
defaultPostMiddlewares...,
|
||||
),
|
||||
"DELETE": Adapt(
|
||||
http.HandlerFunc(server.HandleDelete),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
})
|
||||
mux.Handle("/bzz-raw:/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleGet),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
"POST": Adapt(
|
||||
http.HandlerFunc(server.HandlePostRaw),
|
||||
defaultPostMiddlewares...,
|
||||
),
|
||||
})
|
||||
mux.Handle("/bzz-immutable:/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleBzzGet),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
})
|
||||
mux.Handle("/bzz-hash:/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleGet),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
})
|
||||
mux.Handle("/bzz-list:/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleGetList),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
})
|
||||
mux.Handle("/bzz-feed:/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleGetFeed),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
"POST": Adapt(
|
||||
http.HandlerFunc(server.HandlePostFeed),
|
||||
defaultMiddlewares...,
|
||||
),
|
||||
})
|
||||
|
||||
mux.Handle("/", methodHandler{
|
||||
"GET": Adapt(
|
||||
http.HandlerFunc(server.HandleRootPaths),
|
||||
SetRequestID,
|
||||
InitLoggingResponseWriter,
|
||||
),
|
||||
})
|
||||
server.Handler = c.Handler(mux)
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
func (s *Server) ListenAndServe(addr string) error {
|
||||
s.listenAddr = addr
|
||||
return http.ListenAndServe(addr, s)
|
||||
}
|
||||
|
||||
// browser API for registering bzz url scheme handlers:
|
||||
// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
|
||||
// electron (chromium) api for registering bzz url scheme handlers:
|
||||
// https://github.com/atom/electron/blob/master/docs/api/protocol.md
|
||||
type Server struct {
|
||||
http.Handler
|
||||
api *api.API
|
||||
listenAddr string
|
||||
}
|
||||
|
||||
func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) {
|
||||
log.Debug("handleBzzGet", "ruid", GetRUID(r.Context()), "uri", r.RequestURI)
|
||||
if r.Header.Get("Accept") == "application/x-tar" {
|
||||
uri := GetURI(r.Context())
|
||||
_, credentials, _ := r.BasicAuth()
|
||||
reader, err := s.api.GetDirectoryTar(r.Context(), s.api.Decryptor(r.Context(), credentials), uri)
|
||||
if err != nil {
|
||||
if isDecryptError(err) {
|
||||
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", uri.Address().String()))
|
||||
respondError(w, r, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
respondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
w.Header().Set("Content-Type", "application/x-tar")
|
||||
|
||||
fileName := uri.Addr
|
||||
if found := path.Base(uri.Path); found != "" && found != "." && found != "/" {
|
||||
fileName = found
|
||||
}
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s.tar\"", fileName))
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
io.Copy(w, reader)
|
||||
return
|
||||
}
|
||||
|
||||
s.HandleGetFile(w, r)
|
||||
}
|
||||
|
||||
func (s *Server) HandleRootPaths(w http.ResponseWriter, r *http.Request) {
|
||||
switch r.RequestURI {
|
||||
case "/":
|
||||
respondTemplate(w, r, "landing-page", "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", 200)
|
||||
return
|
||||
case "/robots.txt":
|
||||
w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat))
|
||||
fmt.Fprintf(w, "User-agent: *\nDisallow: /")
|
||||
case "/favicon.ico":
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(faviconBytes)
|
||||
default:
|
||||
respondError(w, r, "Not Found", http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request
|
||||
// body in swarm and returns the resulting storage address as a text/plain response
|
||||
func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
log.Debug("handle.post.raw", "ruid", ruid)
|
||||
|
||||
tagUid := sctx.GetTag(r.Context())
|
||||
tag, err := s.api.Tags.Get(tagUid)
|
||||
if err != nil {
|
||||
log.Error("handle post raw got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
|
||||
}
|
||||
|
||||
postRawCount.Inc(1)
|
||||
|
||||
toEncrypt := false
|
||||
uri := GetURI(r.Context())
|
||||
if uri.Addr == "encrypt" {
|
||||
toEncrypt = true
|
||||
}
|
||||
|
||||
if uri.Path != "" {
|
||||
postRawFail.Inc(1)
|
||||
respondError(w, r, "raw POST request cannot contain a path", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if uri.Addr != "" && uri.Addr != "encrypt" {
|
||||
postRawFail.Inc(1)
|
||||
respondError(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Header.Get("Content-Length") == "" {
|
||||
postRawFail.Inc(1)
|
||||
respondError(w, r, "missing Content-Length header in request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
addr, wait, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt)
|
||||
if err != nil {
|
||||
postRawFail.Inc(1)
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
wait(r.Context())
|
||||
tag.DoneSplit(addr)
|
||||
|
||||
log.Debug("stored content", "ruid", ruid, "key", addr)
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, addr)
|
||||
}
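// Minimal client-side sketch of the raw upload handled above (assumed gateway URL).
// http.Post with a *strings.Reader sets Content-Length automatically, which this
// handler requires; POSTing to bzz-raw:/encrypt would store the content encrypted:
//
//	body := strings.NewReader("some raw bytes")
//	resp, err := http.Post("http://localhost:8500/bzz-raw:/", "application/octet-stream", body)
//	// on success the response body is the hex storage address of the content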
|
||||
|
||||
// HandlePostFiles handles a POST request to
|
||||
// bzz:/<hash>/<path> which contains either a single file or multiple files
|
||||
// (either a tar archive or multipart form), adds those files either to an
|
||||
// existing manifest or to a new manifest under <path> and returns the
|
||||
// resulting manifest hash as a text/plain response
|
||||
func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
log.Debug("handle.post.files", "ruid", ruid)
|
||||
postFilesCount.Inc(1)
|
||||
|
||||
contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
postFilesFail.Inc(1)
|
||||
respondError(w, r, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
toEncrypt := false
|
||||
uri := GetURI(r.Context())
|
||||
if uri.Addr == "encrypt" {
|
||||
toEncrypt = true
|
||||
}
|
||||
|
||||
var addr storage.Address
|
||||
if uri.Addr != "" && uri.Addr != "encrypt" {
|
||||
addr, err = s.api.Resolve(r.Context(), uri.Addr)
|
||||
if err != nil {
|
||||
postFilesFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
log.Debug("resolved key", "ruid", ruid, "key", addr)
|
||||
} else {
|
||||
addr, err = s.api.NewManifest(r.Context(), toEncrypt)
|
||||
if err != nil {
|
||||
postFilesFail.Inc(1)
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
log.Debug("new manifest", "ruid", ruid, "key", addr)
|
||||
}
|
||||
newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error {
|
||||
switch contentType {
|
||||
case "application/x-tar":
|
||||
_, err := s.handleTarUpload(r, mw)
|
||||
if err != nil {
|
||||
respondError(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case "multipart/form-data":
|
||||
return s.handleMultipartUpload(r, params["boundary"], mw)
|
||||
|
||||
default:
|
||||
return s.handleDirectUpload(r, mw)
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
postFilesFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
tagUid := sctx.GetTag(r.Context())
|
||||
tag, err := s.api.Tags.Get(tagUid)
|
||||
if err != nil {
|
||||
log.Error("got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
|
||||
}
|
||||
|
||||
log.Debug("done splitting, setting tag total", "SPLIT", tag.Get(chunk.StateSplit), "TOTAL", tag.Total())
|
||||
tag.DoneSplit(newAddr)
|
||||
|
||||
log.Debug("stored content", "ruid", ruid, "key", newAddr)
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, newAddr)
|
||||
}
|
||||
|
||||
func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) {
|
||||
log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context()), "tag", sctx.GetTag(r.Context()))
|
||||
|
||||
defaultPath := r.URL.Query().Get("defaultpath")
|
||||
|
||||
key, err := s.api.UploadTar(r.Context(), r.Body, GetURI(r.Context()).Path, defaultPath, mw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return key, nil
|
||||
}
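// Hedged sketch of a matching client call (assumed gateway URL and local tar file):
// the defaultpath query parameter read above selects the manifest's default entry:
//
//	f, _ := os.Open("site.tar")
//	defer f.Close()
//	resp, _ := http.Post("http://localhost:8500/bzz:/?defaultpath=index.html", "application/x-tar", f)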
|
||||
|
||||
func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api.ManifestWriter) error {
|
||||
ruid := GetRUID(r.Context())
|
||||
log.Debug("handle.multipart.upload", "ruid", ruid)
|
||||
mr := multipart.NewReader(r.Body, boundary)
|
||||
for {
|
||||
part, err := mr.NextPart()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error reading multipart form: %s", err)
|
||||
}
|
||||
|
||||
var size int64
|
||||
var reader io.Reader
|
||||
if contentLength := part.Header.Get("Content-Length"); contentLength != "" {
|
||||
size, err = strconv.ParseInt(contentLength, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing multipart content length: %s", err)
|
||||
}
|
||||
reader = part
|
||||
} else {
|
||||
// copy the part to a tmp file to get its size
|
||||
tmp, err := ioutil.TempFile("", "swarm-multipart")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tmp.Name())
|
||||
defer tmp.Close()
|
||||
size, err = io.Copy(tmp, part)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error copying multipart content: %s", err)
|
||||
}
|
||||
if _, err := tmp.Seek(0, io.SeekStart); err != nil {
|
||||
return fmt.Errorf("error copying multipart content: %s", err)
|
||||
}
|
||||
reader = tmp
|
||||
}
|
||||
|
||||
// add the entry under the path from the request
|
||||
name := part.FileName()
|
||||
if name == "" {
|
||||
name = part.FormName()
|
||||
}
|
||||
uri := GetURI(r.Context())
|
||||
path := path.Join(uri.Path, name)
|
||||
entry := &api.ManifestEntry{
|
||||
Path: path,
|
||||
ContentType: part.Header.Get("Content-Type"),
|
||||
Size: size,
|
||||
}
|
||||
log.Debug("adding path to new manifest", "ruid", ruid, "bytes", entry.Size, "path", entry.Path)
|
||||
contentKey, err := mw.AddEntry(r.Context(), reader, entry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error adding manifest entry from multipart form: %s", err)
|
||||
}
|
||||
log.Debug("stored content", "ruid", ruid, "key", contentKey)
|
||||
}
|
||||
}
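// Hedged client-side sketch of the multipart path above (assumed gateway URL):
//
//	var buf bytes.Buffer
//	mpw := multipart.NewWriter(&buf)
//	fw, _ := mpw.CreateFormFile("file", "hello.txt") // the part's file name becomes the manifest path
//	fw.Write([]byte("hello"))
//	mpw.Close()
//	http.Post("http://localhost:8500/bzz:/", mpw.FormDataContentType(), &buf)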
|
||||
|
||||
func (s *Server) handleDirectUpload(r *http.Request, mw *api.ManifestWriter) error {
|
||||
ruid := GetRUID(r.Context())
|
||||
log.Debug("handle.direct.upload", "ruid", ruid)
|
||||
key, err := mw.AddEntry(r.Context(), r.Body, &api.ManifestEntry{
|
||||
Path: GetURI(r.Context()).Path,
|
||||
ContentType: r.Header.Get("Content-Type"),
|
||||
Mode: 0644,
|
||||
Size: r.ContentLength,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Debug("stored content", "ruid", ruid, "key", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleDelete handles a DELETE request to bzz:/<manifest>/<path>, removes
|
||||
// <path> from <manifest> and returns the resulting manifest hash as a
|
||||
// text/plain response
|
||||
func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
uri := GetURI(r.Context())
|
||||
log.Debug("handle.delete", "ruid", ruid)
|
||||
deleteCount.Inc(1)
|
||||
newKey, err := s.api.Delete(r.Context(), uri.Addr, uri.Path)
|
||||
if err != nil {
|
||||
deleteFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, newKey)
|
||||
}
|
||||
|
||||
// HandlePostFeed handles feed manifest creation and feed updates.
// The POST request accepts a JSON structure as defined in the feeds package: `feed.updateRequestJSON`.
// The request can a) create a feed manifest, b) update a feed, or c) both a+b: create a feed manifest and publish a first update.
|
||||
func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
uri := GetURI(r.Context())
|
||||
log.Debug("handle.post.feed", "ruid", ruid)
|
||||
var err error
|
||||
|
||||
// Creation and update must send feed.updateRequestJSON JSON structure
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
fd, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query())
|
||||
if err != nil { // couldn't parse query string or retrieve manifest
|
||||
getFail.Inc(1)
|
||||
httpStatus := http.StatusBadRequest
|
||||
if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI {
|
||||
httpStatus = http.StatusNotFound
|
||||
}
|
||||
respondError(w, r, fmt.Sprintf("cannot retrieve feed from manifest: %s", err), httpStatus)
|
||||
return
|
||||
}
|
||||
|
||||
var updateRequest feed.Request
|
||||
updateRequest.Feed = *fd
|
||||
query := r.URL.Query()
|
||||
|
||||
if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters
|
||||
respondError(w, r, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
switch {
|
||||
case updateRequest.IsUpdate():
|
||||
// Verify that the signature is intact and that the signer is authorized
|
||||
// to update this feed
|
||||
// Check this early, to avoid creating a feed and then not being able to set its first update.
|
||||
if err = updateRequest.Verify(); err != nil {
|
||||
respondError(w, r, err.Error(), http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
_, err = s.api.FeedsUpdate(r.Context(), &updateRequest)
|
||||
if err != nil {
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
fallthrough
|
||||
case query.Get("manifest") == "1":
|
||||
// we create a manifest so we can retrieve feed updates with bzz:// later
|
||||
// this manifest is of the special "feed" manifest type, and saves the
// feed identification used to retrieve feed updates later
|
||||
m, err := s.api.NewFeedManifest(r.Context(), &updateRequest.Feed)
|
||||
if err != nil {
|
||||
respondError(w, r, fmt.Sprintf("failed to create feed manifest: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// the key to the manifest will be passed back to the client
|
||||
// the client can access the feed directly through its Feed member
|
||||
// the manifest key can be set as content in the resolver of the ENS name
|
||||
outdata, err := json.Marshal(m)
|
||||
if err != nil {
|
||||
respondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
// the Content-Type header must be set before the body is written
w.Header().Add("Content-Type", "application/json")
fmt.Fprint(w, string(outdata))
|
||||
default:
|
||||
respondError(w, r, "Missing signature in feed update request", http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
|
||||
// HandleGetFeed retrieves Swarm feeds updates:
|
||||
// bzz-feed://<manifest address or ENS name> - get latest feed update, given a manifest address
|
||||
// - or -
|
||||
// specify user + topic (optional), subtopic name (optional) directly, without manifest:
|
||||
// bzz-feed://?user=0x...&topic=0x...&name=subtopic name
|
||||
// topic defaults to 0x000... if not specified.
|
||||
// name defaults to empty string if not specified.
|
||||
// thus, an empty name and topic refer to the user's default feed.
|
||||
//
|
||||
// Optional parameters:
|
||||
// time=xx - get the latest update before time (in epoch seconds)
|
||||
// hint.time=xx - hint the lookup algorithm looking for updates at around that time
|
||||
// hint.level=xx - hint the lookup algorithm looking for updates at around this frequency level
|
||||
// meta=1 - get feed metadata and status information instead of performing a feed query
|
||||
// NOTE: meta=1 will be deprecated in the near future
|
||||
func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
uri := GetURI(r.Context())
|
||||
log.Debug("handle.get.feed", "ruid", ruid)
|
||||
var err error
|
||||
|
||||
fd, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query())
|
||||
if err != nil { // couldn't parse query string or retrieve manifest
|
||||
getFail.Inc(1)
|
||||
httpStatus := http.StatusBadRequest
|
||||
if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI {
|
||||
httpStatus = http.StatusNotFound
|
||||
}
|
||||
respondError(w, r, fmt.Sprintf("cannot retrieve feed information from manifest: %s", err), httpStatus)
|
||||
return
|
||||
}
|
||||
|
||||
// determine if the query specifies period and version or it is a metadata query
|
||||
if r.URL.Query().Get("meta") == "1" {
|
||||
unsignedUpdateRequest, err := s.api.FeedsNewRequest(r.Context(), fd)
|
||||
if err != nil {
|
||||
getFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", fd.Hex(), err), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
rawResponse, err := unsignedUpdateRequest.MarshalJSON()
|
||||
if err != nil {
|
||||
respondError(w, r, fmt.Sprintf("cannot encode unsigned feed update request: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Header().Add("Content-type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, string(rawResponse))
|
||||
return
|
||||
}
|
||||
|
||||
lookupParams := &feed.Query{Feed: *fd}
|
||||
if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version
|
||||
respondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
data, err := s.api.FeedsLookup(r.Context(), lookupParams)
|
||||
|
||||
// any error from the feed lookup will end up here
|
||||
if err != nil {
|
||||
code, err2 := s.translateFeedError(w, r, "feed lookup fail", err)
|
||||
respondError(w, r, err2.Error(), code)
|
||||
return
|
||||
}
|
||||
|
||||
// All ok, serve the retrieved update
|
||||
log.Debug("Found update", "feed", fd.Hex(), "ruid", ruid)
|
||||
w.Header().Set("Content-Type", api.MimeOctetStream)
|
||||
http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data))
|
||||
}
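// Hedged usage sketch (assumed gateway URL; userHex is a hypothetical hex-encoded
// user address): fetching the latest update of a user's default feed, and its
// metadata via meta=1 as documented above:
//
//	base := "http://localhost:8500/bzz-feed:/"
//	latest, _ := http.Get(base + "?user=0x" + userHex)           // latest update payload
//	meta, _ := http.Get(base + "?user=0x" + userHex + "&meta=1") // unsigned update request JSON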
|
||||
|
||||
func (s *Server) translateFeedError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) {
|
||||
code := 0
|
||||
defaultErr := fmt.Errorf("%s: %v", supErr, err)
|
||||
rsrcErr, ok := err.(*feed.Error)
|
||||
if ok && rsrcErr != nil { // a failed type assertion leaves rsrcErr nil, so ok must hold
|
||||
code = rsrcErr.Code()
|
||||
}
|
||||
switch code {
|
||||
case storage.ErrInvalidValue:
|
||||
return http.StatusBadRequest, defaultErr
|
||||
case storage.ErrNotFound, storage.ErrNotSynced, storage.ErrNothingToReturn, storage.ErrInit:
|
||||
return http.StatusNotFound, defaultErr
|
||||
case storage.ErrUnauthorized, storage.ErrInvalidSignature:
|
||||
return http.StatusUnauthorized, defaultErr
|
||||
case storage.ErrDataOverflow:
|
||||
return http.StatusRequestEntityTooLarge, defaultErr
|
||||
}
|
||||
|
||||
return http.StatusInternalServerError, defaultErr
|
||||
}
|
||||
|
||||
// HandleGet handles a GET request to
|
||||
// - bzz-raw://<key> and responds with the raw content stored at the
|
||||
// given storage key
|
||||
// - bzz-hash://<key> and responds with the hash of the content stored
|
||||
// at the given storage key as a text/plain response
|
||||
func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
uri := GetURI(r.Context())
|
||||
log.Debug("handle.get", "ruid", ruid, "uri", uri)
|
||||
getCount.Inc(1)
|
||||
_, pass, _ := r.BasicAuth()
|
||||
|
||||
addr, err := s.api.ResolveURI(r.Context(), uri, pass)
|
||||
if err != nil {
|
||||
getFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz://<hex key>/path, so we are sure it is immutable.
|
||||
|
||||
log.Debug("handle.get: resolved", "ruid", ruid, "key", addr)
|
||||
|
||||
// if path is set, interpret <key> as a manifest and return the
|
||||
// raw entry at the given path
|
||||
etag := common.Bytes2Hex(addr)
|
||||
noneMatchEtag := r.Header.Get("If-None-Match")
|
||||
w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to manifest key or raw entry key.
|
||||
if noneMatchEtag != "" {
|
||||
if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), addr) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case uri.Raw():
|
||||
// check the root chunk exists by retrieving the file's size
|
||||
reader, isEncrypted := s.api.Retrieve(r.Context(), addr)
|
||||
if _, err := reader.Size(r.Context(), nil); err != nil {
|
||||
getFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("X-Decrypted", fmt.Sprintf("%v", isEncrypted))
|
||||
|
||||
// allow the request to overwrite the content type using a query
|
||||
// parameter
|
||||
if typ := r.URL.Query().Get("content_type"); typ != "" {
|
||||
w.Header().Set("Content-Type", typ)
|
||||
}
|
||||
http.ServeContent(w, r, "", time.Now(), reader)
|
||||
case uri.Hash():
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, addr)
|
||||
}
|
||||
|
||||
}
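// Illustrative requests against the routes served above (assumed gateway URL;
// <hash> stands for a real storage address):
//
//	http.Get("http://localhost:8500/bzz-raw:/<hash>?content_type=text/plain") // override the served type
//	http.Get("http://localhost:8500/bzz-hash:/<hash>")                        // responds with the hash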
|
||||
|
||||
// HandleGetList handles a GET request to bzz-list:/<manifest>/<path> and returns
|
||||
// a list of all files contained in <manifest> under <path> grouped into
|
||||
// common prefixes using "/" as a delimiter
|
||||
func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
uri := GetURI(r.Context())
|
||||
_, credentials, _ := r.BasicAuth()
|
||||
log.Debug("handle.get.list", "ruid", ruid, "uri", uri)
|
||||
getListCount.Inc(1)
|
||||
|
||||
// ensure the root path has a trailing slash so that relative URLs work
|
||||
if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") {
|
||||
http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
|
||||
addr, err := s.api.Resolve(r.Context(), uri.Addr)
|
||||
if err != nil {
|
||||
getListFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
log.Debug("handle.get.list: resolved", "ruid", ruid, "key", addr)
|
||||
|
||||
list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), addr, uri.Path)
|
||||
if err != nil {
|
||||
getListFail.Inc(1)
|
||||
if isDecryptError(err) {
|
||||
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", addr.String()))
|
||||
respondError(w, r, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// if the client wants HTML (e.g. a browser) then render the list as a
|
||||
// HTML index with relative URLs
|
||||
if strings.Contains(r.Header.Get("Accept"), "text/html") {
|
||||
w.Header().Set("Content-Type", "text/html")
|
||||
err := TemplatesMap["bzz-list"].Execute(w, &htmlListData{
|
||||
URI: &api.URI{
|
||||
Scheme: "bzz",
|
||||
Addr: uri.Addr,
|
||||
Path: uri.Path,
|
||||
},
|
||||
List: &list,
|
||||
})
|
||||
if err != nil {
|
||||
getListFail.Inc(1)
|
||||
log.Error(fmt.Sprintf("error rendering list HTML: %s", err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(&list)
|
||||
}
|
||||
|
||||
// HandleGetFile handles a GET request to bzz://<manifest>/<path> and responds
|
||||
// with the content of the file at <path> from the given <manifest>
|
||||
func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) {
|
||||
ruid := GetRUID(r.Context())
|
||||
uri := GetURI(r.Context())
|
||||
_, credentials, _ := r.BasicAuth()
|
||||
log.Debug("handle.get.file", "ruid", ruid, "uri", r.RequestURI)
|
||||
getFileCount.Inc(1)
|
||||
|
||||
// ensure the root path has a trailing slash so that relative URLs work
|
||||
if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") {
|
||||
http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently)
|
||||
return
|
||||
}
|
||||
var err error
|
||||
manifestAddr := uri.Address()
|
||||
|
||||
if manifestAddr == nil {
|
||||
manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr)
|
||||
if err != nil {
|
||||
getFileFail.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz://<hex key>/path, so we are sure it is immutable.
|
||||
}
|
||||
|
||||
log.Debug("handle.get.file: resolved", "ruid", ruid, "key", manifestAddr)
|
||||
|
||||
reader, contentType, status, contentKey, err := s.api.Get(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path)
|
||||
|
||||
etag := common.Bytes2Hex(contentKey)
|
||||
noneMatchEtag := r.Header.Get("If-None-Match")
|
||||
w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to actual content key.
|
||||
if noneMatchEtag != "" {
|
||||
if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), contentKey) {
|
||||
w.WriteHeader(http.StatusNotModified)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if isDecryptError(err) {
|
||||
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr))
|
||||
respondError(w, r, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
switch status {
|
||||
case http.StatusNotFound:
|
||||
getFileNotFound.Inc(1)
|
||||
respondError(w, r, err.Error(), http.StatusNotFound)
|
||||
default:
|
||||
getFileFail.Inc(1)
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// the request results in ambiguous files,
// e.g. /read with readme.md and readinglist.txt available in the manifest
|
||||
if status == http.StatusMultipleChoices {
|
||||
list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path)
|
||||
if err != nil {
|
||||
getFileFail.Inc(1)
|
||||
if isDecryptError(err) {
|
||||
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr))
|
||||
respondError(w, r, err.Error(), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
respondError(w, r, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug(fmt.Sprintf("Multiple choices! --> %v", list), "ruid", ruid)
|
||||
// show a nice page with links to the available entries
|
||||
ShowMultipleChoices(w, r, list)
|
||||
return
|
||||
}
|
||||
|
||||
// check the root chunk exists by retrieving the file's size
|
||||
if _, err := reader.Size(r.Context(), nil); err != nil {
|
||||
getFileNotFound.Inc(1)
|
||||
respondError(w, r, fmt.Sprintf("file not found %s: %s", uri, err), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if contentType != "" {
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
}
|
||||
|
||||
fileName := uri.Addr
|
||||
if found := path.Base(uri.Path); found != "" && found != "." && found != "/" {
|
||||
fileName = found
|
||||
}
|
||||
w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s\"", fileName))
|
||||
|
||||
http.ServeContent(w, r, fileName, time.Now(), newBufferedReadSeeker(reader, getFileBufferSize))
|
||||
}
|
||||
|
||||
// calculateNumberOfChunks calculates the number of chunks in an arbitrary content length
|
||||
func calculateNumberOfChunks(contentLength int64, isEncrypted bool) int64 {
|
||||
if contentLength < 4096 {
|
||||
return 1
|
||||
}
|
||||
branchingFactor := 128
|
||||
if isEncrypted {
|
||||
branchingFactor = 64
|
||||
}
|
||||
|
||||
dataChunks := math.Ceil(float64(contentLength) / float64(4096))
|
||||
totalChunks := dataChunks
|
||||
intermediate := dataChunks / float64(branchingFactor)
|
||||
|
||||
for intermediate > 1 {
|
||||
totalChunks += math.Ceil(intermediate)
|
||||
intermediate = intermediate / float64(branchingFactor)
|
||||
}
|
||||
|
||||
// add one for the root chunk
return int64(totalChunks) + 1
|
||||
}
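// Worked example (not in the original source): for a 1,000,000 byte upload,
// ceil(1000000/4096) = 245 leaf chunks, one intermediate level of ceil(245/128) = 2
// chunks, plus the root chunk added at the end:
//
//	calculateNumberOfChunks(1000000, false) // 245 + 2 + 1 = 248
//	calculateNumberOfChunks(1000000, true)  // branching 64: 245 + 4 + 1 = 250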
|
||||
|
||||
// The size of buffer used for bufio.Reader on LazyChunkReader passed to
|
||||
// http.ServeContent in HandleGetFile.
|
||||
// Warning: This value influences the number of chunk requests and chunker join goroutines
|
||||
// per file request.
|
||||
// The recommended value is 4 times io.Copy's default buffer size of 32kB, i.e. 128kB.
|
||||
const getFileBufferSize = 4 * 32 * 1024
|
||||
|
||||
// bufferedReadSeeker wraps bufio.Reader to expose the Seek method
// of the provided io.ReadSeeker in newBufferedReadSeeker.
|
||||
type bufferedReadSeeker struct {
|
||||
r io.Reader
|
||||
s io.Seeker
|
||||
}
|
||||
|
||||
// newBufferedReadSeeker creates a new instance of bufferedReadSeeker,
|
||||
// out of io.ReadSeeker. Argument `size` is the size of the read buffer.
|
||||
func newBufferedReadSeeker(readSeeker io.ReadSeeker, size int) bufferedReadSeeker {
|
||||
return bufferedReadSeeker{
|
||||
r: bufio.NewReaderSize(readSeeker, size),
|
||||
s: readSeeker,
|
||||
}
|
||||
}
|
||||
|
||||
func (b bufferedReadSeeker) Read(p []byte) (n int, err error) {
|
||||
return b.r.Read(p)
|
||||
}
|
||||
|
||||
func (b bufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
return b.s.Seek(offset, whence)
|
||||
}
|
||||
|
||||
type loggingResponseWriter struct {
|
||||
http.ResponseWriter
|
||||
statusCode int
|
||||
}
|
||||
|
||||
func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
|
||||
return &loggingResponseWriter{w, http.StatusOK}
|
||||
}
|
||||
|
||||
func (lrw *loggingResponseWriter) WriteHeader(code int) {
|
||||
lrw.statusCode = code
|
||||
lrw.ResponseWriter.WriteHeader(code)
|
||||
}
|
||||
|
||||
func isDecryptError(err error) bool {
|
||||
return strings.Contains(err.Error(), api.ErrDecrypt.Error())
|
||||
}
|
@ -1,100 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/swarm/api"
|
||||
"github.com/ethereum/go-ethereum/swarm/chunk"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/feed"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage/localstore"
|
||||
)
|
||||
|
||||
type TestServer interface {
|
||||
ServeHTTP(http.ResponseWriter, *http.Request)
|
||||
}
|
||||
|
||||
func NewTestSwarmServer(t *testing.T, serverFunc func(*api.API) TestServer, resolver api.Resolver) *TestSwarmServer {
|
||||
swarmDir, err := ioutil.TempDir("", "swarm-storage-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
localStore, err := localstore.New(swarmDir, make([]byte, 32), nil)
|
||||
if err != nil {
|
||||
os.RemoveAll(swarmDir)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tags := chunk.NewTags()
|
||||
fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams(), tags)
|
||||
|
||||
// Swarm feeds test setup
|
||||
feedsDir, err := ioutil.TempDir("", "swarm-feeds-test")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
feeds, err := feed.NewTestHandler(feedsDir, &feed.HandlerParams{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
swarmApi := api.NewAPI(fileStore, resolver, feeds.Handler, nil, tags)
|
||||
apiServer := httptest.NewServer(serverFunc(swarmApi))
|
||||
|
||||
tss := &TestSwarmServer{
|
||||
Server: apiServer,
|
||||
FileStore: fileStore,
|
||||
Tags: tags,
|
||||
dir: swarmDir,
|
||||
Hasher: storage.MakeHashFunc(storage.DefaultHash)(),
|
||||
cleanup: func() {
|
||||
apiServer.Close()
|
||||
fileStore.Close()
|
||||
feeds.Close()
|
||||
os.RemoveAll(swarmDir)
|
||||
os.RemoveAll(feedsDir)
|
||||
},
|
||||
CurrentTime: 42,
|
||||
}
|
||||
feed.TimestampProvider = tss
|
||||
return tss
|
||||
}
|
||||
|
||||
type TestSwarmServer struct {
|
||||
*httptest.Server
|
||||
Hasher storage.SwarmHash
|
||||
FileStore *storage.FileStore
|
||||
Tags *chunk.Tags
|
||||
dir string
|
||||
cleanup func()
|
||||
CurrentTime uint64
|
||||
}
|
||||
|
||||
func (t *TestSwarmServer) Close() {
|
||||
t.cleanup()
|
||||
}
|
||||
|
||||
func (t *TestSwarmServer) Now() feed.Timestamp {
|
||||
return feed.Timestamp{Time: t.CurrentTime}
|
||||
}
|
@ -1,84 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

type Inspector struct {
	api      *API
	hive     *network.Hive
	netStore *storage.NetStore
}

func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore) *Inspector {
	return &Inspector{api, hive, netStore}
}

// Hive prints the kademlia table
func (inspector *Inspector) Hive() string {
	return inspector.hive.String()
}

func (inspector *Inspector) ListKnown() []string {
	res := []string{}
	for _, v := range inspector.hive.Kademlia.ListKnown() {
		res = append(res, fmt.Sprintf("%v", v))
	}
	return res
}

func (inspector *Inspector) IsSyncing() bool {
	lastReceivedChunksMsg := metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)

	// last received chunks msg time
	lrct := time.Unix(0, lastReceivedChunksMsg.Value())

	// if the last received chunks msg arrived within the last 15 seconds, we consider the node
	// to still be syncing. Strictly speaking this is not correct, because the message might have
	// been a retrieve request, but it works for our purposes because we know we are not making
	// retrieve requests on the node while checking this.
	return lrct.After(time.Now().Add(-15 * time.Second))
}

// Has checks whether each chunk address is present in the underlying datastore;
// the returned string contains one character per address, "1" if the underlying
// datastore has the chunk stored with the given address, and "0" if not.
func (inspector *Inspector) Has(chunkAddresses []storage.Address) string {
	hostChunks := []string{}
	for _, addr := range chunkAddresses {
		has, err := inspector.netStore.Has(context.Background(), addr)
		if err != nil {
			log.Error(err.Error())
		}
		if has {
			hostChunks = append(hostChunks, "1")
		} else {
			hostChunks = append(hostChunks, "0")
		}
	}

	return strings.Join(hostChunks, "")
}
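Because `Has` flattens its results into a bitstring of `"1"`/`"0"` characters, callers index it positionally against the input slice. A minimal sketch, assuming `insp` and the addresses come from a running node (nothing here constructs them):

```go
// reportMissing prints every address whose chunk is absent from the local store.
func reportMissing(insp *api.Inspector, addrs []storage.Address) {
	report := insp.Has(addrs) // e.g. "101" for three addresses
	for i, c := range report {
		if c == '0' {
			fmt.Printf("chunk %x missing from local store\n", addrs[i])
		}
	}
}
```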
@ -1,584 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/swarm/storage/feed"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

const (
	ManifestType    = "application/bzz-manifest+json"
	FeedContentType = "application/bzz-feed"

	manifestSizeLimit = 5 * 1024 * 1024
)

// Manifest represents a swarm manifest
type Manifest struct {
	Entries []ManifestEntry `json:"entries,omitempty"`
}

// ManifestEntry represents an entry in a swarm manifest
type ManifestEntry struct {
	Hash        string       `json:"hash,omitempty"`
	Path        string       `json:"path,omitempty"`
	ContentType string       `json:"contentType,omitempty"`
	Mode        int64        `json:"mode,omitempty"`
	Size        int64        `json:"size,omitempty"`
	ModTime     time.Time    `json:"mod_time,omitempty"`
	Status      int          `json:"status,omitempty"`
	Access      *AccessEntry `json:"access,omitempty"`
	Feed        *feed.Feed   `json:"feed,omitempty"`
}

// ManifestList represents the result of listing files in a manifest
type ManifestList struct {
	CommonPrefixes []string         `json:"common_prefixes,omitempty"`
	Entries        []*ManifestEntry `json:"entries,omitempty"`
}

// NewManifest creates and stores a new, empty manifest
func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, error) {
	var manifest Manifest
	data, err := json.Marshal(&manifest)
	if err != nil {
		return nil, err
	}
	addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt)
	if err != nil {
		return nil, err
	}
	err = wait(ctx)
	return addr, err
}

// NewFeedManifest is a manifest hack for supporting Swarm feeds from the bzz: scheme
// see swarm/api/api.go:API.Get() for more information
func (a *API) NewFeedManifest(ctx context.Context, feed *feed.Feed) (storage.Address, error) {
	var manifest Manifest
	entry := ManifestEntry{
		Feed:        feed,
		ContentType: FeedContentType,
	}
	manifest.Entries = append(manifest.Entries, entry)
	data, err := json.Marshal(&manifest)
	if err != nil {
		return nil, err
	}
	addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false)
	if err != nil {
		return nil, err
	}
	err = wait(ctx)
	return addr, err
}

// ManifestWriter is used to add and remove entries from an underlying manifest
type ManifestWriter struct {
	api   *API
	trie  *manifestTrie
	quitC chan bool
}

func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWriter, error) {
	trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
	if err != nil {
		return nil, fmt.Errorf("error loading manifest %s: %s", addr, err)
	}
	return &ManifestWriter{a, trie, quitC}, nil
}

// AddEntry stores the given data and adds the resulting address to the manifest
func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (addr storage.Address, err error) {
	entry := newManifestTrieEntry(e, nil)
	if data != nil {
		var wait func(context.Context) error
		addr, wait, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted)
		if err != nil {
			return nil, err
		}
		err = wait(ctx)
		if err != nil {
			return nil, err
		}
		entry.Hash = addr.Hex()
	}
	if entry.Hash == "" {
		return addr, errors.New("missing entry hash")
	}
	m.trie.addEntry(entry, m.quitC)
	return addr, nil
}

// RemoveEntry removes the given path from the manifest
func (m *ManifestWriter) RemoveEntry(path string) error {
	m.trie.deleteEntry(path, m.quitC)
	return nil
}

// Store stores the manifest, returning the resulting storage address
func (m *ManifestWriter) Store() (storage.Address, error) {
	return m.trie.ref, m.trie.recalcAndStore()
}

// ManifestWalker is used to recursively walk the entries in the manifest and
// all of its submanifests
type ManifestWalker struct {
	api   *API
	trie  *manifestTrie
	quitC chan bool
}

func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, decrypt DecryptFunc, quitC chan bool) (*ManifestWalker, error) {
	trie, err := loadManifest(ctx, a.fileStore, addr, quitC, decrypt)
	if err != nil {
		return nil, fmt.Errorf("error loading manifest %s: %s", addr, err)
	}
	return &ManifestWalker{a, trie, quitC}, nil
}

// ErrSkipManifest is used as a return value from WalkFn to indicate that the
// manifest should be skipped
var ErrSkipManifest = errors.New("skip this manifest")

// WalkFn is the type of function called for each entry visited by a recursive
// manifest walk
type WalkFn func(entry *ManifestEntry) error

// Walk recursively walks the manifest calling walkFn for each entry in the
// manifest, including submanifests
func (m *ManifestWalker) Walk(walkFn WalkFn) error {
	return m.walk(m.trie, "", walkFn)
}

func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn) error {
	for _, entry := range &trie.entries {
		if entry == nil {
			continue
		}
		entry.Path = prefix + entry.Path
		err := walkFn(&entry.ManifestEntry)
		if err != nil {
			if entry.ContentType == ManifestType && err == ErrSkipManifest {
				continue
			}
			return err
		}
		if entry.ContentType != ManifestType {
			continue
		}
		if err := trie.loadSubTrie(entry, nil); err != nil {
			return err
		}
		if err := m.walk(entry.subtrie, entry.Path, walkFn); err != nil {
			return err
		}
	}
	return nil
}

type manifestTrie struct {
	fileStore *storage.FileStore
	entries   [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry
	ref       storage.Address         // if ref != nil, it is stored
	encrypted bool
	decrypt   DecryptFunc
}

func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry {
	return &manifestTrieEntry{
		ManifestEntry: *entry,
		subtrie:       subtrie,
	}
}

type manifestTrieEntry struct {
	ManifestEntry

	subtrie *manifestTrie
}

func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
	log.Trace("manifest lookup", "addr", addr)
	// retrieve manifest via FileStore
	manifestReader, isEncrypted := fileStore.Retrieve(ctx, addr)
	log.Trace("reader retrieved", "addr", addr)
	return readManifest(manifestReader, addr, fileStore, isEncrypted, quitC, decrypt)
}

func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
	// TODO check size for oversized manifests
	size, err := mr.Size(mr.Context(), quitC)
	if err != nil { // size == 0
		// not being able to determine the size means we don't have the root chunk
		log.Trace("manifest not found", "addr", addr)
		err = fmt.Errorf("Manifest not Found")
		return
	}
	if size > manifestSizeLimit {
		log.Warn("manifest exceeds size limit", "addr", addr, "size", size, "limit", manifestSizeLimit)
		err = fmt.Errorf("Manifest size of %v bytes exceeds the %v byte limit", size, manifestSizeLimit)
		return
	}
	manifestData := make([]byte, size)
	read, err := mr.Read(manifestData)
	if int64(read) < size {
		log.Trace("manifest not found", "addr", addr)
		if err == nil {
			err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
		}
		return
	}

	log.Debug("manifest retrieved", "addr", addr)
	var man struct {
		Entries []*manifestTrieEntry `json:"entries"`
	}
	err = json.Unmarshal(manifestData, &man)
	if err != nil {
		err = fmt.Errorf("Manifest %v is malformed: %v", addr.Log(), err)
		log.Trace("malformed manifest", "addr", addr)
		return
	}

	log.Trace("manifest entries", "addr", addr, "len", len(man.Entries))

	trie = &manifestTrie{
		fileStore: fileStore,
		encrypted: isEncrypted,
		decrypt:   decrypt,
	}
	for _, entry := range man.Entries {
		err = trie.addEntry(entry, quitC)
		if err != nil {
			return
		}
	}
	return
}

func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) error {
	mt.ref = nil // trie modified, hash needs to be re-calculated on demand

	if entry.ManifestEntry.Access != nil {
		if mt.decrypt == nil {
			return errors.New("dont have decryptor")
		}

		err := mt.decrypt(&entry.ManifestEntry)
		if err != nil {
			return err
		}
	}

	if len(entry.Path) == 0 {
		mt.entries[256] = entry
		return nil
	}

	b := entry.Path[0]
	oldentry := mt.entries[b]
	if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) {
		mt.entries[b] = entry
		return nil
	}

	cpl := 0
	for (len(entry.Path) > cpl) && (len(oldentry.Path) > cpl) && (entry.Path[cpl] == oldentry.Path[cpl]) {
		cpl++
	}

	if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) {
		if mt.loadSubTrie(oldentry, quitC) != nil {
			return nil
		}
		entry.Path = entry.Path[cpl:]
		oldentry.subtrie.addEntry(entry, quitC)
		oldentry.Hash = ""
		return nil
	}

	commonPrefix := entry.Path[:cpl]

	subtrie := &manifestTrie{
		fileStore: mt.fileStore,
		encrypted: mt.encrypted,
	}
	entry.Path = entry.Path[cpl:]
	oldentry.Path = oldentry.Path[cpl:]
	subtrie.addEntry(entry, quitC)
	subtrie.addEntry(oldentry, quitC)

	mt.entries[b] = newManifestTrieEntry(&ManifestEntry{
		Path:        commonPrefix,
		ContentType: ManifestType,
	}, subtrie)
	return nil
}

func (mt *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) {
	for _, e := range &mt.entries {
		if e != nil {
			cnt++
			entry = e
		}
	}
	return
}

func (mt *manifestTrie) deleteEntry(path string, quitC chan bool) {
	mt.ref = nil // trie modified, hash needs to be re-calculated on demand

	if len(path) == 0 {
		mt.entries[256] = nil
		return
	}

	b := path[0]
	entry := mt.entries[b]
	if entry == nil {
		return
	}
	if entry.Path == path {
		mt.entries[b] = nil
		return
	}

	epl := len(entry.Path)
	if (entry.ContentType == ManifestType) && (len(path) >= epl) && (path[:epl] == entry.Path) {
		if mt.loadSubTrie(entry, quitC) != nil {
			return
		}
		entry.subtrie.deleteEntry(path[epl:], quitC)
		entry.Hash = ""
		// remove subtree if it has fewer than 2 elements
		cnt, lastentry := entry.subtrie.getCountLast()
		if cnt < 2 {
			if lastentry != nil {
				lastentry.Path = entry.Path + lastentry.Path
			}
			mt.entries[b] = lastentry
		}
	}
}

func (mt *manifestTrie) recalcAndStore() error {
	if mt.ref != nil {
		return nil
	}

	var buffer bytes.Buffer
	buffer.WriteString(`{"entries":[`)

	list := &Manifest{}
	for _, entry := range &mt.entries {
		if entry != nil {
			if entry.Hash == "" { // TODO: parallelize
				err := entry.subtrie.recalcAndStore()
				if err != nil {
					return err
				}
				entry.Hash = entry.subtrie.ref.Hex()
			}
			list.Entries = append(list.Entries, entry.ManifestEntry)
		}

	}

	manifest, err := json.Marshal(list)
	if err != nil {
		return err
	}

	sr := bytes.NewReader(manifest)
	ctx := context.TODO()
	addr, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted)
	if err2 != nil {
		return err2
	}
	err2 = wait(ctx)
	mt.ref = addr
	return err2
}

func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) {
	if entry.ManifestEntry.Access != nil {
		if mt.decrypt == nil {
			return errors.New("dont have decryptor")
		}

		err := mt.decrypt(&entry.ManifestEntry)
		if err != nil {
			return err
		}
	}

	if entry.subtrie == nil {
		hash := common.Hex2Bytes(entry.Hash)
		entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC, mt.decrypt)
		entry.Hash = "" // might not match, should be recalculated
	}
	return
}

func (mt *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) error {
	plen := len(prefix)
	var start, stop int
	if plen == 0 {
		start = 0
		stop = 256
	} else {
		start = int(prefix[0])
		stop = start
	}

	for i := start; i <= stop; i++ {
		select {
		case <-quitC:
			return fmt.Errorf("aborted")
		default:
		}
		entry := mt.entries[i]
		if entry != nil {
			epl := len(entry.Path)
			if entry.ContentType == ManifestType {
				l := plen
				if epl < l {
					l = epl
				}
				if prefix[:l] == entry.Path[:l] {
					err := mt.loadSubTrie(entry, quitC)
					if err != nil {
						return err
					}
					err = entry.subtrie.listWithPrefixInt(prefix[l:], rp+entry.Path[l:], quitC, cb)
					if err != nil {
						return err
					}
				}
			} else {
				if (epl >= plen) && (prefix == entry.Path[:plen]) {
					cb(entry, rp+entry.Path[plen:])
				}
			}
		}
	}
	return nil
}

func (mt *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) (err error) {
	return mt.listWithPrefixInt(prefix, "", quitC, cb)
}

func (mt *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) {
	log.Trace(fmt.Sprintf("findPrefixOf(%s)", path))

	if len(path) == 0 {
		return mt.entries[256], 0
	}

	// see if the first char is in the manifest entries
	b := path[0]
	entry = mt.entries[b]
	if entry == nil {
		return mt.entries[256], 0
	}

	epl := len(entry.Path)
	log.Trace(fmt.Sprintf("path = %v entry.Path = %v epl = %v", path, entry.Path, epl))
	if len(path) <= epl {
		if entry.Path[:len(path)] == path {
			if entry.ContentType == ManifestType {
				err := mt.loadSubTrie(entry, quitC)
				if err == nil && entry.subtrie != nil {
					subentries := entry.subtrie.entries
					for i := 0; i < len(subentries); i++ {
						sub := subentries[i]
						if sub != nil && sub.Path == "" {
							return sub, len(path)
						}
					}
				}
				entry.Status = http.StatusMultipleChoices
			}
			pos = len(path)
			return
		}
		return nil, 0
	}
	if path[:epl] == entry.Path {
		log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType))
		// the subentry is a manifest, load subtrie
		if entry.ContentType == ManifestType && (strings.Contains(entry.Path, path) || strings.Contains(path, entry.Path)) {
			err := mt.loadSubTrie(entry, quitC)
			if err != nil {
				return nil, 0
			}
			sub, pos := entry.subtrie.findPrefixOf(path[epl:], quitC)
			if sub != nil {
				entry = sub
				pos += epl
				return sub, pos
			} else if path == entry.Path {
				entry.Status = http.StatusMultipleChoices
			}

		} else {
			// entry is not a manifest, return it
			if path != entry.Path {
				return nil, 0
			}
		}
	}
	return nil, 0
}

// RegularSlashes regularizes paths for the file system manifest:
// no leading or trailing slashes, only single slashes inside
func RegularSlashes(path string) (res string) {
	for i := 0; i < len(path); i++ {
		if (path[i] != '/') || ((i > 0) && (path[i-1] != '/')) {
			res = res + path[i:i+1]
		}
	}
	if (len(res) > 0) && (res[len(res)-1] == '/') {
		res = res[:len(res)-1]
	}
	return
}

func (mt *manifestTrie) getEntry(spath string) (entry *manifestTrieEntry, fullpath string) {
	path := RegularSlashes(spath)
	var pos int
	quitC := make(chan bool)
	entry, pos = mt.findPrefixOf(path, quitC)
	return entry, path[:pos]
}
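The writer/walker pair above is the main programmatic surface of this file. A minimal sketch of tying them together, assuming an initialized `*api.API` value `a` wired to a FileStore (which this listing does not show):

```go
// buildAndList creates a manifest with one entry, stores it, then lists it recursively.
func buildAndList(a *api.API) error {
	ctx := context.Background()

	addr, err := a.NewManifest(ctx, false) // empty, unencrypted manifest
	if err != nil {
		return err
	}
	mw, err := a.NewManifestWriter(ctx, addr, nil)
	if err != nil {
		return err
	}
	data := "hello"
	if _, err := mw.AddEntry(ctx, strings.NewReader(data), &api.ManifestEntry{
		Path:        "greeting.txt",
		ContentType: "text/plain",
		Size:        int64(len(data)),
	}); err != nil {
		return err
	}
	root, err := mw.Store() // recalculates hashes and stores the trie
	if err != nil {
		return err
	}

	walker, err := a.NewManifestWalker(ctx, root, api.NOOPDecrypt, nil)
	if err != nil {
		return err
	}
	// Walk descends into submanifests; returning ErrSkipManifest from the
	// callback would prune one instead.
	return walker.Walk(func(e *api.ManifestEntry) error {
		fmt.Println(e.Path, e.ContentType)
		return nil
	})
}
```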
@ -1,176 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"strings"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/chunk"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

func manifest(paths ...string) (manifestReader storage.LazySectionReader) {
	var entries []string
	for _, path := range paths {
		entry := fmt.Sprintf(`{"path":"%s"}`, path)
		entries = append(entries, entry)
	}
	manifest := fmt.Sprintf(`{"entries":[%s]}`, strings.Join(entries, ","))
	return &storage.LazyTestSectionReader{
		SectionReader: io.NewSectionReader(strings.NewReader(manifest), 0, int64(len(manifest))),
	}
}

func testGetEntry(t *testing.T, path, match string, multiple bool, paths ...string) *manifestTrie {
	quitC := make(chan bool)
	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
	ref := make([]byte, fileStore.HashSize())
	trie, err := readManifest(manifest(paths...), ref, fileStore, false, quitC, NOOPDecrypt)
	if err != nil {
		t.Errorf("unexpected error making manifest: %v", err)
	}
	checkEntry(t, path, match, multiple, trie)
	return trie
}

func checkEntry(t *testing.T, path, match string, multiple bool, trie *manifestTrie) {
	entry, fullpath := trie.getEntry(path)
	if match == "-" && entry != nil {
		t.Errorf("expected no match for '%s', got '%s'", path, fullpath)
	} else if entry == nil {
		if match != "-" {
			t.Errorf("expected entry '%s' to match '%s', got no match", match, path)
		}
	} else if fullpath != match {
		t.Errorf("incorrect entry retrieved for '%s'. expected path '%v', got '%s'", path, match, fullpath)
	}

	if multiple && entry.Status != http.StatusMultipleChoices {
		t.Errorf("Expected %d Multiple Choices Status for path %s, match %s, got %d", http.StatusMultipleChoices, path, match, entry.Status)
	} else if !multiple && entry != nil && entry.Status == http.StatusMultipleChoices {
		t.Errorf("Were not expecting %d Multiple Choices Status for path %s, match %s, but got it", http.StatusMultipleChoices, path, match)
	}
}

func TestGetEntry(t *testing.T) {
	// file system manifest always contains regularized paths
	testGetEntry(t, "a", "a", false, "a")
	testGetEntry(t, "b", "-", false, "a")
	testGetEntry(t, "/a//", "a", false, "a")
	// fallback
	testGetEntry(t, "/a", "", false, "")
	testGetEntry(t, "/a/b", "a/b", false, "a/b")
	// longest/deepest match
	testGetEntry(t, "read", "read", true, "readme.md", "readit.md")
	testGetEntry(t, "rf", "-", false, "readme.md", "readit.md")
	testGetEntry(t, "readme", "readme", false, "readme.md")
	testGetEntry(t, "readme", "-", false, "readit.md")
	testGetEntry(t, "readme.md", "readme.md", false, "readme.md")
	testGetEntry(t, "readme.md", "-", false, "readit.md")
	testGetEntry(t, "readmeAmd", "-", false, "readit.md")
	testGetEntry(t, "readme.mdffff", "-", false, "readme.md")
	testGetEntry(t, "ab", "ab", true, "ab/cefg", "ab/cedh", "ab/kkkkkk")
	testGetEntry(t, "ab/ce", "ab/ce", true, "ab/cefg", "ab/cedh", "ab/ceuuuuuuuuuu")
	testGetEntry(t, "abc", "abc", true, "abcd", "abczzzzef", "abc/def", "abc/e/g")
	testGetEntry(t, "a/b", "a/b", true, "a", "a/bc", "a/ba", "a/b/c")
	testGetEntry(t, "a/b", "a/b", false, "a", "a/b", "a/bb", "a/b/c")
	testGetEntry(t, "//a//b//", "a/b", false, "a", "a/b", "a/bb", "a/b/c")
}

func TestExactMatch(t *testing.T) {
	quitC := make(chan bool)
	mf := manifest("shouldBeExactMatch.css", "shouldBeExactMatch.css.map")
	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
	ref := make([]byte, fileStore.HashSize())
	trie, err := readManifest(mf, ref, fileStore, false, quitC, nil)
	if err != nil {
		t.Errorf("unexpected error making manifest: %v", err)
	}
	entry, _ := trie.getEntry("shouldBeExactMatch.css")
	if entry.Path != "" {
		t.Errorf("Expected entry to match %s, got: %s", "shouldBeExactMatch.css", entry.Path)
	}
	if entry.Status == http.StatusMultipleChoices {
		t.Errorf("Got status %d, which is unexpected", http.StatusMultipleChoices)
	}
}

func TestDeleteEntry(t *testing.T) {

}

// TestAddFileWithManifestPath tests that adding an entry at a path which
// already exists as a manifest just adds the entry to the manifest rather
// than replacing the manifest with the entry
func TestAddFileWithManifestPath(t *testing.T) {
	// create a manifest containing "ab" and "ac"
	manifest, _ := json.Marshal(&Manifest{
		Entries: []ManifestEntry{
			{Path: "ab", Hash: "ab"},
			{Path: "ac", Hash: "ac"},
		},
	})
	reader := &storage.LazyTestSectionReader{
		SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
	}
	fileStore := storage.NewFileStore(nil, storage.NewFileStoreParams(), chunk.NewTags())
	ref := make([]byte, fileStore.HashSize())
	trie, err := readManifest(reader, ref, fileStore, false, nil, NOOPDecrypt)
	if err != nil {
		t.Fatal(err)
	}
	checkEntry(t, "ab", "ab", false, trie)
	checkEntry(t, "ac", "ac", false, trie)

	// now add path "a" and check we can still get "ab" and "ac"
	entry := &manifestTrieEntry{}
	entry.Path = "a"
	entry.Hash = "a"
	trie.addEntry(entry, nil)
	checkEntry(t, "ab", "ab", false, trie)
	checkEntry(t, "ac", "ac", false, trie)
	checkEntry(t, "a", "a", false, trie)
}

// TestReadManifestOverSizeLimit creates a manifest reader with data longer than
// manifestSizeLimit and checks that readManifest returns the exact error
// message.
// The manifest data is not in json-encoded format, preventing possible
// successful parsing attempts if the limit check fails.
func TestReadManifestOverSizeLimit(t *testing.T) {
	manifest := make([]byte, manifestSizeLimit+1)
	reader := &storage.LazyTestSectionReader{
		SectionReader: io.NewSectionReader(bytes.NewReader(manifest), 0, int64(len(manifest))),
	}
	_, err := readManifest(reader, storage.Address{}, nil, false, nil, NOOPDecrypt)
	if err == nil {
		t.Fatal("got no error from readManifest")
	}
	// The error message is part of the http response body,
	// which justifies exact string validation.
	got := err.Error()
	want := fmt.Sprintf("Manifest size of %v bytes exceeds the %v byte limit", len(manifest), manifestSizeLimit)
	if got != want {
		t.Fatalf("got error message %q, expected %q", got, want)
	}
}
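Every path assertion above is filtered through RegularSlashes before lookup; a few concrete normalizations make the expected fullpath values in TestGetEntry easier to read. The inputs below are chosen for illustration:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	// Leading, duplicate and trailing slashes are all dropped.
	fmt.Println(api.RegularSlashes("//a//b//")) // a/b
	fmt.Println(api.RegularSlashes("/a"))       // a
	fmt.Println(api.RegularSlashes("a/b/c"))    // a/b/c (already regular)
}
```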
BIN
swarm/api/testdata/test0/img/logo.png
vendored
Binary file not shown.
Before Width: | Height: | Size: 4.0 KiB
9
swarm/api/testdata/test0/index.css
vendored
@ -1,9 +0,0 @@
h1 {
	color: black;
	font-size: 12px;
	background-color: orange;
	border: 4px solid black;
}
body {
	background-color: orange
}
10
swarm/api/testdata/test0/index.html
vendored
@ -1,10 +0,0 @@
<!DOCTYPE html>
<html>
<head>
	<link rel="stylesheet" href="index.css">
</head>
<body>
	<h1>Swarm Test</h1>
	<img src="img/logo.gif" align="center", alt="Ethereum logo">
</body>
</html>
144
swarm/api/uri.go
@ -1,144 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"fmt"
	"net/url"
	"regexp"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

// matches hex swarm hashes
// TODO: this is bad, the hash length should not be hardcoded here
var hashMatcher = regexp.MustCompile("^([0-9A-Fa-f]{64})([0-9A-Fa-f]{64})?$")

// URI is a reference to content stored in swarm.
type URI struct {
	// Scheme has one of the following values:
	//
	// * bzz           - an entry in a swarm manifest
	// * bzz-raw       - raw swarm content
	// * bzz-immutable - immutable URI of an entry in a swarm manifest
	//                   (address is not resolved)
	// * bzz-list      - list of all files contained in a swarm manifest
	//
	Scheme string

	// Addr is either a hexadecimal storage address or an address which
	// resolves to a storage address
	Addr string

	// addr stores the parsed storage address
	addr storage.Address

	// Path is the path to the content within a swarm manifest
	Path string
}

func (u *URI) MarshalJSON() (out []byte, err error) {
	return []byte(`"` + u.String() + `"`), nil
}

func (u *URI) UnmarshalJSON(value []byte) error {
	uri, err := Parse(string(value))
	if err != nil {
		return err
	}
	*u = *uri
	return nil
}

// Parse parses rawuri into a URI struct, where rawuri is expected to have one
// of the following formats:
//
// * <scheme>:/
// * <scheme>:/<addr>
// * <scheme>:/<addr>/<path>
// * <scheme>://
// * <scheme>://<addr>
// * <scheme>://<addr>/<path>
//
// with scheme one of bzz, bzz-raw, bzz-immutable, bzz-list or bzz-hash
func Parse(rawuri string) (*URI, error) {
	u, err := url.Parse(rawuri)
	if err != nil {
		return nil, err
	}
	uri := &URI{Scheme: u.Scheme}

	// check the scheme is valid
	switch uri.Scheme {
	case "bzz", "bzz-raw", "bzz-immutable", "bzz-list", "bzz-hash", "bzz-feed":
	default:
		return nil, fmt.Errorf("unknown scheme %q", u.Scheme)
	}

	// handle URIs like bzz://<addr>/<path> where the addr and path
	// have already been split by url.Parse
	if u.Host != "" {
		uri.Addr = u.Host
		uri.Path = strings.TrimLeft(u.Path, "/")
		return uri, nil
	}

	// URI is like bzz:/<addr>/<path> so split the addr and path from
	// the raw path (which will be /<addr>/<path>)
	parts := strings.SplitN(strings.TrimLeft(u.Path, "/"), "/", 2)
	uri.Addr = parts[0]
	if len(parts) == 2 {
		uri.Path = parts[1]
	}
	return uri, nil
}

func (u *URI) Feed() bool {
	return u.Scheme == "bzz-feed"
}

func (u *URI) Raw() bool {
	return u.Scheme == "bzz-raw"
}

func (u *URI) Immutable() bool {
	return u.Scheme == "bzz-immutable"
}

func (u *URI) List() bool {
	return u.Scheme == "bzz-list"
}

func (u *URI) Hash() bool {
	return u.Scheme == "bzz-hash"
}

func (u *URI) String() string {
	return u.Scheme + ":/" + u.Addr + "/" + u.Path
}

func (u *URI) Address() storage.Address {
	if u.addr != nil {
		return u.addr
	}
	if hashMatcher.MatchString(u.Addr) {
		u.addr = common.Hex2Bytes(u.Addr)
		return u.addr
	}
	return nil
}
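The split between Addr and Path is easiest to see on a concrete input. A short sketch (the address is shortened for illustration and is deliberately not a valid 64-digit hash):

```go
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/swarm/api"
)

func main() {
	u, err := api.Parse("bzz://abc123/path/to/entry")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.Scheme) // bzz
	fmt.Println(u.Addr)   // abc123
	fmt.Println(u.Path)   // path/to/entry
	// "abc123" does not match the 64-hex-digit hashMatcher,
	// so no storage address is parsed:
	fmt.Println(u.Address() == nil) // true
}
```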
@ -1,175 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package api

import (
	"bytes"
	"reflect"
	"testing"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func TestParseURI(t *testing.T) {
	type test struct {
		uri             string
		expectURI       *URI
		expectErr       bool
		expectRaw       bool
		expectImmutable bool
		expectList      bool
		expectHash      bool
		expectValidKey  bool
		expectAddr      storage.Address
	}
	tests := []test{
		{
			uri:       "",
			expectErr: true,
		},
		{
			uri:       "foo",
			expectErr: true,
		},
		{
			uri:       "bzz",
			expectErr: true,
		},
		{
			uri:       "bzz:",
			expectURI: &URI{Scheme: "bzz"},
		},
		{
			uri:             "bzz-immutable:",
			expectURI:       &URI{Scheme: "bzz-immutable"},
			expectImmutable: true,
		},
		{
			uri:       "bzz-raw:",
			expectURI: &URI{Scheme: "bzz-raw"},
			expectRaw: true,
		},
		{
			uri:       "bzz:/",
			expectURI: &URI{Scheme: "bzz"},
		},
		{
			uri:       "bzz:/abc123",
			expectURI: &URI{Scheme: "bzz", Addr: "abc123"},
		},
		{
			uri:       "bzz:/abc123/path/to/entry",
			expectURI: &URI{Scheme: "bzz", Addr: "abc123", Path: "path/to/entry"},
		},
		{
			uri:       "bzz-raw:/",
			expectURI: &URI{Scheme: "bzz-raw"},
			expectRaw: true,
		},
		{
			uri:       "bzz-raw:/abc123",
			expectURI: &URI{Scheme: "bzz-raw", Addr: "abc123"},
			expectRaw: true,
		},
		{
			uri:       "bzz-raw:/abc123/path/to/entry",
			expectURI: &URI{Scheme: "bzz-raw", Addr: "abc123", Path: "path/to/entry"},
			expectRaw: true,
		},
		{
			uri:       "bzz://",
			expectURI: &URI{Scheme: "bzz"},
		},
		{
			uri:       "bzz://abc123",
			expectURI: &URI{Scheme: "bzz", Addr: "abc123"},
		},
		{
			uri:       "bzz://abc123/path/to/entry",
			expectURI: &URI{Scheme: "bzz", Addr: "abc123", Path: "path/to/entry"},
		},
		{
			uri:        "bzz-hash:",
			expectURI:  &URI{Scheme: "bzz-hash"},
			expectHash: true,
		},
		{
			uri:        "bzz-hash:/",
			expectURI:  &URI{Scheme: "bzz-hash"},
			expectHash: true,
		},
		{
			uri:        "bzz-list:",
			expectURI:  &URI{Scheme: "bzz-list"},
			expectList: true,
		},
		{
			uri:        "bzz-list:/",
			expectURI:  &URI{Scheme: "bzz-list"},
			expectList: true,
		},
		{
			uri: "bzz-raw://4378d19c26590f1a818ed7d6a62c3809e149b0999cab5ce5f26233b3b423bf8c",
			expectURI: &URI{Scheme: "bzz-raw",
				Addr: "4378d19c26590f1a818ed7d6a62c3809e149b0999cab5ce5f26233b3b423bf8c",
			},
			expectValidKey: true,
			expectRaw:      true,
			expectAddr: storage.Address{67, 120, 209, 156, 38, 89, 15, 26,
				129, 142, 215, 214, 166, 44, 56, 9,
				225, 73, 176, 153, 156, 171, 92, 229,
				242, 98, 51, 179, 180, 35, 191, 140,
			},
		},
	}
	for _, x := range tests {
		actual, err := Parse(x.uri)
		if x.expectErr {
			if err == nil {
				t.Fatalf("expected %s to error", x.uri)
			}
			continue
		}
		if err != nil {
			t.Fatalf("error parsing %s: %s", x.uri, err)
		}
		if !reflect.DeepEqual(actual, x.expectURI) {
			t.Fatalf("expected %s to return %#v, got %#v", x.uri, x.expectURI, actual)
		}
		if actual.Raw() != x.expectRaw {
			t.Fatalf("expected %s raw to be %t, got %t", x.uri, x.expectRaw, actual.Raw())
		}
		if actual.Immutable() != x.expectImmutable {
			t.Fatalf("expected %s immutable to be %t, got %t", x.uri, x.expectImmutable, actual.Immutable())
		}
		if actual.List() != x.expectList {
			t.Fatalf("expected %s list to be %t, got %t", x.uri, x.expectList, actual.List())
		}
		if actual.Hash() != x.expectHash {
			t.Fatalf("expected %s hash to be %t, got %t", x.uri, x.expectHash, actual.Hash())
		}
		if x.expectValidKey {
			if actual.Address() == nil {
				t.Fatalf("expected %s to return a valid key, got nil", x.uri)
			} else {
				if !bytes.Equal(x.expectAddr, actual.Address()) {
					t.Fatalf("expected %s to be decoded to %v", x.expectURI.Addr, x.expectAddr)
				}
			}
		}
	}
}
690
swarm/bmt/bmt.go
@ -1,690 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bmt provides a binary merkle tree implementation used for swarm chunk hash
|
||||
package bmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
/*
|
||||
Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size.
|
||||
It is defined as the root hash of the binary merkle tree built over fixed size segments
|
||||
of the underlying chunk using any base hash function (e.g., keccak 256 SHA3).
|
||||
Chunks with data shorter than the fixed size are hashed as if they had zero padding.
|
||||
|
||||
BMT hash is used as the chunk hash function in swarm which in turn is the basis for the
|
||||
128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash
|
||||
|
||||
The BMT is optimal for providing compact inclusion proofs, i.e. prove that a
|
||||
segment is a substring of a chunk starting at a particular offset.
|
||||
The size of the underlying segments is fixed to the size of the base hash (called the resolution
|
||||
of the BMT hash), Using Keccak256 SHA3 hash is 32 bytes, the EVM word size to optimize for on-chain BMT verification
|
||||
as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash.
|
||||
|
||||
Two implementations are provided:
|
||||
|
||||
* RefHasher is optimized for code simplicity and meant as a reference implementation
|
||||
that is simple to understand
|
||||
* Hasher is optimized for speed taking advantage of concurrency with minimalistic
|
||||
control structure to coordinate the concurrent routines
|
||||
|
||||
BMT Hasher implements the following interfaces
|
||||
* standard golang hash.Hash - synchronous, reusable
|
||||
* SwarmHash - SumWithSpan provided
|
||||
* io.Writer - synchronous left-to-right datawriter
|
||||
* AsyncWriter - concurrent section writes and asynchronous Sum call
|
||||
*/
|
||||
|
||||
const (
|
||||
// PoolSize is the maximum number of bmt trees used by the hashers, i.e,
|
||||
// the maximum number of concurrent BMT hashing operations performed by the same hasher
|
||||
PoolSize = 8
|
||||
)
|
||||
|
||||
// BaseHasherFunc is a hash.Hash constructor function used for the base hash of the BMT.
|
||||
// implemented by Keccak256 SHA3 sha3.NewLegacyKeccak256
|
||||
type BaseHasherFunc func() hash.Hash
|
||||
|
||||
// Hasher a reusable hasher for fixed maximum size chunks representing a BMT
|
||||
// - implements the hash.Hash interface
|
||||
// - reuses a pool of trees for amortised memory allocation and resource control
|
||||
// - supports order-agnostic concurrent segment writes and section (double segment) writes
|
||||
// as well as sequential read and write
|
||||
// - the same hasher instance must not be called concurrently on more than one chunk
|
||||
// - the same hasher instance is synchronously reuseable
|
||||
// - Sum gives back the tree to the pool and guaranteed to leave
|
||||
// the tree and itself in a state reusable for hashing a new chunk
|
||||
// - generates and verifies segment inclusion proofs (TODO:)
|
||||
type Hasher struct {
|
||||
pool *TreePool // BMT resource pool
|
||||
bmt *tree // prebuilt BMT resource for flowcontrol and proofs
|
||||
}
|
||||
|
||||
// New creates a reusable BMT Hasher that
|
||||
// pulls a new tree from a resource pool for hashing each chunk
|
||||
func New(p *TreePool) *Hasher {
|
||||
return &Hasher{
|
||||
pool: p,
|
||||
}
|
||||
}
|
||||
|
||||
// TreePool provides a pool of trees used as resources by the BMT Hasher.
|
||||
// A tree popped from the pool is guaranteed to have a clean state ready
|
||||
// for hashing a new chunk.
|
||||
type TreePool struct {
|
||||
lock sync.Mutex
|
||||
c chan *tree // the channel to obtain a resource from the pool
|
||||
hasher BaseHasherFunc // base hasher to use for the BMT levels
|
||||
SegmentSize int // size of leaf segments, stipulated to be = hash size
|
||||
SegmentCount int // the number of segments on the base level of the BMT
|
||||
Capacity int // pool capacity, controls concurrency
|
||||
Depth int // depth of the bmt trees = int(log2(segmentCount))+1
|
||||
Size int // the total length of the data (count * size)
|
||||
count int // current count of (ever) allocated resources
|
||||
zerohashes [][]byte // lookup table for predictable padding subtrees for all levels
|
||||
}
|
||||
|
||||
// NewTreePool creates a tree pool with hasher, segment size, segment count and capacity
|
||||
// on Hasher.getTree it reuses free trees or creates a new one if capacity is not reached
|
||||
func NewTreePool(hasher BaseHasherFunc, segmentCount, capacity int) *TreePool {
|
||||
// initialises the zerohashes lookup table
|
||||
depth := calculateDepthFor(segmentCount)
|
||||
segmentSize := hasher().Size()
|
||||
zerohashes := make([][]byte, depth+1)
|
||||
zeros := make([]byte, segmentSize)
|
||||
zerohashes[0] = zeros
|
||||
h := hasher()
|
||||
for i := 1; i < depth+1; i++ {
|
||||
zeros = doSum(h, nil, zeros, zeros)
|
||||
zerohashes[i] = zeros
|
||||
}
|
||||
return &TreePool{
|
||||
c: make(chan *tree, capacity),
|
||||
hasher: hasher,
|
||||
SegmentSize: segmentSize,
|
||||
SegmentCount: segmentCount,
|
||||
Capacity: capacity,
|
||||
Size: segmentCount * segmentSize,
|
||||
Depth: depth,
|
||||
zerohashes: zerohashes,
|
||||
}
|
||||
}
|
||||
|
||||
// Drain drains the pool until it has no more than n resources
|
||||
func (p *TreePool) Drain(n int) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
for len(p.c) > n {
|
||||
<-p.c
|
||||
p.count--
|
||||
}
|
||||
}
|
||||
|
||||
// Reserve is blocking until it returns an available tree
|
||||
// it reuses free trees or creates a new one if size is not reached
|
||||
// TODO: should use a context here
|
||||
func (p *TreePool) reserve() *tree {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
var t *tree
|
||||
if p.count == p.Capacity {
|
||||
return <-p.c
|
||||
}
|
||||
select {
|
||||
case t = <-p.c:
|
||||
default:
|
||||
t = newTree(p.SegmentSize, p.Depth, p.hasher)
|
||||
p.count++
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// release gives back a tree to the pool.
|
||||
// this tree is guaranteed to be in reusable state
|
||||
func (p *TreePool) release(t *tree) {
|
||||
p.c <- t // can never fail ...
|
||||
}
|
||||
|
||||
// tree is a reusable control structure representing a BMT
|
||||
// organised in a binary tree
|
||||
// Hasher uses a TreePool to obtain a tree for each chunk hash
|
||||
// the tree is 'locked' while not in the pool
|
||||
type tree struct {
|
||||
leaves []*node // leaf nodes of the tree, other nodes accessible via parent links
|
||||
cursor int // index of rightmost currently open segment
|
||||
offset int // offset (cursor position) within currently open segment
|
||||
section []byte // the rightmost open section (double segment)
|
||||
result chan []byte // result channel
|
||||
span []byte // The span of the data subsumed under the chunk
|
||||
}
|
||||
|
||||
// node is a reuseable segment hasher representing a node in a BMT
|
||||
type node struct {
|
||||
isLeft bool // whether it is left side of the parent double segment
|
||||
parent *node // pointer to parent node in the BMT
|
||||
state int32 // atomic increment impl concurrent boolean toggle
|
||||
left, right []byte // this is where the two children sections are written
|
||||
hasher hash.Hash // preconstructed hasher on nodes
|
||||
}
|
||||
|
||||
// newNode constructs a segment hasher node in the BMT (used by newTree)
|
||||
func newNode(index int, parent *node, hasher hash.Hash) *node {
|
||||
return &node{
|
||||
parent: parent,
|
||||
isLeft: index%2 == 0,
|
||||
hasher: hasher,
|
||||
}
|
||||
}
|
||||
|
||||
// Draw draws the BMT (badly)
|
||||
func (t *tree) draw(hash []byte) string {
|
||||
var left, right []string
|
||||
var anc []*node
|
||||
for i, n := range t.leaves {
|
||||
left = append(left, fmt.Sprintf("%v", hashstr(n.left)))
|
||||
if i%2 == 0 {
|
||||
anc = append(anc, n.parent)
|
||||
}
|
||||
right = append(right, fmt.Sprintf("%v", hashstr(n.right)))
|
||||
}
|
||||
anc = t.leaves
|
||||
var hashes [][]string
|
||||
for l := 0; len(anc) > 0; l++ {
|
||||
var nodes []*node
|
||||
hash := []string{""}
|
||||
for i, n := range anc {
|
||||
hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right)))
|
||||
if i%2 == 0 && n.parent != nil {
|
||||
nodes = append(nodes, n.parent)
|
||||
}
|
||||
}
|
||||
hash = append(hash, "")
|
||||
hashes = append(hashes, hash)
|
||||
anc = nodes
|
||||
}
|
||||
hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""})
|
||||
total := 60
|
||||
del := " "
|
||||
var rows []string
|
||||
for i := len(hashes) - 1; i >= 0; i-- {
|
||||
var textlen int
|
||||
hash := hashes[i]
|
||||
for _, s := range hash {
|
||||
textlen += len(s)
|
||||
}
|
||||
if total < textlen {
|
||||
total = textlen + len(hash)
|
||||
}
|
||||
delsize := (total - textlen) / (len(hash) - 1)
|
||||
if delsize > len(del) {
|
||||
delsize = len(del)
|
||||
}
|
||||
row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize]))
|
||||
rows = append(rows, row)
|
||||
|
||||
}
|
||||
rows = append(rows, strings.Join(left, " "))
|
||||
rows = append(rows, strings.Join(right, " "))
|
||||
return strings.Join(rows, "\n") + "\n"
|
||||
}
|
||||
|
||||
// newTree initialises a tree by building up the nodes of a BMT
|
||||
// - segment size is stipulated to be the size of the hash
|
||||
func newTree(segmentSize, depth int, hashfunc func() hash.Hash) *tree {
|
||||
n := newNode(0, nil, hashfunc())
|
||||
prevlevel := []*node{n}
|
||||
// iterate over levels and creates 2^(depth-level) nodes
|
||||
// the 0 level is on double segment sections so we start at depth - 2 since
|
||||
count := 2
|
||||
for level := depth - 2; level >= 0; level-- {
|
||||
nodes := make([]*node, count)
|
||||
for i := 0; i < count; i++ {
|
||||
parent := prevlevel[i/2]
|
||||
var hasher hash.Hash
|
||||
if level == 0 {
|
||||
hasher = hashfunc()
|
||||
}
|
||||
nodes[i] = newNode(i, parent, hasher)
|
||||
}
|
||||
prevlevel = nodes
|
||||
count *= 2
|
||||
}
|
||||
// the datanode level is the nodes on the last level
|
||||
return &tree{
|
||||
leaves: prevlevel,
|
||||
result: make(chan []byte),
|
||||
section: make([]byte, 2*segmentSize),
|
||||
}
|
||||
}
|
||||
|
||||
// methods needed to implement hash.Hash
|
||||
|
||||
// Size returns the size
|
||||
func (h *Hasher) Size() int {
|
||||
return h.pool.SegmentSize
|
||||
}
|
||||
|
||||
// BlockSize returns the block size
|
||||
func (h *Hasher) BlockSize() int {
|
||||
return 2 * h.pool.SegmentSize
|
||||
}
|
||||
|
||||
// Sum returns the BMT root hash of the buffer
|
||||
// using Sum presupposes sequential synchronous writes (io.Writer interface)
|
||||
// hash.Hash interface Sum method appends the byte slice to the underlying
|
||||
// data before it calculates and returns the hash of the chunk
|
||||
// caller must make sure Sum is not called concurrently with Write, writeSection
|
||||
func (h *Hasher) Sum(b []byte) (s []byte) {
|
||||
t := h.getTree()
|
||||
// write the last section with final flag set to true
|
||||
go h.writeSection(t.cursor, t.section, true, true)
|
||||
// wait for the result
|
||||
s = <-t.result
|
||||
span := t.span
|
||||
// release the tree resource back to the pool
|
||||
h.releaseTree()
|
||||
// b + sha3(span + BMT(pure_chunk))
|
||||
if len(span) == 0 {
|
||||
return append(b, s...)
|
||||
}
|
||||
return doSum(h.pool.hasher(), b, span, s)
|
||||
}
|
||||
|
||||
// methods needed to implement the SwarmHash and the io.Writer interfaces
|
||||
|
||||
// Write calls sequentially add to the buffer to be hashed,
|
||||
// with every full segment calls writeSection in a go routine
|
||||
func (h *Hasher) Write(b []byte) (int, error) {
|
||||
l := len(b)
|
||||
if l == 0 || l > h.pool.Size {
|
||||
return 0, nil
|
||||
}
|
||||
t := h.getTree()
|
||||
secsize := 2 * h.pool.SegmentSize
|
||||
// calculate length of missing bit to complete current open section
|
||||
smax := secsize - t.offset
|
||||
// if at the beginning of chunk or middle of the section
|
||||
if t.offset < secsize {
|
||||
// fill up current segment from buffer
|
||||
copy(t.section[t.offset:], b)
|
||||
// if input buffer consumed and open section not complete, then
|
||||
// advance offset and return
|
||||
if smax == 0 {
|
||||
smax = secsize
|
||||
}
|
||||
if l <= smax {
|
||||
t.offset += l
|
||||
return l, nil
|
||||
}
|
||||
} else {
|
||||
// if end of a section
|
||||
if t.cursor == h.pool.SegmentCount*2 {
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
// read full sections and the last possibly partial section from the input buffer
|
||||
for smax < l {
|
||||
// section complete; push to tree asynchronously
|
||||
go h.writeSection(t.cursor, t.section, true, false)
|
||||
// reset section
|
||||
t.section = make([]byte, secsize)
|
||||
// copy from input buffer at smax to right half of section
|
||||
copy(t.section, b[smax:])
|
||||
// advance cursor
|
||||
t.cursor++
|
||||
// smax here represents successive offsets in the input buffer
|
||||
smax += secsize
|
||||
}
|
||||
t.offset = l - smax + secsize
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// Reset needs to be called before writing to the hasher
|
||||
func (h *Hasher) Reset() {
|
||||
h.releaseTree()
|
||||
}
|
||||
|
||||
// methods needed to implement the SwarmHash interface
|
||||
|
||||
// ResetWithLength needs to be called before writing to the hasher
|
||||
// the argument is supposed to be the byte slice binary representation of
|
||||
// the length of the data subsumed under the hash, i.e., span
|
||||
func (h *Hasher) ResetWithLength(span []byte) {
|
||||
h.Reset()
|
||||
h.getTree().span = span
|
||||
}
|
||||
|
||||
// releaseTree gives back the Tree to the pool whereby it unlocks
|
||||
// it resets tree, segment and index
|
||||
func (h *Hasher) releaseTree() {
|
||||
t := h.bmt
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
h.bmt = nil
|
||||
go func() {
|
||||
t.cursor = 0
|
||||
t.offset = 0
|
||||
t.span = nil
|
||||
t.section = make([]byte, h.pool.SegmentSize*2)
|
||||
select {
|
||||
case <-t.result:
|
||||
default:
|
||||
}
|
||||
h.pool.release(t)
|
||||
}()
|
||||
}
|
||||
|
||||
// NewAsyncWriter extends Hasher with an interface for concurrent segment/section writes
|
||||
func (h *Hasher) NewAsyncWriter(double bool) *AsyncHasher {
|
||||
secsize := h.pool.SegmentSize
|
||||
if double {
|
||||
secsize *= 2
|
||||
}
|
||||
write := func(i int, section []byte, final bool) {
|
||||
h.writeSection(i, section, double, final)
|
||||
}
|
||||
return &AsyncHasher{
|
||||
Hasher: h,
|
||||
double: double,
|
||||
secsize: secsize,
|
||||
write: write,
|
||||
}
|
||||
}
|
||||
|
||||
// SectionWriter is an asynchronous segment/section writer interface
|
||||
type SectionWriter interface {
|
||||
Reset() // standard init to be called before reuse
|
||||
Write(index int, data []byte) // write into section of index
|
||||
Sum(b []byte, length int, span []byte) []byte // returns the hash of the buffer
|
||||
SectionSize() int // size of the async section unit to use
|
||||
}

// AsyncHasher extends BMT Hasher with an asynchronous segment/section writer interface
// AsyncHasher is unsafe and does not check indexes and section data lengths;
// it must be used with the right indexes, lengths and the right number of sections
//
// behaviour is undefined if
// * a non-final section is shorter or longer than secsize
// * the final section does not match the length
// * a section is written with an index that is higher than length/secsize
// * the length is set in a Sum call while length/secsize < maxsec
//
// * if Sum() is not called on a Hasher that is fully written,
//   a process will block; it can be terminated with Reset
// * it will not leak processes if not all sections are written, but it blocks
//   and keeps the resource, which can be released by calling Reset()
type AsyncHasher struct {
	*Hasher            // extends the Hasher
	mtx     sync.Mutex // to lock the cursor access
	double  bool       // whether to use double segments (call Hasher.writeSection)
	secsize int        // size of base section (size of hash or double)
	write   func(i int, section []byte, final bool)
}

// methods needed to implement AsyncWriter

// SectionSize returns the size of the async section unit to use
func (sw *AsyncHasher) SectionSize() int {
	return sw.secsize
}

// Write writes the i-th section of the BMT base
// this function can be (and is meant to be) called concurrently
// it sets the max segment threadsafely
func (sw *AsyncHasher) Write(i int, section []byte) {
	sw.mtx.Lock()
	defer sw.mtx.Unlock()
	t := sw.getTree()
	// cursor keeps track of the rightmost section written so far
	// if index is lower than cursor then just write the non-final section as is
	if i < t.cursor {
		// if index is not the rightmost, safe to write section
		go sw.write(i, section, false)
		return
	}
	// if there is a previous rightmost section, safe to write section
	if t.offset > 0 {
		if i == t.cursor {
			// i==cursor implies cursor was set by a Hash call so we can write the section as the final one
			// since it can be shorter, first we copy it to the padded buffer
			t.section = make([]byte, sw.secsize)
			copy(t.section, section)
			go sw.write(i, t.section, true)
			return
		}
		// the rightmost section just changed, so we write the previous one as non-final
		go sw.write(t.cursor, t.section, false)
	}
	// set i as the index of the rightmost section written so far
	// set t.offset to cursor*secsize+1
	t.cursor = i
	t.offset = i*sw.secsize + 1
	t.section = make([]byte, sw.secsize)
	copy(t.section, section)
}

// Sum can be called any time once the length and the span are known,
// potentially even before all segments have been written;
// in such cases Sum will block until all segments are present and
// the hash for the length can be calculated.
//
// b: digest is appended to b
// length: known length of the input (unsafe; undefined if out of range)
// meta: metadata to hash together with the BMT root for the final digest,
//   e.g., span for protection against existential forgery
func (sw *AsyncHasher) Sum(b []byte, length int, meta []byte) (s []byte) {
	sw.mtx.Lock()
	t := sw.getTree()
	if length == 0 {
		sw.mtx.Unlock()
		s = sw.pool.zerohashes[sw.pool.Depth]
	} else {
		// for non-zero input the rightmost section is written to the tree asynchronously
		// if the actual last section has been written (t.cursor == length/t.secsize)
		maxsec := (length - 1) / sw.secsize
		if t.offset > 0 {
			go sw.write(t.cursor, t.section, maxsec == t.cursor)
		}
		// set cursor to maxsec so the final section is written when it arrives
		t.cursor = maxsec
		t.offset = length
		result := t.result
		sw.mtx.Unlock()
		// wait for the result or reset
		s = <-result
	}
	// release the tree back to the pool
	sw.releaseTree()
	// if no meta is given just append digest to b
	if len(meta) == 0 {
		return append(b, s...)
	}
	// hash together meta and BMT root hash using the pool's base hasher
	return doSum(sw.pool.hasher(), b, meta, s)
}
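
// ExampleAsyncHasher is a sketch added for illustration (not part of the
// original file; it assumes the tests' pool setup): sections may be written
// out of order and from any goroutine, and Sum blocks until the tree is complete.
func ExampleAsyncHasher() {
	pool := NewTreePool(sha3.NewLegacyKeccak256, 128, PoolSize)
	defer pool.Drain(0)
	sw := New(pool).NewAsyncWriter(false)
	sw.Reset()

	secsize := sw.SectionSize()
	input := make([]byte, 2*secsize) // two full zero sections, for simplicity
	span := make([]byte, 8)
	binary.BigEndian.PutUint64(span, uint64(len(input)))

	sw.Write(1, input[secsize:]) // order does not matter
	sw.Write(0, input[:secsize])
	digest := sw.Sum(nil, len(input), span) // blocks until both sections arrived
	fmt.Printf("%x\n", digest)
}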

// writeSection writes the hash of the i-th section into the level 1 node of the BMT tree
func (h *Hasher) writeSection(i int, section []byte, double bool, final bool) {
	// select the leaf node for the section
	var n *node
	var isLeft bool
	var hasher hash.Hash
	var level int
	t := h.getTree()
	if double {
		level++
		n = t.leaves[i]
		hasher = n.hasher
		isLeft = n.isLeft
		n = n.parent
		// hash the section
		section = doSum(hasher, nil, section)
	} else {
		n = t.leaves[i/2]
		hasher = n.hasher
		isLeft = i%2 == 0
	}
	// write hash into parent node
	if final {
		// for the last segment use writeFinalNode
		h.writeFinalNode(level, n, hasher, isLeft, section)
	} else {
		h.writeNode(n, hasher, isLeft, section)
	}
}

// writeNode pushes the data to the node
// if it is the first of the 2 sisters written, the routine terminates
// if it is the second, it calculates the hash and writes it
// to the parent node recursively
// since hashing the parent is synchronous, the same hasher can be used
func (h *Hasher) writeNode(n *node, bh hash.Hash, isLeft bool, s []byte) {
	level := 1
	for {
		// at the root of the bmt just write the result to the result channel
		if n == nil {
			h.getTree().result <- s
			return
		}
		// otherwise assign child hash to left or right segment
		if isLeft {
			n.left = s
		} else {
			n.right = s
		}
		// the child-thread first arriving will terminate
		if n.toggle() {
			return
		}
		// the thread arriving second can be sure both left and right children are written,
		// so it calculates the hash of left|right and pushes it to the parent
		s = doSum(bh, nil, n.left, n.right)
		isLeft = n.isLeft
		n = n.parent
		level++
	}
}

// writeFinalNode follows the path starting from the final data segment to the
// BMT root via parents
// for unbalanced trees it fills in the missing right sister nodes using
// the pool's lookup table of BMT subtree root hashes for all-zero sections
// otherwise it behaves like `writeNode`
func (h *Hasher) writeFinalNode(level int, n *node, bh hash.Hash, isLeft bool, s []byte) {

	for {
		// at the root of the bmt just write the result to the result channel
		if n == nil {
			if s != nil {
				h.getTree().result <- s
			}
			return
		}
		var noHash bool
		if isLeft {
			// coming from the left sister branch:
			// when the final section's path goes via the left child node,
			// we include an all-zero subtree hash for the right child at this level and toggle the node.
			n.right = h.pool.zerohashes[level]
			if s != nil {
				n.left = s
				// if a left final node carries a hash, it must be the first (and only) thread,
				// so the toggle is already in passive state; no need to call it,
				// yet the thread needs to carry on pushing the hash to the parent
				noHash = false
			} else {
				// if again the first thread, then propagate nil and calculate no hash
				noHash = n.toggle()
			}
		} else {
			// right sister branch
			if s != nil {
				// if the hash was pushed from the right child node, write the right segment and change state
				n.right = s
				// if toggle is true, we arrived first so no hashing; just push nil to the parent
				noHash = n.toggle()

			} else {
				// if s is nil, then this thread arrived first at the previous node and here there will be two,
				// so no need to do anything; keep s = nil for the parent
				noHash = true
			}
		}
		// the child-thread arriving first will just continue resetting s to nil
		// the second thread now can be sure both left and right children are written,
		// so it calculates the hash of left|right and pushes it to the parent
		if noHash {
			s = nil
		} else {
			s = doSum(bh, nil, n.left, n.right)
		}
		// iterate to parent
		isLeft = n.isLeft
		n = n.parent
		level++
	}
}

// getTree obtains a BMT resource by reserving one from the pool and assigns it to the bmt field
func (h *Hasher) getTree() *tree {
	if h.bmt != nil {
		return h.bmt
	}
	t := h.pool.reserve()
	h.bmt = t
	return t
}

// toggle is an atomic bool toggle implementing a concurrent reusable 2-state object:
// atomic AddInt32 with %2 implements the atomic bool toggle
// it returns true if the toggler just put it in the active/waiting state
func (n *node) toggle() bool {
	return atomic.AddInt32(&n.state, 1)%2 == 1
}
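
// The toggle is the heart of the tree-climbing handshake: of the two
// goroutines delivering sibling hashes, the first arrival flips the counter
// and stops, while the second knows both children are in place and continues
// upward. A stripped-down illustration of the same idiom (hypothetical names,
// not package code):
//
//	var state int32
//	firstArrival := func() bool {
//		return atomic.AddInt32(&state, 1)%2 == 1
//	}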

// doSum calculates the hash of the data using the given hash.Hash
func doSum(h hash.Hash, b []byte, data ...[]byte) []byte {
	h.Reset()
	for _, v := range data {
		h.Write(v)
	}
	return h.Sum(b)
}

// hashstr is a pretty printer for bytes used in tree.draw
func hashstr(b []byte) string {
	end := len(b)
	if end > 4 {
		end = 4
	}
	return fmt.Sprintf("%x", b[:end])
}

// calculateDepthFor calculates the depth (number of levels) in the BMT tree
func calculateDepthFor(n int) (d int) {
	c := 2
	for ; c < n; c *= 2 {
		d++
	}
	return d + 1
}
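
// For n >= 2 this is the ceiling of log2(n); a few spot values, worked out by
// hand rather than taken from the package's tests:
//
//	calculateDepthFor(2)   == 1
//	calculateDepthFor(3)   == 2
//	calculateDepthFor(128) == 7 // default chunk: 128 segments -> 64 leaves -> 7 node levels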
@ -1,84 +0,0 @@

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package bmt is a simple nonconcurrent reference implementation of the hashsize-segment-based
// Binary Merkle tree hash on an arbitrary but fixed maximum chunksize
//
// This implementation does not take advantage of any parallelism and uses
// far more memory than necessary, but it is easy to see that it is correct.
// It can be used for generating test cases for optimized implementations.
// There are extra checks on reference hasher correctness in bmt_test.go:
// * TestRefHasher
// * the testBMTHasherCorrectness function
package bmt

import (
	"hash"
)

// RefHasher is the non-optimized easy-to-read reference implementation of BMT
type RefHasher struct {
	maxDataLength int       // c * hashSize, where c = 2 ^ ceil(log2(count)), where count = ceil(length / hashSize)
	sectionLength int       // 2 * hashSize
	hasher        hash.Hash // base hash func (Keccak256 SHA3)
}

// NewRefHasher returns a new RefHasher
func NewRefHasher(hasher BaseHasherFunc, count int) *RefHasher {
	h := hasher()
	hashsize := h.Size()
	c := 2
	for ; c < count; c *= 2 {
	}
	return &RefHasher{
		sectionLength: 2 * hashsize,
		maxDataLength: c * hashsize,
		hasher:        h,
	}
}

// Hash returns the BMT hash of the byte slice
// implements the SwarmHash interface
func (rh *RefHasher) Hash(data []byte) []byte {
	// if data is shorter than the base length (maxDataLength), we provide padding with zeros
	d := make([]byte, rh.maxDataLength)
	length := len(data)
	if length > rh.maxDataLength {
		length = rh.maxDataLength
	}
	copy(d, data[:length])
	return rh.hash(d, rh.maxDataLength)
}

// data has length maxDataLength = segmentSize * 2^k
// hash calls itself recursively on both halves of the given slice,
// concatenates the results, and returns the hash of that;
// if the length of d is 2 * segmentSize then it just returns the hash of that section
func (rh *RefHasher) hash(data []byte, length int) []byte {
	var section []byte
	if length == rh.sectionLength {
		// section contains two data segments (d)
		section = data
	} else {
		// section contains hashes of the left and right BMT subtrees
		// to be calculated by calling hash recursively on the left and right half of d
		length /= 2
		section = append(rh.hash(data[:length], length), rh.hash(data[length:], length)...)
	}
	rh.hasher.Reset()
	rh.hasher.Write(section)
	return rh.hasher.Sum(nil)
}
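
// A usage sketch added for illustration (RefHasher is normally only driven
// from the tests, whose Keccak256 base hasher is assumed here); inputs
// shorter than count*hashsize are zero-padded internally:
//
//	rh := NewRefHasher(sha3.NewLegacyKeccak256, 128)
//	digest := rh.Hash([]byte("reference check"))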
@ -1,583 +0,0 @@

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bmt

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/swarm/testutil"
	"golang.org/x/crypto/sha3"
)

// BufferSize is the actual data length generated (could be longer than the max datalength of the BMT)
const BufferSize = 4128

const (
	// segmentCount is the maximum number of segments of the underlying chunk
	// Should be equal to max-chunk-data-size / hash-size
	// Currently set to 128 == 4096 (default chunk size) / 32 (sha3.keccak256 size)
	segmentCount = 128
)

var counts = []int{1, 2, 3, 4, 5, 8, 9, 15, 16, 17, 32, 37, 42, 53, 63, 64, 65, 111, 127, 128}

// sha3hash calculates the Keccak256 SHA3 hash of the data
func sha3hash(data ...[]byte) []byte {
	h := sha3.NewLegacyKeccak256()
	return doSum(h, nil, data...)
}

// TestRefHasher tests that the RefHasher computes the expected BMT hash for
// some small data lengths
func TestRefHasher(t *testing.T) {
	// the test struct is used to specify the expected BMT hash for
	// segment counts between from and to and lengths from 1 to datalength
	type test struct {
		from     int
		to       int
		expected func([]byte) []byte
	}

	var tests []*test
	// for segment counts 1 and 2, all lengths in [0,64] should be:
	//
	//   sha3hash(data)
	//
	tests = append(tests, &test{
		from: 1,
		to:   2,
		expected: func(d []byte) []byte {
			data := make([]byte, 64)
			copy(data, d)
			return sha3hash(data)
		},
	})

	// all segment counts in [3,4] should be:
	//
	//   sha3hash(
	//     sha3hash(data[:64])
	//     sha3hash(data[64:])
	//   )
	//
	tests = append(tests, &test{
		from: 3,
		to:   4,
		expected: func(d []byte) []byte {
			data := make([]byte, 128)
			copy(data, d)
			return sha3hash(sha3hash(data[:64]), sha3hash(data[64:]))
		},
	})

	// all segment counts in [5,8] should be:
	//
	//   sha3hash(
	//     sha3hash(
	//       sha3hash(data[:64])
	//       sha3hash(data[64:128])
	//     )
	//     sha3hash(
	//       sha3hash(data[128:192])
	//       sha3hash(data[192:])
	//     )
	//   )
	//
	tests = append(tests, &test{
		from: 5,
		to:   8,
		expected: func(d []byte) []byte {
			data := make([]byte, 256)
			copy(data, d)
			return sha3hash(sha3hash(sha3hash(data[:64]), sha3hash(data[64:128])), sha3hash(sha3hash(data[128:192]), sha3hash(data[192:])))
		},
	})

	// run the tests
	for i, x := range tests {
		for segmentCount := x.from; segmentCount <= x.to; segmentCount++ {
			for length := 1; length <= segmentCount*32; length++ {
				t.Run(fmt.Sprintf("%d_segments_%d_bytes", segmentCount, length), func(t *testing.T) {
					data := testutil.RandomBytes(i, length)
					expected := x.expected(data)
					actual := NewRefHasher(sha3.NewLegacyKeccak256, segmentCount).Hash(data)
					if !bytes.Equal(actual, expected) {
						t.Fatalf("expected %x, got %x", expected, actual)
					}
				})
			}
		}
	}
}

// TestHasherEmptyData tests that the hasher responds with the correct hash
// by comparing against the reference implementation's return value
func TestHasherEmptyData(t *testing.T) {
	hasher := sha3.NewLegacyKeccak256
	var data []byte
	for _, count := range counts {
		t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
			pool := NewTreePool(hasher, count, PoolSize)
			defer pool.Drain(0)
			bmt := New(pool)
			rbmt := NewRefHasher(hasher, count)
			refHash := rbmt.Hash(data)
			expHash := syncHash(bmt, nil, data)
			if !bytes.Equal(expHash, refHash) {
				t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
			}
		})
	}
}

// TestSyncHasherCorrectness tests sequential writes with the entire max size written in one go
func TestSyncHasherCorrectness(t *testing.T) {
	data := testutil.RandomBytes(1, BufferSize)
	hasher := sha3.NewLegacyKeccak256
	size := hasher().Size()

	var err error
	for _, count := range counts {
		t.Run(fmt.Sprintf("segments_%v", count), func(t *testing.T) {
			max := count * size
			var incr int
			capacity := 1
			pool := NewTreePool(hasher, count, capacity)
			defer pool.Drain(0)
			for n := 0; n <= max; n += incr {
				incr = 1 + rand.Intn(5)
				bmt := New(pool)
				err = testHasherCorrectness(bmt, hasher, data, n, count)
				if err != nil {
					t.Fatal(err)
				}
			}
		})
	}
}

// TestAsyncCorrectness tests order-neutral concurrent writes with the entire max size written in one go
func TestAsyncCorrectness(t *testing.T) {
	data := testutil.RandomBytes(1, BufferSize)
	hasher := sha3.NewLegacyKeccak256
	size := hasher().Size()
	whs := []whenHash{first, last, random}

	for _, double := range []bool{false, true} {
		for _, wh := range whs {
			for _, count := range counts {
				t.Run(fmt.Sprintf("double_%v_hash_when_%v_segments_%v", double, wh, count), func(t *testing.T) {
					max := count * size
					var incr int
					capacity := 1
					pool := NewTreePool(hasher, count, capacity)
					defer pool.Drain(0)
					for n := 1; n <= max; n += incr {
						incr = 1 + rand.Intn(5)
						bmt := New(pool)
						d := data[:n]
						rbmt := NewRefHasher(hasher, count)
						exp := rbmt.Hash(d)
						got := syncHash(bmt, nil, d)
						if !bytes.Equal(got, exp) {
							t.Fatalf("wrong sync hash for datalength %v: expected %x (ref), got %x", n, exp, got)
						}
						sw := bmt.NewAsyncWriter(double)
						got = asyncHashRandom(sw, nil, d, wh)
						if !bytes.Equal(got, exp) {
							t.Fatalf("wrong async hash for datalength %v: expected %x, got %x", n, exp, got)
						}
					}
				})
			}
		}
	}
}

// TestHasherReuse tests that the BMT hasher can be synchronously reused with pool sizes 1 and PoolSize
func TestHasherReuse(t *testing.T) {
	t.Run(fmt.Sprintf("poolsize_%d", 1), func(t *testing.T) {
		testHasherReuse(1, t)
	})
	t.Run(fmt.Sprintf("poolsize_%d", PoolSize), func(t *testing.T) {
		testHasherReuse(PoolSize, t)
	})
}

// testHasherReuse tests that bmt reuse does not corrupt the result
func testHasherReuse(poolsize int, t *testing.T) {
	hasher := sha3.NewLegacyKeccak256
	pool := NewTreePool(hasher, segmentCount, poolsize)
	defer pool.Drain(0)
	bmt := New(pool)

	for i := 0; i < 100; i++ {
		data := testutil.RandomBytes(1, BufferSize)
		n := rand.Intn(bmt.Size())
		err := testHasherCorrectness(bmt, hasher, data, n, segmentCount)
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestBMTConcurrentUse tests that the pool can be cleanly reused even in concurrent use by several hashers
func TestBMTConcurrentUse(t *testing.T) {
	hasher := sha3.NewLegacyKeccak256
	pool := NewTreePool(hasher, segmentCount, PoolSize)
	defer pool.Drain(0)
	cycles := 100
	errc := make(chan error)

	for i := 0; i < cycles; i++ {
		go func() {
			bmt := New(pool)
			data := testutil.RandomBytes(1, BufferSize)
			n := rand.Intn(bmt.Size())
			errc <- testHasherCorrectness(bmt, hasher, data, n, 128)
		}()
	}
LOOP:
	for {
		select {
		case <-time.NewTimer(5 * time.Second).C:
			t.Fatal("timed out")
		case err := <-errc:
			if err != nil {
				t.Fatal(err)
			}
			cycles--
			if cycles == 0 {
				break LOOP
			}
		}
	}
}

// TestBMTWriterBuffers tests that the BMT Hasher's io.Writer interface works correctly
// even with multiple short random write buffers
func TestBMTWriterBuffers(t *testing.T) {
	hasher := sha3.NewLegacyKeccak256

	for _, count := range counts {
		t.Run(fmt.Sprintf("%d_segments", count), func(t *testing.T) {
			errc := make(chan error)
			pool := NewTreePool(hasher, count, PoolSize)
			defer pool.Drain(0)
			n := count * 32
			bmt := New(pool)
			data := testutil.RandomBytes(1, n)
			rbmt := NewRefHasher(hasher, count)
			refHash := rbmt.Hash(data)
			expHash := syncHash(bmt, nil, data)
			if !bytes.Equal(expHash, refHash) {
				t.Fatalf("hash mismatch with reference. expected %x, got %x", refHash, expHash)
			}
			attempts := 10
			f := func() error {
				bmt := New(pool)
				bmt.Reset()
				var buflen int
				for offset := 0; offset < n; offset += buflen {
					buflen = rand.Intn(n-offset) + 1
					read, err := bmt.Write(data[offset : offset+buflen])
					if err != nil {
						return err
					}
					if read != buflen {
						return fmt.Errorf("incorrect read. expected %v bytes, got %v", buflen, read)
					}
				}
				hash := bmt.Sum(nil)
				if !bytes.Equal(hash, expHash) {
					return fmt.Errorf("hash mismatch. expected %x, got %x", hash, expHash)
				}
				return nil
			}

			for j := 0; j < attempts; j++ {
				go func() {
					errc <- f()
				}()
			}
			timeout := time.NewTimer(2 * time.Second)
			for {
				select {
				case err := <-errc:
					if err != nil {
						t.Fatal(err)
					}
					attempts--
					if attempts == 0 {
						return
					}
				case <-timeout.C:
					t.Fatalf("timeout")
				}
			}
		})
	}
}

// testHasherCorrectness is a helper function that compares the reference and
// optimised implementations for correctness
func testHasherCorrectness(bmt *Hasher, hasher BaseHasherFunc, d []byte, n, count int) (err error) {
	span := make([]byte, 8)
	if len(d) < n {
		n = len(d)
	}
	binary.BigEndian.PutUint64(span, uint64(n))
	data := d[:n]
	rbmt := NewRefHasher(hasher, count)
	exp := sha3hash(span, rbmt.Hash(data))
	got := syncHash(bmt, span, data)
	if !bytes.Equal(got, exp) {
		return fmt.Errorf("wrong hash: expected %x, got %x", exp, got)
	}
	return err
}

// BenchmarkBMT runs the hashing benchmarks on a range of chunk sizes
func BenchmarkBMT(t *testing.B) {
	for size := 4096; size >= 128; size /= 2 {
		t.Run(fmt.Sprintf("%v_size_%v", "SHA3", size), func(t *testing.B) {
			benchmarkSHA3(t, size)
		})
		t.Run(fmt.Sprintf("%v_size_%v", "Baseline", size), func(t *testing.B) {
			benchmarkBMTBaseline(t, size)
		})
		t.Run(fmt.Sprintf("%v_size_%v", "REF", size), func(t *testing.B) {
			benchmarkRefHasher(t, size)
		})
		t.Run(fmt.Sprintf("%v_size_%v", "BMT", size), func(t *testing.B) {
			benchmarkBMT(t, size)
		})
	}
}

// whenHash controls when Sum is called relative to the section writes
type whenHash = int

const (
	first whenHash = iota
	last
	random
)

// BenchmarkBMTAsync benchmarks the BMT hasher with asynchronous section writes
func BenchmarkBMTAsync(t *testing.B) {
	whs := []whenHash{first, last, random}
	for size := 4096; size >= 128; size /= 2 {
		for _, wh := range whs {
			for _, double := range []bool{false, true} {
				t.Run(fmt.Sprintf("double_%v_hash_when_%v_size_%v", double, wh, size), func(t *testing.B) {
					benchmarkBMTAsync(t, size, wh, double)
				})
			}
		}
	}
}

// BenchmarkPool benchmarks concurrent hashing with pool capacities 1 and PoolSize
func BenchmarkPool(t *testing.B) {
	caps := []int{1, PoolSize}
	for size := 4096; size >= 128; size /= 2 {
		for _, c := range caps {
			t.Run(fmt.Sprintf("poolsize_%v_size_%v", c, size), func(t *testing.B) {
				benchmarkPool(t, c, size)
			})
		}
	}
}

// benchmarkSHA3 benchmarks a simple sha3 hash on chunks
func benchmarkSHA3(t *testing.B, n int) {
	data := testutil.RandomBytes(1, n)
	hasher := sha3.NewLegacyKeccak256
	h := hasher()

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		doSum(h, nil, data)
	}
}

// benchmarkBMTBaseline benchmarks the minimum hashing time for a balanced (for simplicity) BMT
// by doing count/segmentsize parallel hashings of 2*segmentsize bytes,
// spread across PoolSize goroutines, each reusing its base hasher;
// the premise is that this is the minimum computation needed for a BMT,
// therefore this serves as a theoretical optimum for concurrent implementations
func benchmarkBMTBaseline(t *testing.B, n int) {
	hasher := sha3.NewLegacyKeccak256
	hashSize := hasher().Size()
	data := testutil.RandomBytes(1, hashSize)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		count := int32((n-1)/hashSize + 1)
		wg := sync.WaitGroup{}
		wg.Add(PoolSize)
		var i int32
		for j := 0; j < PoolSize; j++ {
			go func() {
				defer wg.Done()
				h := hasher()
				for atomic.AddInt32(&i, 1) < count {
					doSum(h, nil, data)
				}
			}()
		}
		wg.Wait()
	}
}

// benchmarkBMT benchmarks the BMT Hasher
func benchmarkBMT(t *testing.B, n int) {
	data := testutil.RandomBytes(1, n)
	hasher := sha3.NewLegacyKeccak256
	pool := NewTreePool(hasher, segmentCount, PoolSize)
	bmt := New(pool)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		syncHash(bmt, nil, data)
	}
}

// benchmarkBMTAsync benchmarks the BMT hasher with asynchronous concurrent segment/section writes
func benchmarkBMTAsync(t *testing.B, n int, wh whenHash, double bool) {
	data := testutil.RandomBytes(1, n)
	hasher := sha3.NewLegacyKeccak256
	pool := NewTreePool(hasher, segmentCount, PoolSize)
	bmt := New(pool).NewAsyncWriter(double)
	idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
	rand.Shuffle(len(idxs), func(i int, j int) {
		idxs[i], idxs[j] = idxs[j], idxs[i]
	})

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		asyncHash(bmt, nil, n, wh, idxs, segments)
	}
}

// benchmarkPool benchmarks 100 concurrent bmt hashes with the given pool capacity
func benchmarkPool(t *testing.B, poolsize, n int) {
	data := testutil.RandomBytes(1, n)
	hasher := sha3.NewLegacyKeccak256
	pool := NewTreePool(hasher, segmentCount, poolsize)
	cycles := 100

	t.ReportAllocs()
	t.ResetTimer()
	wg := sync.WaitGroup{}
	for i := 0; i < t.N; i++ {
		wg.Add(cycles)
		for j := 0; j < cycles; j++ {
			go func() {
				defer wg.Done()
				bmt := New(pool)
				syncHash(bmt, nil, data)
			}()
		}
		wg.Wait()
	}
}

// benchmarkRefHasher benchmarks the reference hasher
func benchmarkRefHasher(t *testing.B, n int) {
	data := testutil.RandomBytes(1, n)
	hasher := sha3.NewLegacyKeccak256
	rbmt := NewRefHasher(hasher, 128)

	t.ReportAllocs()
	t.ResetTimer()
	for i := 0; i < t.N; i++ {
		rbmt.Hash(data)
	}
}

// syncHash hashes the data and the span using the bmt hasher
func syncHash(h *Hasher, span, data []byte) []byte {
	h.ResetWithLength(span)
	h.Write(data)
	return h.Sum(nil)
}

// splitAndShuffle splits the data into sections and returns the section
// indexes in random order together with the sections themselves
func splitAndShuffle(secsize int, data []byte) (idxs []int, segments [][]byte) {
	l := len(data)
	n := l / secsize
	if l%secsize > 0 {
		n++
	}
	for i := 0; i < n; i++ {
		idxs = append(idxs, i)
		end := (i + 1) * secsize
		if end > l {
			end = l
		}
		section := data[i*secsize : end]
		segments = append(segments, section)
	}
	rand.Shuffle(n, func(i int, j int) {
		idxs[i], idxs[j] = idxs[j], idxs[i]
	})
	return idxs, segments
}

// asyncHashRandom splits the input data and performs a random shuffle to mock async section writes
func asyncHashRandom(bmt SectionWriter, span []byte, data []byte, wh whenHash) (s []byte) {
	idxs, segments := splitAndShuffle(bmt.SectionSize(), data)
	return asyncHash(bmt, span, len(data), wh, idxs, segments)
}

// asyncHash is a mock for async section writes on a BMT SectionWriter
// it requires a permutation (a random shuffle) of the list of all segment indexes
// and writes them in that order to the appropriate sections
// the Sum function is called according to the wh parameter (first, last, random [relative to segment writes])
func asyncHash(bmt SectionWriter, span []byte, l int, wh whenHash, idxs []int, segments [][]byte) (s []byte) {
	bmt.Reset()
	if l == 0 {
		return bmt.Sum(nil, l, span)
	}
	c := make(chan []byte, 1)
	hashf := func() {
		c <- bmt.Sum(nil, l, span)
	}
	maxsize := len(idxs)
	var r int
	if wh == random {
		r = rand.Intn(maxsize)
	}
	for i, idx := range idxs {
		bmt.Write(idx, segments[idx])
		if (wh == first || wh == random) && i == r {
			go hashf()
		}
	}
	if wh == last {
		return bmt.Sum(nil, l, span)
	}
	return <-c
}
@ -1,261 +0,0 @@

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chunk

import (
	"context"
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

const (
	DefaultSize   = 4096
	MaxPO         = 16
	AddressLength = 32
)

var (
	ErrChunkNotFound = errors.New("chunk not found")
	ErrChunkInvalid  = errors.New("invalid chunk")
)

type Chunk interface {
	Address() Address
	Data() []byte
}

type chunk struct {
	addr  Address
	sdata []byte
}

func NewChunk(addr Address, data []byte) Chunk {
	return &chunk{
		addr:  addr,
		sdata: data,
	}
}

func (c *chunk) Address() Address {
	return c.addr
}

func (c *chunk) Data() []byte {
	return c.sdata
}

func (c *chunk) String() string {
	return fmt.Sprintf("Address: %v Chunksize: %v", c.addr.Log(), len(c.sdata))
}

type Address []byte

var ZeroAddr = Address(common.Hash{}.Bytes())

func (a Address) Hex() string {
	return fmt.Sprintf("%064x", []byte(a[:]))
}

func (a Address) Log() string {
	if len(a[:]) < 8 {
		return fmt.Sprintf("%x", []byte(a[:]))
	}
	return fmt.Sprintf("%016x", []byte(a[:8]))
}

func (a Address) String() string {
	return fmt.Sprintf("%064x", []byte(a))
}

func (a Address) MarshalJSON() (out []byte, err error) {
	return []byte(`"` + a.String() + `"`), nil
}

func (a *Address) UnmarshalJSON(value []byte) error {
	s := string(value)
	*a = make([]byte, 32)
	h := common.Hex2Bytes(s[1 : len(s)-1])
	copy(*a, h)
	return nil
}

// Proximity returns the proximity order of the MSB distance between x and y
//
// The distance metric MSB(x, y) of two equal-length byte sequences x and y is
// the value of the binary integer cast of x^y, i.e., x and y bitwise xor-ed.
// The binary cast is big endian: most significant bit first (=MSB).
//
// Proximity(x, y) is a discrete logarithmic scaling of the MSB distance.
// It is defined as the reverse rank of the integer part of the base 2
// logarithm of the distance.
// It is calculated by counting the number of common leading zeros in the (MSB)
// binary representation of x^y.
//
// (0 farthest, 255 closest, 256 self; the result is capped at MaxPO)
func Proximity(one, other []byte) (ret int) {
	b := (MaxPO-1)/8 + 1
	if b > len(one) {
		b = len(one)
	}
	m := 8
	for i := 0; i < b; i++ {
		oxo := one[i] ^ other[i]
		for j := 0; j < m; j++ {
			if (oxo>>uint8(7-j))&0x01 != 0 {
				return i*8 + j
			}
		}
	}
	return MaxPO
}
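
// Worked example (illustration only, not part of the original file): if two
// addresses first differ in bit 3, counting from the MSB of the first byte,
// then x^y has three leading zeros and the proximity order is 3:
//
//	x := []byte{0x00, 0x00, 0x00, 0x00}
//	y := []byte{0x10, 0x00, 0x00, 0x00} // 0b00010000
//	Proximity(x, y) // == 3
//	Proximity(x, x) // == MaxPO (16): identical within the checked prefix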

// ModeGet enumerates different Getter modes.
type ModeGet int

func (m ModeGet) String() string {
	switch m {
	case ModeGetRequest:
		return "Request"
	case ModeGetSync:
		return "Sync"
	case ModeGetLookup:
		return "Lookup"
	default:
		return "Unknown"
	}
}

// Getter modes.
const (
	// ModeGetRequest: when accessed for retrieval
	ModeGetRequest ModeGet = iota
	// ModeGetSync: when accessed for syncing or a proof of custody request
	ModeGetSync
	// ModeGetLookup: when accessed to look up a chunk in feeds or other places
	ModeGetLookup
)

// ModePut enumerates different Putter modes.
type ModePut int

func (m ModePut) String() string {
	switch m {
	case ModePutRequest:
		return "Request"
	case ModePutSync:
		return "Sync"
	case ModePutUpload:
		return "Upload"
	default:
		return "Unknown"
	}
}

// Putter modes.
const (
	// ModePutRequest: when a chunk is received as a result of a retrieve request and delivery
	ModePutRequest ModePut = iota
	// ModePutSync: when a chunk is received via syncing
	ModePutSync
	// ModePutUpload: when a chunk is created by local upload
	ModePutUpload
)

// ModeSet enumerates different Setter modes.
type ModeSet int

func (m ModeSet) String() string {
	switch m {
	case ModeSetAccess:
		return "Access"
	case ModeSetSync:
		return "Sync"
	case ModeSetRemove:
		return "Remove"
	default:
		return "Unknown"
	}
}

// Setter modes.
const (
	// ModeSetAccess: when an update request is received for a chunk or a chunk is retrieved for delivery
	ModeSetAccess ModeSet = iota
	// ModeSetSync: when a chunk is added to a pull sync batch or when a push sync receipt is received
	ModeSetSync
	// ModeSetRemove: when a chunk is removed
	ModeSetRemove
)

// Descriptor holds information required for Pull syncing. This struct
// is provided by subscribing to the pull index.
type Descriptor struct {
	Address Address
	BinID   uint64
}

func (d *Descriptor) String() string {
	if d == nil {
		return ""
	}
	return fmt.Sprintf("%s bin id %v", d.Address.Hex(), d.BinID)
}

type Store interface {
	Get(ctx context.Context, mode ModeGet, addr Address) (ch Chunk, err error)
	Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error)
	Has(ctx context.Context, addr Address) (yes bool, err error)
	Set(ctx context.Context, mode ModeSet, addr Address) (err error)
	LastPullSubscriptionBinID(bin uint8) (id uint64, err error)
	SubscribePull(ctx context.Context, bin uint8, since, until uint64) (c <-chan Descriptor, stop func())
	Close() (err error)
}

// Validator validates a chunk.
type Validator interface {
	Validate(ch Chunk) bool
}

// ValidatorStore encapsulates Store by decorating the Put method
// with a validators check.
type ValidatorStore struct {
	Store
	validators []Validator
}

// NewValidatorStore returns a new ValidatorStore which uses
// the provided validators to validate chunks on Put.
func NewValidatorStore(store Store, validators ...Validator) (s *ValidatorStore) {
	return &ValidatorStore{
		Store:      store,
		validators: validators,
	}
}

// Put overrides the Store Put method with a validators check. If one of the validators
// returns true, the chunk is considered valid and the Store Put method is called.
// If all validators return false, ErrChunkInvalid is returned.
func (s *ValidatorStore) Put(ctx context.Context, mode ModePut, ch Chunk) (exists bool, err error) {
	for _, v := range s.validators {
		if v.Validate(ch) {
			return s.Store.Put(ctx, mode, ch)
		}
	}
	return false, ErrChunkInvalid
}
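
// sizeValidator is a hypothetical illustration of how the decorator composes
// (not part of this package's API): it accepts only chunks whose payload fits
// the default data size plus an assumed 8-byte span prefix.
type sizeValidator struct{}

func (v sizeValidator) Validate(ch Chunk) bool {
	return len(ch.Data()) <= DefaultSize+8
}

// Wiring it up, given some concrete store implementing Store:
//
//	vs := NewValidatorStore(store, sizeValidator{})
//	_, err := vs.Put(ctx, ModePutUpload, ch) // ErrChunkInvalid if no validator accepts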
@ -1,186 +0,0 @@

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chunk

import (
	"strconv"
	"testing"
)

// TestProximity validates the Proximity function with explicit
// values in a table-driven test. It is highly dependent on the
// MaxPO constant and it validates cases up to MaxPO=32.
func TestProximity(t *testing.T) {
	// integer from base2 encoded string
	bx := func(s string) uint8 {
		i, err := strconv.ParseUint(s, 2, 8)
		if err != nil {
			t.Fatal(err)
		}
		return uint8(i)
	}
	// adjust expected bins in respect to MaxPO
	limitPO := func(po uint8) uint8 {
		if po > MaxPO {
			return MaxPO
		}
		return po
	}
	base := []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000000")}
	for _, tc := range []struct {
		addr []byte
		po   uint8
	}{
		{addr: base, po: MaxPO},
		{addr: []byte{bx("10000000"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(0)},
		{addr: []byte{bx("01000000"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(1)},
		{addr: []byte{bx("00100000"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(2)},
		{addr: []byte{bx("00010000"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(3)},
		{addr: []byte{bx("00001000"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(4)},
		{addr: []byte{bx("00000100"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(5)},
		{addr: []byte{bx("00000010"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(6)},
		{addr: []byte{bx("00000001"), bx("00000000"), bx("00000000"), bx("00000000")}, po: limitPO(7)},
		{addr: []byte{bx("00000000"), bx("10000000"), bx("00000000"), bx("00000000")}, po: limitPO(8)},
		{addr: []byte{bx("00000000"), bx("01000000"), bx("00000000"), bx("00000000")}, po: limitPO(9)},
		{addr: []byte{bx("00000000"), bx("00100000"), bx("00000000"), bx("00000000")}, po: limitPO(10)},
		{addr: []byte{bx("00000000"), bx("00010000"), bx("00000000"), bx("00000000")}, po: limitPO(11)},
		{addr: []byte{bx("00000000"), bx("00001000"), bx("00000000"), bx("00000000")}, po: limitPO(12)},
		{addr: []byte{bx("00000000"), bx("00000100"), bx("00000000"), bx("00000000")}, po: limitPO(13)},
		{addr: []byte{bx("00000000"), bx("00000010"), bx("00000000"), bx("00000000")}, po: limitPO(14)},
		{addr: []byte{bx("00000000"), bx("00000001"), bx("00000000"), bx("00000000")}, po: limitPO(15)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("10000000"), bx("00000000")}, po: limitPO(16)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("01000000"), bx("00000000")}, po: limitPO(17)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00100000"), bx("00000000")}, po: limitPO(18)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00010000"), bx("00000000")}, po: limitPO(19)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00001000"), bx("00000000")}, po: limitPO(20)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000100"), bx("00000000")}, po: limitPO(21)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000010"), bx("00000000")}, po: limitPO(22)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000001"), bx("00000000")}, po: limitPO(23)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("10000000")}, po: limitPO(24)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("01000000")}, po: limitPO(25)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00100000")}, po: limitPO(26)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00010000")}, po: limitPO(27)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00001000")}, po: limitPO(28)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000100")}, po: limitPO(29)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000010")}, po: limitPO(30)},
		{addr: []byte{bx("00000000"), bx("00000000"), bx("00000000"), bx("00000001")}, po: limitPO(31)},
	} {
		got := uint8(Proximity(base, tc.addr))
		if got != tc.po {
			t.Errorf("got %v bin, want %v", got, tc.po)
		}
	}
}
@ -1,218 +0,0 @@

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chunk

import (
	"encoding/binary"
	"errors"
	"sync/atomic"
	"time"
)

var (
	errExists      = errors.New("already exists")
	errNA          = errors.New("not available yet")
	errNoETA       = errors.New("unable to calculate ETA")
	errTagNotFound = errors.New("tag not found")
)

// State is the enum type for chunk states
type State = uint32

const (
	StateSplit  State = iota // chunk has been processed by filehasher/swarm safe call
	StateStored              // chunk stored locally
	StateSeen                // chunk previously seen
	StateSent                // chunk sent to neighbourhood
	StateSynced              // proof is received; chunk removed from sync db; chunk is available everywhere
)

// Tag represents info on the status of new chunks
type Tag struct {
	Uid       uint32    // a unique identifier for this tag
	Name      string    // a name tag for this tag
	Address   Address   // the associated swarm hash for this tag
	total     int64     // total chunks belonging to a tag
	split     int64     // number of chunks already processed by splitter for hashing
	seen      int64     // number of chunks already seen
	stored    int64     // number of chunks already stored locally
	sent      int64     // number of chunks sent for push syncing
	synced    int64     // number of chunks synced with proof
	startedAt time.Time // time the tag started, used to calculate the ETA
}

// NewTag creates a new tag with the given uid, name and total count
// and returns it
func NewTag(uid uint32, s string, total int64) *Tag {
	t := &Tag{
		Uid:       uid,
		Name:      s,
		startedAt: time.Now(),
		total:     total,
	}
	return t
}

// Inc increments the count for a state
func (t *Tag) Inc(state State) {
	var v *int64
	switch state {
	case StateSplit:
		v = &t.split
	case StateStored:
		v = &t.stored
	case StateSeen:
		v = &t.seen
	case StateSent:
		v = &t.sent
	case StateSynced:
		v = &t.synced
	}
	atomic.AddInt64(v, 1)
}

// Get returns the count for a state on a tag
func (t *Tag) Get(state State) int64 {
	var v *int64
	switch state {
	case StateSplit:
		v = &t.split
	case StateStored:
		v = &t.stored
	case StateSeen:
		v = &t.seen
	case StateSent:
		v = &t.sent
	case StateSynced:
		v = &t.synced
	}
	return atomic.LoadInt64(v)
}

// Total returns the total count
func (t *Tag) Total() int64 {
	return atomic.LoadInt64(&t.total)
}

// DoneSplit sets the total count to the split count and sets the associated swarm hash for this tag;
// it is meant to be called when the splitter finishes for input streams of unknown size
func (t *Tag) DoneSplit(address Address) int64 {
	total := atomic.LoadInt64(&t.split)
	atomic.StoreInt64(&t.total, total)
	t.Address = address
	return total
}
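
// Intended lifecycle for a stream of unknown size (illustrative sketch;
// chunks and rootAddr are assumed inputs, not names from this package):
//
//	tg := NewTag(1, "stream", 0) // total unknown up front
//	for range chunks {
//		tg.Inc(StateSplit) // one per chunk produced by the splitter
//	}
//	total := tg.DoneSplit(rootAddr) // freezes total at the split count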

// Status returns the value of the state and the total count
func (t *Tag) Status(state State) (int64, int64, error) {
	count, seen, total := t.Get(state), atomic.LoadInt64(&t.seen), atomic.LoadInt64(&t.total)
	if total == 0 {
		return count, total, errNA
	}
	switch state {
	case StateSplit, StateStored, StateSeen:
		return count, total, nil
	case StateSent, StateSynced:
		stored := atomic.LoadInt64(&t.stored)
		if stored < total {
			return count, total - seen, errNA
		}
		return count, total - seen, nil
	}
	return count, total, errNA
}

// ETA returns the time of completion estimated based on time passed and the rate of completion
func (t *Tag) ETA(state State) (time.Time, error) {
	cnt, total, err := t.Status(state)
	if err != nil {
		return time.Time{}, err
	}
	if cnt == 0 || total == 0 {
		return time.Time{}, errNoETA
	}
	diff := time.Since(t.startedAt)
	dur := time.Duration(total) * diff / time.Duration(cnt)
	return t.startedAt.Add(dur), nil
}
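
// The estimate is plain linear extrapolation: with cnt of total items done
// after elapsed = time.Since(startedAt), completion is projected at
// startedAt + total*elapsed/cnt. With toy numbers:
//
//	// 1 of 10 chunks split after 100ms:
//	//   dur = 10 * 100ms / 1 = 1s
//	//   eta = startedAt.Add(1s)
//
// TestTagETA below checks exactly this projection.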

// MarshalBinary marshals the tag into a byte slice
func (tag *Tag) MarshalBinary() (data []byte, err error) {
	buffer := make([]byte, 4)
	binary.BigEndian.PutUint32(buffer, tag.Uid)
	encodeInt64Append(&buffer, tag.total)
	encodeInt64Append(&buffer, tag.split)
	encodeInt64Append(&buffer, tag.seen)
	encodeInt64Append(&buffer, tag.stored)
	encodeInt64Append(&buffer, tag.sent)
	encodeInt64Append(&buffer, tag.synced)

	// MaxVarintLen64 so PutVarint cannot panic on large values
	intBuffer := make([]byte, binary.MaxVarintLen64)

	n := binary.PutVarint(intBuffer, tag.startedAt.Unix())
	buffer = append(buffer, intBuffer[:n]...)

	n = binary.PutVarint(intBuffer, int64(len(tag.Address)))
	buffer = append(buffer, intBuffer[:n]...)

	buffer = append(buffer, tag.Address[:]...)

	buffer = append(buffer, []byte(tag.Name)...)

	return buffer, nil
}

// UnmarshalBinary unmarshals a byte slice into a tag
func (tag *Tag) UnmarshalBinary(buffer []byte) error {
	if len(buffer) < 13 {
		return errors.New("buffer too short")
	}
	tag.Uid = binary.BigEndian.Uint32(buffer)
	buffer = buffer[4:]

	tag.total = decodeInt64Splice(&buffer)
	tag.split = decodeInt64Splice(&buffer)
	tag.seen = decodeInt64Splice(&buffer)
	tag.stored = decodeInt64Splice(&buffer)
	tag.sent = decodeInt64Splice(&buffer)
	tag.synced = decodeInt64Splice(&buffer)

	t, n := binary.Varint(buffer)
	tag.startedAt = time.Unix(t, 0)
	buffer = buffer[n:]

	t, n = binary.Varint(buffer)
	buffer = buffer[n:]
	if t > 0 {
		tag.Address = buffer[:t]
	}
	tag.Name = string(buffer[t:])

	return nil
}
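
// Round-trip sketch (illustration only; TestMarshallingWithAddr below
// exercises the same path). Note that startedAt is truncated to whole
// seconds on the wire:
//
//	tg := NewTag(42, "upload", 10)
//	tg.Address = ZeroAddr
//	b, _ := tg.MarshalBinary()
//	restored := &Tag{}
//	_ = restored.UnmarshalBinary(b) // recovers Uid, counters, Address, Name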

// encodeInt64Append varint-encodes val and appends it to the buffer
func encodeInt64Append(buffer *[]byte, val int64) {
	intBuffer := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(intBuffer, val)
	*buffer = append(*buffer, intBuffer[:n]...)
}

// decodeInt64Splice decodes a varint from the front of the buffer and advances it
func decodeInt64Splice(buffer *[]byte) int64 {
	val, n := binary.Varint(*buffer)
	*buffer = (*buffer)[n:]
	return val
}
@ -1,273 +0,0 @@

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chunk

import (
	"bytes"
	"sync"
	"testing"
	"time"
)

var (
	allStates = []State{StateSplit, StateStored, StateSeen, StateSent, StateSynced}
)

// TestTagSingleIncrements tests if Inc increments the tag state value
func TestTagSingleIncrements(t *testing.T) {
	tg := &Tag{total: 10}

	tc := []struct {
		state    uint32
		inc      int
		expcount int64
		exptotal int64
	}{
		{state: StateSplit, inc: 10, expcount: 10, exptotal: 10},
		{state: StateStored, inc: 9, expcount: 9, exptotal: 9},
		{state: StateSeen, inc: 1, expcount: 1, exptotal: 10},
		{state: StateSent, inc: 9, expcount: 9, exptotal: 9},
		{state: StateSynced, inc: 9, expcount: 9, exptotal: 9},
	}

	for _, tc := range tc {
		for i := 0; i < tc.inc; i++ {
			tg.Inc(tc.state)
		}
	}

	for _, tc := range tc {
		if tg.Get(tc.state) != tc.expcount {
			t.Fatalf("not incremented")
		}
	}
}

// TestTagStatus is a unit test to cover Tag.Status method functionality
func TestTagStatus(t *testing.T) {
	tg := &Tag{total: 10}
	tg.Inc(StateSeen)
	tg.Inc(StateSent)
	tg.Inc(StateSynced)

	for i := 0; i < 10; i++ {
		tg.Inc(StateSplit)
		tg.Inc(StateStored)
	}
	for _, v := range []struct {
		state    State
		expVal   int64
		expTotal int64
	}{
		{state: StateStored, expVal: 10, expTotal: 10},
		{state: StateSplit, expVal: 10, expTotal: 10},
		{state: StateSeen, expVal: 1, expTotal: 10},
		{state: StateSent, expVal: 1, expTotal: 9},
		{state: StateSynced, expVal: 1, expTotal: 9},
	} {
		val, total, err := tg.Status(v.state)
		if err != nil {
			t.Fatal(err)
		}
		if val != v.expVal {
			t.Fatalf("should be %d, got %d", v.expVal, val)
		}
		if total != v.expTotal {
			t.Fatalf("expected total to be %d, got %d", v.expTotal, total)
		}
	}
}

// TestTagETA tests that the ETA estimate is precise
func TestTagETA(t *testing.T) {
	now := time.Now()
	maxDiff := 100000 // 100 microseconds
	tg := &Tag{total: 10, startedAt: now}
	time.Sleep(100 * time.Millisecond)
	tg.Inc(StateSplit)
	eta, err := tg.ETA(StateSplit)
	if err != nil {
		t.Fatal(err)
	}
	diff := time.Until(eta) - 9*time.Since(now)
	if int(diff) > maxDiff {
		t.Fatalf("ETA is not precise, got diff %v > .1ms", diff)
	}
}
|
||||
|
||||
// TestTagConcurrentIncrements tests Inc calls concurrently
|
||||
func TestTagConcurrentIncrements(t *testing.T) {
|
||||
tg := &Tag{}
|
||||
n := 1000
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(5 * n)
|
||||
for _, f := range allStates {
|
||||
go func(f State) {
|
||||
for j := 0; j < n; j++ {
|
||||
go func() {
|
||||
tg.Inc(f)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
}(f)
|
||||
}
|
||||
wg.Wait()
|
||||
for _, f := range allStates {
|
||||
v := tg.Get(f)
|
||||
if v != int64(n) {
|
||||
t.Fatalf("expected state %v to be %v, got %v", f, n, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestTagsMultipleConcurrentIncrements tests Inc calls concurrently
|
||||
func TestTagsMultipleConcurrentIncrementsSyncMap(t *testing.T) {
|
||||
ts := NewTags()
|
||||
n := 100
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(10 * 5 * n)
|
||||
for i := 0; i < 10; i++ {
|
||||
s := string([]byte{uint8(i)})
|
||||
tag, err := ts.New(s, int64(n))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, f := range allStates {
|
||||
go func(tag *Tag, f State) {
|
||||
for j := 0; j < n; j++ {
|
||||
go func() {
|
||||
tag.Inc(f)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
}(tag, f)
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
i := 0
|
||||
ts.Range(func(k, v interface{}) bool {
|
||||
i++
|
||||
uid := k.(uint32)
|
||||
for _, f := range allStates {
|
||||
tag, err := ts.Get(uid)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
stateVal := tag.Get(f)
|
||||
if stateVal != int64(n) {
|
||||
t.Fatalf("expected tag %v state %v to be %v, got %v", uid, f, n, v)
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
||||
})
|
||||
if i != 10 {
|
||||
t.Fatal("not enough tagz")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMarshallingWithAddr tests that marshalling and unmarshalling is done correctly when the
|
||||
// tag Address (byte slice) contains some arbitrary value
|
||||
func TestMarshallingWithAddr(t *testing.T) {
|
||||
tg := NewTag(111, "test/tag", 10)
|
||||
tg.Address = []byte{0, 1, 2, 3, 4, 5, 6}
|
||||
|
||||
for _, f := range allStates {
|
||||
tg.Inc(f)
|
||||
}
|
||||
|
||||
b, err := tg.MarshalBinary()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
unmarshalledTag := &Tag{}
|
||||
err = unmarshalledTag.UnmarshalBinary(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if unmarshalledTag.Uid != tg.Uid {
|
||||
t.Fatalf("tag uids not equal. want %d got %d", tg.Uid, unmarshalledTag.Uid)
|
||||
}
|
||||
|
||||
if unmarshalledTag.Name != tg.Name {
|
||||
t.Fatalf("tag names not equal. want %s got %s", tg.Name, unmarshalledTag.Name)
|
||||
}
|
||||
|
||||
for _, state := range allStates {
|
||||
uv, tv := unmarshalledTag.Get(state), tg.Get(state)
|
||||
if uv != tv {
|
||||
t.Fatalf("state %d inconsistent. expected %d to equal %d", state, uv, tv)
|
||||
}
|
||||
}
|
||||
|
||||
if unmarshalledTag.Total() != tg.Total() {
|
||||
t.Fatalf("tag names not equal. want %d got %d", tg.Total(), unmarshalledTag.Total())
|
||||
}
|
||||
|
||||
if len(unmarshalledTag.Address) != len(tg.Address) {
|
||||
t.Fatalf("tag addresses length mismatch, want %d, got %d", len(tg.Address), len(unmarshalledTag.Address))
|
||||
}
|
||||
|
||||
if !bytes.Equal(unmarshalledTag.Address, tg.Address) {
|
||||
t.Fatalf("expected tag address to be %v got %v", unmarshalledTag.Address, tg.Address)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMarshallingNoAddress tests that marshalling and unmarshalling is done correctly
|
||||
// when the tag Address (byte slice) is empty in this case
|
||||
func TestMarshallingNoAddr(t *testing.T) {
|
||||
tg := NewTag(111, "test/tag", 10)
|
||||
for _, f := range allStates {
|
||||
tg.Inc(f)
|
||||
}
|
||||
|
||||
b, err := tg.MarshalBinary()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
unmarshalledTag := &Tag{}
|
||||
err = unmarshalledTag.UnmarshalBinary(b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if unmarshalledTag.Uid != tg.Uid {
|
||||
t.Fatalf("tag uids not equal. want %d got %d", tg.Uid, unmarshalledTag.Uid)
|
||||
}
|
||||
|
||||
if unmarshalledTag.Name != tg.Name {
|
||||
t.Fatalf("tag names not equal. want %s got %s", tg.Name, unmarshalledTag.Name)
|
||||
}
|
||||
|
||||
for _, state := range allStates {
|
||||
uv, tv := unmarshalledTag.Get(state), tg.Get(state)
|
||||
if uv != tv {
|
||||
t.Fatalf("state %d inconsistent. expected %d to equal %d", state, uv, tv)
|
||||
}
|
||||
}
|
||||
|
||||
if unmarshalledTag.Total() != tg.Total() {
|
||||
t.Fatalf("tag names not equal. want %d got %d", tg.Total(), unmarshalledTag.Total())
|
||||
}
|
||||
|
||||
if len(unmarshalledTag.Address) != len(tg.Address) {
|
||||
t.Fatalf("expected tag addresses to be equal length")
|
||||
}
|
||||
}
|
@ -1,96 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chunk

import (
	"context"
	"errors"
	"math/rand"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/swarm/sctx"
)

// Tags hold tag information indexed by a unique random uint32
type Tags struct {
	tags *sync.Map
	rng  *rand.Rand
}

// NewTags creates a tags object
func NewTags() *Tags {
	return &Tags{
		tags: &sync.Map{},
		rng:  rand.New(rand.NewSource(time.Now().Unix())),
	}
}

// New creates a new tag, stores it under a newly generated random uid and returns it.
// It returns an error if a tag with the same uid already exists.
func (ts *Tags) New(s string, total int64) (*Tag, error) {
	t := &Tag{
		Uid:       ts.rng.Uint32(),
		Name:      s,
		startedAt: time.Now(),
		total:     total,
	}
	if _, loaded := ts.tags.LoadOrStore(t.Uid, t); loaded {
		return nil, errExists
	}
	return t, nil
}

// All returns all existing tags in Tags' sync.Map.
// Note that tags are returned in no particular order.
func (ts *Tags) All() (t []*Tag) {
	ts.tags.Range(func(k, v interface{}) bool {
		t = append(t, v.(*Tag))
		return true
	})

	return t
}

// Get returns the underlying tag for the uid or an error if not found
func (ts *Tags) Get(uid uint32) (*Tag, error) {
	t, ok := ts.tags.Load(uid)
	if !ok {
		return nil, errors.New("tag not found")
	}
	return t.(*Tag), nil
}

// GetFromContext gets a tag from the tag uid stored in the context
func (ts *Tags) GetFromContext(ctx context.Context) (*Tag, error) {
	uid := sctx.GetTag(ctx)
	t, ok := ts.tags.Load(uid)
	if !ok {
		return nil, errTagNotFound
	}
	return t.(*Tag), nil
}

// Range exposes sync.Map's iterator
func (ts *Tags) Range(fn func(k, v interface{}) bool) {
	ts.tags.Range(fn)
}

// Delete removes the tag with the given key from the tags map
func (ts *Tags) Delete(k interface{}) {
	ts.tags.Delete(k)
}
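Taken together, the tag-tracking files above form a small progress API: a `Tag` counts chunks per state, and `Tags` indexes live tags by uid. A minimal usage sketch (illustrative only; it assumes the `swarm/chunk` package as it existed before this removal, and that `Tag.Status` succeeds once a total is known):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/swarm/chunk" // the package deleted above
)

func main() {
	ts := chunk.NewTags()
	tag, err := ts.New("my-upload", 100) // expect 100 chunks in total
	if err != nil {
		panic(err) // only fails if a duplicate uid was generated
	}
	tag.Inc(chunk.StateSplit)  // one chunk split off the file
	tag.Inc(chunk.StateStored) // one chunk written to the local store
	count, total, err := tag.Status(chunk.StateStored)
	if err != nil {
		panic(err)
	}
	fmt.Println(count, total) // 1 100
}
```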
@ -1,48 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package chunk

import "testing"

func TestAll(t *testing.T) {
	ts := NewTags()

	ts.New("1", 1)
	ts.New("2", 1)

	all := ts.All()

	if len(all) != 2 {
		t.Fatalf("expected length to be 2 got %d", len(all))
	}

	if n := all[0].Total(); n != 1 {
		t.Fatalf("expected tag 0 total to be 1 got %d", n)
	}

	if n := all[1].Total(); n != 1 {
		t.Fatalf("expected tag 1 total to be 1 got %d", n)
	}

	ts.New("3", 1)
	all = ts.All()

	if len(all) != 3 {
		t.Fatalf("expected length to be 3 got %d", len(all))
	}
}
2
swarm/dev/.gitignore
vendored
@ -1,2 +0,0 @@
bin/*
cluster/*
@ -1,42 +0,0 @@
FROM ubuntu:xenial

# install build + test dependencies
RUN apt-get update && \
  apt-get install --yes --no-install-recommends \
    ca-certificates \
    curl \
    fuse \
    g++ \
    gcc \
    git \
    iproute2 \
    iputils-ping \
    less \
    libc6-dev \
    make \
    pkg-config \
  && \
  apt-get clean

# install Go
ENV GO_VERSION 1.8.1
RUN curl -fSLo golang.tar.gz "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" && \
  tar -xzf golang.tar.gz -C /usr/local && \
  rm golang.tar.gz
ENV GOPATH /go
ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH

# install docker CLI
RUN curl -fSLo docker.tar.gz https://get.docker.com/builds/Linux/x86_64/docker-17.04.0-ce.tgz && \
  tar -xzf docker.tar.gz -C /usr/local/bin --strip-components=1 docker/docker && \
  rm docker.tar.gz

# install jq
RUN curl -fSLo /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && \
  chmod +x /usr/local/bin/jq

# install govendor
RUN go get -u github.com/kardianos/govendor

# add custom bashrc
ADD bashrc /root/.bashrc
@ -1,14 +0,0 @@
.PHONY: build cluster test

default: build

build:
	go build -o bin/swarm github.com/ethereum/go-ethereum/cmd/swarm
	go build -o bin/geth github.com/ethereum/go-ethereum/cmd/geth
	go build -o bin/bootnode github.com/ethereum/go-ethereum/cmd/bootnode

cluster: build
	scripts/boot-cluster.sh

test:
	go test -v github.com/ethereum/go-ethereum/swarm/...
@ -1,20 +0,0 @@
Swarm development environment
=============================

The Swarm development environment is a Linux bash shell which can be run in a
Docker container and provides a predictable build and test environment.

### Start the Docker container

Run the `run.sh` script to build the Docker image and run it; you will then be
at a bash prompt inside the `swarm/dev` directory.

### Build binaries

Run `make` to build the `swarm`, `geth` and `bootnode` binaries into the
`swarm/dev/bin` directory.

### Boot a cluster

Run `make cluster` to start a 3-node Swarm cluster, or run
`scripts/boot-cluster.sh --size N` to boot a cluster of size N.
@ -1,21 +0,0 @@
export ROOT="${GOPATH}/src/github.com/ethereum/go-ethereum"
export PATH="${ROOT}/swarm/dev/bin:${PATH}"

cd "${ROOT}/swarm/dev"

cat <<WELCOME

=============================================

Welcome to the swarm development environment.

- Run 'make' to build the swarm, geth and bootnode binaries
- Run 'make test' to run the swarm unit tests
- Run 'make cluster' to start a swarm cluster
- Run 'exit' to exit the development environment

See the 'scripts' directory for some useful scripts.

=============================================

WELCOME
@ -1,90 +0,0 @@
#!/usr/bin/env bash
#
# A script to build and run the Swarm development environment using Docker.

set -e

ROOT="$(cd "$(dirname "$0")/../.." && pwd)"

# DEFAULT_NAME is the default name for the Docker image and container
DEFAULT_NAME="swarm-dev"

usage() {
  cat >&2 <<USAGE
usage: $0 [options]

Build and run the Swarm development environment.

Depends on Docker being installed locally.

OPTIONS:
  -n, --name NAME          Docker image and container name [default: ${DEFAULT_NAME}]
  -d, --docker-args ARGS   Custom args to pass to 'docker run' (e.g. '-p 8000:8000' to expose a port)
  -h, --help               Show this message
USAGE
}

main() {
  local name="${DEFAULT_NAME}"
  local docker_args=""
  parse_args "$@"
  build_image
  run_image
}

parse_args() {
  while true; do
    case "$1" in
      -h | --help)
        usage
        exit 0
        ;;
      -n | --name)
        if [[ -z "$2" ]]; then
          echo "ERROR: --name flag requires an argument" >&2
          exit 1
        fi
        name="$2"
        shift 2
        ;;
      -d | --docker-args)
        if [[ -z "$2" ]]; then
          echo "ERROR: --docker-args flag requires an argument" >&2
          exit 1
        fi
        docker_args="$2"
        shift 2
        ;;
      *)
        break
        ;;
    esac
  done

  if [[ $# -ne 0 ]]; then
    usage
    echo "ERROR: invalid arguments" >&2
    exit 1
  fi
}

build_image() {
  docker build --tag "${name}" "${ROOT}/swarm/dev"
}

run_image() {
  exec docker run \
    --privileged \
    --interactive \
    --tty \
    --rm \
    --hostname "${name}" \
    --name "${name}" \
    --volume "${ROOT}:/go/src/github.com/ethereum/go-ethereum" \
    --volume "/var/run/docker.sock:/var/run/docker.sock" \
    ${docker_args} \
    "${name}" \
    /bin/bash
}

main "$@"
@ -1,288 +0,0 @@
#!/bin/bash
#
# A script to boot a dev swarm cluster on a Linux host (typically in a Docker
# container started with swarm/dev/run.sh).
#
# The cluster contains a bootnode, a geth node and multiple swarm nodes, with
# each node having its own data directory in a base directory passed with the
# --dir flag (default is swarm/dev/cluster).
#
# To avoid using different ports for each node and to make networking more
# realistic, each node gets its own network namespace with IPs assigned from
# the 192.168.33.0/24 subnet:
#
#   bootnode: 192.168.33.2
#   geth:     192.168.33.3
#   swarm:    192.168.33.10{1,2,...,n}

set -e

ROOT="$(cd "$(dirname "$0")/../../.." && pwd)"
source "${ROOT}/swarm/dev/scripts/util.sh"

# DEFAULT_BASE_DIR is the default base directory to store node data
DEFAULT_BASE_DIR="${ROOT}/swarm/dev/cluster"

# DEFAULT_CLUSTER_SIZE is the default swarm cluster size
DEFAULT_CLUSTER_SIZE=3

# Linux bridge configuration for connecting the node network namespaces
BRIDGE_NAME="swarmbr0"
BRIDGE_IP="192.168.33.1"

# static bootnode configuration
BOOTNODE_IP="192.168.33.2"
BOOTNODE_PORT="30301"
BOOTNODE_KEY="32078f313bea771848db70745225c52c00981589ad6b5b49163f0f5ee852617d"
BOOTNODE_PUBKEY="760c4460e5336ac9bbd87952a3c7ec4363fc0a97bd31c86430806e287b437fd1b01abc6e1db640cf3106b520344af1d58b00b57823db3e1407cbc433e1b6d04d"
BOOTNODE_URL="enode://${BOOTNODE_PUBKEY}@${BOOTNODE_IP}:${BOOTNODE_PORT}"

# static geth configuration
GETH_IP="192.168.33.3"
GETH_RPC_PORT="8545"
GETH_RPC_URL="http://${GETH_IP}:${GETH_RPC_PORT}"

usage() {
  cat >&2 <<USAGE
usage: $0 [options]

Boot a dev swarm cluster.

OPTIONS:
  -d, --dir DIR     Base directory to store node data [default: ${DEFAULT_BASE_DIR}]
  -s, --size SIZE   Size of swarm cluster [default: ${DEFAULT_CLUSTER_SIZE}]
  -h, --help        Show this message
USAGE
}

main() {
  local base_dir="${DEFAULT_BASE_DIR}"
  local cluster_size="${DEFAULT_CLUSTER_SIZE}"

  parse_args "$@"

  local pid_dir="${base_dir}/pids"
  local log_dir="${base_dir}/logs"
  mkdir -p "${base_dir}" "${pid_dir}" "${log_dir}"

  stop_cluster
  create_network
  start_bootnode
  start_geth_node
  start_swarm_nodes
}

parse_args() {
  while true; do
    case "$1" in
      -h | --help)
        usage
        exit 0
        ;;
      -d | --dir)
        if [[ -z "$2" ]]; then
          fail "--dir flag requires an argument"
        fi
        base_dir="$2"
        shift 2
        ;;
      -s | --size)
        if [[ -z "$2" ]]; then
          fail "--size flag requires an argument"
        fi
        cluster_size="$2"
        shift 2
        ;;
      *)
        break
        ;;
    esac
  done

  if [[ $# -ne 0 ]]; then
    usage
    fail "ERROR: invalid arguments: $@"
  fi
}

stop_cluster() {
  info "stopping existing cluster"
  "${ROOT}/swarm/dev/scripts/stop-cluster.sh" --dir "${base_dir}"
}

# create_network creates a Linux bridge which is used to connect the node
# network namespaces together
create_network() {
  local subnet="${BRIDGE_IP}/24"

  info "creating ${subnet} network on ${BRIDGE_NAME}"
  ip link add name "${BRIDGE_NAME}" type bridge
  ip link set dev "${BRIDGE_NAME}" up
  ip address add "${subnet}" dev "${BRIDGE_NAME}"
}

# start_bootnode starts a bootnode which is used to bootstrap the geth and
# swarm nodes
start_bootnode() {
  local key_file="${base_dir}/bootnode.key"
  echo -n "${BOOTNODE_KEY}" > "${key_file}"

  local args=(
    --addr      "${BOOTNODE_IP}:${BOOTNODE_PORT}"
    --nodekey   "${key_file}"
    --verbosity "6"
  )

  start_node "bootnode" "${BOOTNODE_IP}" "$(which bootnode)" ${args[@]}
}

# start_geth_node starts a geth node with --datadir pointing at <base-dir>/geth
# and a single, unlocked account with password "geth"
start_geth_node() {
  local dir="${base_dir}/geth"
  mkdir -p "${dir}"

  local password="geth"
  echo "${password}" > "${dir}/password"

  # create an account if necessary
  if [[ ! -e "${dir}/keystore" ]]; then
    info "creating geth account"
    create_account "${dir}" "${password}"
  fi

  # get the account address
  local address="$(jq --raw-output '.address' ${dir}/keystore/*)"
  if [[ -z "${address}" ]]; then
    fail "failed to get geth account address"
  fi

  local args=(
    --datadir   "${dir}"
    --networkid "321"
    --bootnodes "${BOOTNODE_URL}"
    --unlock    "${address}"
    --password  "${dir}/password"
    --rpc
    --rpcaddr   "${GETH_IP}"
    --rpcport   "${GETH_RPC_PORT}"
    --verbosity "6"
  )

  start_node "geth" "${GETH_IP}" "$(which geth)" ${args[@]}
}

start_swarm_nodes() {
  for i in $(seq 1 ${cluster_size}); do
    start_swarm_node "${i}"
  done
}

# start_swarm_node starts a swarm node with a name like "swarmNN" (where NN is
# a zero-padded integer like "07"), --datadir pointing at <base-dir>/<name>
# (e.g. <base-dir>/swarm07) and a single account with <name> as the password
start_swarm_node() {
  local num=$1
  local name="swarm$(printf '%02d' ${num})"
  local ip="192.168.33.1$(printf '%02d' ${num})"

  local dir="${base_dir}/${name}"
  mkdir -p "${dir}"

  local password="${name}"
  echo "${password}" > "${dir}/password"

  # create an account if necessary
  if [[ ! -e "${dir}/keystore" ]]; then
    info "creating account for ${name}"
    create_account "${dir}" "${password}"
  fi

  # get the account address
  local address="$(jq --raw-output '.address' ${dir}/keystore/*)"
  if [[ -z "${address}" ]]; then
    fail "failed to get swarm account address"
  fi

  local args=(
    --bootnodes    "${BOOTNODE_URL}"
    --datadir      "${dir}"
    --identity     "${name}"
    --ens-api      "${GETH_RPC_URL}"
    --bzznetworkid "321"
    --bzzaccount   "${address}"
    --password     "${dir}/password"
    --verbosity    "6"
  )

  start_node "${name}" "${ip}" "$(which swarm)" ${args[@]}
}

# start_node runs the node command as a daemon in a network namespace
start_node() {
  local name="$1"
  local ip="$2"
  local path="$3"
  local cmd_args=${@:4}

  info "starting ${name} with IP ${ip}"

  create_node_network "${name}" "${ip}"

  # add a marker to the log file
  cat >> "${log_dir}/${name}.log" <<EOF

>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Starting ${name} node - $(date)
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

EOF

  # run the command in the network namespace using start-stop-daemon to
  # daemonise the process, sending all output to the log file
  local daemon_args=(
    --start
    --background
    --no-close
    --make-pidfile
    --pidfile "${pid_dir}/${name}.pid"
    --exec    "${path}"
  )
  if ! ip netns exec "${name}" start-stop-daemon ${daemon_args[@]} -- $cmd_args &>> "${log_dir}/${name}.log"; then
    fail "could not start ${name}, check ${log_dir}/${name}.log"
  fi
}

# create_node_network creates a network namespace and connects it to the Linux
# bridge using a veth pair
create_node_network() {
  local name="$1"
  local ip="$2"

  # create the namespace
  ip netns add "${name}"

  # create the veth pair
  local veth0="veth${name}0"
  local veth1="veth${name}1"
  ip link add name "${veth0}" type veth peer name "${veth1}"

  # add one end to the bridge
  ip link set dev "${veth0}" master "${BRIDGE_NAME}"
  ip link set dev "${veth0}" up

  # add the other end to the namespace, rename it eth0 and give it the ip
  ip link set dev "${veth1}" netns "${name}"
  ip netns exec "${name}" ip link set dev "${veth1}" name "eth0"
  ip netns exec "${name}" ip link set dev "eth0" up
  ip netns exec "${name}" ip address add "${ip}/24" dev "eth0"
}

create_account() {
  local dir=$1
  local password=$2

  geth --datadir "${dir}" --password /dev/stdin account new <<< "${password}"
}

main "$@"
@ -1,96 +0,0 @@
#!/bin/bash
#
# A script to upload random data to a swarm cluster.
#
# Example:
#
#   random-uploads.sh --addr 192.168.33.101:8500 --size 40k --count 1000

set -e

ROOT="$(cd "$(dirname "$0")/../../.." && pwd)"
source "${ROOT}/swarm/dev/scripts/util.sh"

DEFAULT_ADDR="localhost:8500"
DEFAULT_UPLOAD_SIZE="40k"
DEFAULT_UPLOAD_COUNT="1000"

usage() {
  cat >&2 <<USAGE
usage: $0 [options]

Upload random data to a Swarm cluster.

OPTIONS:
  -a, --addr ADDR     Swarm API address [default: ${DEFAULT_ADDR}]
  -s, --size SIZE     Individual upload size [default: ${DEFAULT_UPLOAD_SIZE}]
  -c, --count COUNT   Number of uploads [default: ${DEFAULT_UPLOAD_COUNT}]
  -h, --help          Show this message
USAGE
}

main() {
  local addr="${DEFAULT_ADDR}"
  local upload_size="${DEFAULT_UPLOAD_SIZE}"
  local upload_count="${DEFAULT_UPLOAD_COUNT}"

  parse_args "$@"

  info "uploading ${upload_count} ${upload_size} random files to ${addr}"

  for i in $(seq 1 ${upload_count}); do
    info "upload ${i} / ${upload_count}:"
    do_random_upload
    echo
  done
}

do_random_upload() {
  curl -fsSL -X POST --data-binary "$(random_data)" "http://${addr}/bzz-raw:/"
}

random_data() {
  dd if=/dev/urandom of=/dev/stdout bs="${upload_size}" count=1 2>/dev/null
}

parse_args() {
  while true; do
    case "$1" in
      -h | --help)
        usage
        exit 0
        ;;
      -a | --addr)
        if [[ -z "$2" ]]; then
          fail "--addr flag requires an argument"
        fi
        addr="$2"
        shift 2
        ;;
      -s | --size)
        if [[ -z "$2" ]]; then
          fail "--size flag requires an argument"
        fi
        upload_size="$2"
        shift 2
        ;;
      -c | --count)
        if [[ -z "$2" ]]; then
          fail "--count flag requires an argument"
        fi
        upload_count="$2"
        shift 2
        ;;
      *)
        break
        ;;
    esac
  done

  if [[ $# -ne 0 ]]; then
    usage
    fail "ERROR: invalid arguments: $@"
  fi
}

main "$@"
@ -1,98 +0,0 @@
#!/bin/bash
#
# A script to shut down a dev swarm cluster.

set -e

ROOT="$(cd "$(dirname "$0")/../../.." && pwd)"
source "${ROOT}/swarm/dev/scripts/util.sh"

DEFAULT_BASE_DIR="${ROOT}/swarm/dev/cluster"

usage() {
  cat >&2 <<USAGE
usage: $0 [options]

Shut down a dev swarm cluster.

OPTIONS:
  -d, --dir DIR   Base directory [default: ${DEFAULT_BASE_DIR}]
  -h, --help      Show this message
USAGE
}

main() {
  local base_dir="${DEFAULT_BASE_DIR}"

  parse_args "$@"

  local pid_dir="${base_dir}/pids"

  stop_swarm_nodes
  stop_node "geth"
  stop_node "bootnode"
  delete_network
}

parse_args() {
  while true; do
    case "$1" in
      -h | --help)
        usage
        exit 0
        ;;
      -d | --dir)
        if [[ -z "$2" ]]; then
          fail "--dir flag requires an argument"
        fi
        base_dir="$2"
        shift 2
        ;;
      *)
        break
        ;;
    esac
  done

  if [[ $# -ne 0 ]]; then
    usage
    fail "ERROR: invalid arguments: $@"
  fi
}

stop_swarm_nodes() {
  for name in $(ls "${pid_dir}" | grep -oP 'swarm\d+'); do
    stop_node "${name}"
  done
}

stop_node() {
  local name=$1
  local pid_file="${pid_dir}/${name}.pid"

  if [[ -e "${pid_file}" ]]; then
    info "stopping ${name}"
    start-stop-daemon \
      --stop \
      --pidfile "${pid_file}" \
      --remove-pidfile \
      --oknodo \
      --retry 15
  fi

  if ip netns list | grep -qF "${name}"; then
    ip netns delete "${name}"
  fi

  if ip link show "veth${name}0" &>/dev/null; then
    ip link delete dev "veth${name}0"
  fi
}

delete_network() {
  if ip link show "swarmbr0" &>/dev/null; then
    ip link delete dev "swarmbr0"
  fi
}

main "$@"
@ -1,53 +0,0 @@
# shared shell functions

info() {
  local msg="$@"
  local timestamp="$(date +%H:%M:%S)"
  say "===> ${timestamp} ${msg}" "green"
}

warn() {
  local msg="$@"
  local timestamp=$(date +%H:%M:%S)
  say "===> ${timestamp} WARN: ${msg}" "yellow" >&2
}

fail() {
  local msg="$@"
  say "ERROR: ${msg}" "red" >&2
  exit 1
}

# say prints the given message to STDOUT, using the optional color if
# STDOUT is a terminal.
#
# usage:
#
#   say "foo"              - prints "foo"
#   say "bar" "red"        - prints "bar" in red
#   say "baz" "green"      - prints "baz" in green
#   say "qux" "red" | tee  - prints "qux" with no colour
#
say() {
  local msg=$1
  local color=$2

  if [[ -n "${color}" ]] && [[ -t 1 ]]; then
    case "${color}" in
      red)
        echo -e "\033[1;31m${msg}\033[0m"
        ;;
      green)
        echo -e "\033[1;32m${msg}\033[0m"
        ;;
      yellow)
        echo -e "\033[1;33m${msg}\033[0m"
        ;;
      *)
        echo "${msg}"
        ;;
    esac
  else
    echo "${msg}"
  fi
}
@ -1,32 +0,0 @@
FROM golang:1.11-alpine as builder

ARG VERSION

RUN apk add --update git gcc g++ linux-headers
RUN mkdir -p $GOPATH/src/github.com/ethereum && \
  cd $GOPATH/src/github.com/ethereum && \
  git clone https://github.com/ethersphere/go-ethereum && \
  cd $GOPATH/src/github.com/ethereum/go-ethereum && \
  git checkout ${VERSION} && \
  go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm && \
  go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm/swarm-smoke && \
  go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/swarm/global-store && \
  go install -ldflags "-X main.gitCommit=${VERSION}" ./cmd/geth


FROM alpine:3.8 as swarm-smoke
WORKDIR /
COPY --from=builder /go/bin/swarm-smoke /
ADD run-smoke.sh /run-smoke.sh
ENTRYPOINT ["/run-smoke.sh"]

FROM alpine:3.8 as swarm-global-store
WORKDIR /
COPY --from=builder /go/bin/global-store /
ENTRYPOINT ["/global-store"]

FROM alpine:3.8 as swarm
WORKDIR /
COPY --from=builder /go/bin/swarm /go/bin/geth /
ADD run.sh /run.sh
ENTRYPOINT ["/run.sh"]
@ -1,7 +0,0 @@
#!/bin/sh

set -o errexit
set -o pipefail
set -o nounset

/swarm-smoke $@ 2>&1 || true
@ -1,26 +0,0 @@
#!/bin/sh

set -o errexit
set -o pipefail
set -o nounset

PASSWORD=${PASSWORD:-}
DATADIR=${DATADIR:-/root/.ethereum/}

if [ "$PASSWORD" == "" ]; then echo "Password must be set in order to use swarm non-interactively." && exit 1; fi

echo $PASSWORD > /password

KEYFILE=`find $DATADIR | grep UTC | head -n 1` || true
if [ ! -f "$KEYFILE" ]; then echo "No keyfile found. Generating..." && /geth --datadir $DATADIR --password /password account new; fi
KEYFILE=`find $DATADIR | grep UTC | head -n 1` || true
if [ ! -f "$KEYFILE" ]; then echo "Could not find nor generate a BZZ keyfile." && exit 1; else echo "Found keyfile $KEYFILE"; fi

VERSION=`/swarm version`
echo "Running Swarm:"
echo $VERSION

export BZZACCOUNT="`echo -n $KEYFILE | tail -c 40`" || true
if [ "$BZZACCOUNT" == "" ]; then echo "Could not parse BZZACCOUNT from keyfile." && exit 1; fi

exec /swarm --bzzaccount=$BZZACCOUNT --password /password --datadir $DATADIR $@ 2>&1
@ -1,31 +0,0 @@
Swarm DB migration notes
========================

Swarm `v0.4` introduces major changes to the existing codebase. Among other things, the storage layer has been rewritten to be more modular and flexible
in a manner that will accommodate our future needs. Since Swarm at this point does not provide any storage guarantees, we have decided not to impose any migrations on our public cluster nor on our users. What this essentially means is that local storage will be purged on `v0.4`. We have nevertheless provided a procedure below for those of you running private clusters who would like to migrate the data to the new local storage format.

You are highly encouraged to report to us any bugs or problems caused by running the migration steps below.

**Note**: we highly recommend you run the commands below with the `--verbosity 5` flag and open an issue with the relevant terminal output in case something goes wrong.

**Important**: since you would be creating an export of your local store, the potential disk usage might peak at 2-3 times the normal Swarm data folder size. Please make sure you have enough disk space, backup media or other forms of local/network-attached storage _before_ executing the following steps!

**Important**: when trying to run Swarm with an old local store format, the Swarm binary will refuse to start, showing an error message.

You will need the following information for the migration procedure:
1. Your `datadir` path. This is indicated with the `--datadir` flag when running Swarm. If you do not specify this flag, the `datadir` will reside by default in `$HOME/.ethereum`.
2. Your chunk directory location. This would normally be located in `datadir/swarm/bzz-<your bzz account>/chunks`. We will refer to this as `chunkDir` below.
3. Your `bzzAddr`. This is _not_ your `--bzzaccount`! You can find your `bzzAddr` when starting Swarm by looking for the following line:
```
INFO [03-21|17:25:04.791] Swarm network started bzzaddr=ca1e9f3938cc1425c6061b96ad9eb93e134dfe8734ad490164ef20af9d1cf59c
```

The migration process is done in the following manner:
1. Try to run the updated Swarm binary; it should complain about the local store format and exit. If it does, execute the following steps:
2. `$ swarm --verbosity 5 db export <chunkDir> <exportLocation>/<exportFilename>.tar <bzzAddr>`
3. Move or remove your existing `chunkDir`
4. Run the new Swarm binary as you would normally start your Swarm node. The binary should now load normally and not complain. This step creates a new empty chunk store. Please shut down the node after it starts correctly.
5. `$ swarm --verbosity 5 db import --legacy <chunkDir> <exportLocation>/<exportFilename>.tar <bzzAddr>`
6. Wait patiently for the `Imported X chunks successfully` message.
7. Start your Swarm node as you normally would
8. Have a beer
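If you are scripting this migration across several private-cluster nodes, the sketch below drives steps 2 and 5 from Go. It is a minimal sketch, not an official tool: it assumes the `swarm` binary is on your `PATH`, and the chunk directory, tar path and `bzzAddr` values are placeholders you must replace.

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

// run shells out to the swarm binary with the given arguments,
// streaming its output so migration progress stays visible.
func run(args ...string) {
	cmd := exec.Command("swarm", args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("swarm %v failed: %v", args, err)
	}
}

func main() {
	// Placeholder values: substitute your own chunkDir, export path and bzzAddr.
	chunkDir := "/home/user/.ethereum/swarm/bzz-<account>/chunks"
	exportTar := "/mnt/backup/swarm-export.tar"
	bzzAddr := "<your bzzAddr>"

	run("--verbosity", "5", "db", "export", chunkDir, exportTar, bzzAddr) // step 2
	// Steps 3 and 4 (moving the old chunkDir and letting the new binary
	// create an empty store) are manual and intentionally not automated here.
	run("--verbosity", "5", "db", "import", "--legacy", chunkDir, exportTar, bzzAddr) // step 5
}
```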
@ -1,161 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"os"
	"path/filepath"
	"sync"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/swarm/log"
	"golang.org/x/net/context"
)

var (
	_ fs.Node                = (*SwarmDir)(nil)
	_ fs.NodeRequestLookuper = (*SwarmDir)(nil)
	_ fs.HandleReadDirAller  = (*SwarmDir)(nil)
	_ fs.NodeCreater         = (*SwarmDir)(nil)
	_ fs.NodeRemover         = (*SwarmDir)(nil)
	_ fs.NodeMkdirer         = (*SwarmDir)(nil)
)

type SwarmDir struct {
	inode       uint64
	name        string
	path        string
	directories []*SwarmDir
	files       []*SwarmFile

	mountInfo *MountInfo
	lock      *sync.RWMutex
}

func NewSwarmDir(fullpath string, minfo *MountInfo) *SwarmDir {
	log.Debug("swarmfs", "NewSwarmDir", fullpath)
	newdir := &SwarmDir{
		inode:       NewInode(),
		name:        filepath.Base(fullpath),
		path:        fullpath,
		directories: []*SwarmDir{},
		files:       []*SwarmFile{},
		mountInfo:   minfo,
		lock:        &sync.RWMutex{},
	}
	return newdir
}

func (sd *SwarmDir) Attr(ctx context.Context, a *fuse.Attr) error {
	sd.lock.RLock()
	defer sd.lock.RUnlock()
	a.Inode = sd.inode
	a.Mode = os.ModeDir | 0700
	a.Uid = uint32(os.Getuid())
	a.Gid = uint32(os.Getegid())
	return nil
}

func (sd *SwarmDir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (fs.Node, error) {
	log.Debug("swarmfs", "Lookup", req.Name)
	for _, n := range sd.files {
		if n.name == req.Name {
			return n, nil
		}
	}
	for _, n := range sd.directories {
		if n.name == req.Name {
			return n, nil
		}
	}
	return nil, fuse.ENOENT
}

func (sd *SwarmDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	log.Debug("swarmfs ReadDirAll")
	var children []fuse.Dirent
	for _, file := range sd.files {
		children = append(children, fuse.Dirent{Inode: file.inode, Type: fuse.DT_File, Name: file.name})
	}
	for _, dir := range sd.directories {
		children = append(children, fuse.Dirent{Inode: dir.inode, Type: fuse.DT_Dir, Name: dir.name})
	}
	return children, nil
}

func (sd *SwarmDir) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
	log.Debug("swarmfs Create", "path", sd.path, "req.Name", req.Name)

	newFile := NewSwarmFile(sd.path, req.Name, sd.mountInfo)
	newFile.fileSize = 0 // 0 means the file is not in swarm yet; it was just created

	sd.lock.Lock()
	defer sd.lock.Unlock()
	sd.files = append(sd.files, newFile)

	return newFile, newFile, nil
}

func (sd *SwarmDir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
	log.Debug("swarmfs Remove", "path", sd.path, "req.Name", req.Name)

	if req.Dir && sd.directories != nil {
		newDirs := []*SwarmDir{}
		for _, dir := range sd.directories {
			if dir.name == req.Name {
				removeDirectoryFromSwarm(dir)
			} else {
				newDirs = append(newDirs, dir)
			}
		}
		if len(sd.directories) > len(newDirs) {
			sd.lock.Lock()
			defer sd.lock.Unlock()
			sd.directories = newDirs
		}
		return nil
	} else if !req.Dir && sd.files != nil {
		newFiles := []*SwarmFile{}
		for _, f := range sd.files {
			if f.name == req.Name {
				removeFileFromSwarm(f)
			} else {
				newFiles = append(newFiles, f)
			}
		}
		if len(sd.files) > len(newFiles) {
			sd.lock.Lock()
			defer sd.lock.Unlock()
			sd.files = newFiles
		}
		return nil
	}
	return fuse.ENOENT
}

func (sd *SwarmDir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
	log.Debug("swarmfs Mkdir", "path", sd.path, "req.Name", req.Name)
	newDir := NewSwarmDir(filepath.Join(sd.path, req.Name), sd.mountInfo)
	sd.lock.Lock()
	defer sd.lock.Unlock()
	sd.directories = append(sd.directories, newDir)

	return newDir, nil
}
@ -1,146 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"errors"
	"io"
	"os"
	"sync"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"golang.org/x/net/context"
)

const (
	MaxAppendFileSize = 10485760 // 10MB
)

var (
	errInvalidOffset           = errors.New("Invalid offset during write")
	errFileSizeMaxLimitReached = errors.New("File size exceeded max limit")
)

var (
	_ fs.Node         = (*SwarmFile)(nil)
	_ fs.HandleReader = (*SwarmFile)(nil)
	_ fs.HandleWriter = (*SwarmFile)(nil)
)

type SwarmFile struct {
	inode    uint64
	name     string
	path     string
	addr     storage.Address
	fileSize int64
	reader   storage.LazySectionReader

	mountInfo *MountInfo
	lock      *sync.RWMutex
}

func NewSwarmFile(path, fname string, minfo *MountInfo) *SwarmFile {
	newFile := &SwarmFile{
		inode:    NewInode(),
		name:     fname,
		path:     path,
		addr:     nil,
		fileSize: -1, // -1 means the file already exists in swarm and the size must be fetched from swarm
		reader:   nil,

		mountInfo: minfo,
		lock:      &sync.RWMutex{},
	}
	return newFile
}

func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error {
	log.Debug("swarmfs Attr", "path", sf.path)
	sf.lock.Lock()
	defer sf.lock.Unlock()
	a.Inode = sf.inode
	//TODO: need to get permission as argument
	a.Mode = 0700
	a.Uid = uint32(os.Getuid())
	a.Gid = uint32(os.Getegid())

	if sf.fileSize == -1 {
		reader, _ := sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr)
		quitC := make(chan bool)
		size, err := reader.Size(ctx, quitC)
		if err != nil {
			log.Error("couldn't get size of file", "path", sf.path, "err", err)
			return err
		}
		sf.fileSize = size
		log.Trace("swarmfs Attr", "size", size)
		close(quitC)
	}
	a.Size = uint64(sf.fileSize)
	return nil
}

func (sf *SwarmFile) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	log.Debug("swarmfs Read", "path", sf.path, "req.String", req.String())
	sf.lock.RLock()
	defer sf.lock.RUnlock()
	if sf.reader == nil {
		sf.reader, _ = sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr)
	}
	buf := make([]byte, req.Size)
	n, err := sf.reader.ReadAt(buf, req.Offset)
	if err == io.ErrUnexpectedEOF || err == io.EOF {
		err = nil
	}
	resp.Data = buf[:n]
	sf.reader = nil

	return err
}

func (sf *SwarmFile) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
	log.Debug("swarmfs Write", "path", sf.path, "req.String", req.String())
	if sf.fileSize == 0 && req.Offset == 0 {
		// A new file is created
		err := addFileToSwarm(sf, req.Data, len(req.Data))
		if err != nil {
			return err
		}
		resp.Size = len(req.Data)
	} else if req.Offset <= sf.fileSize {
		totalSize := sf.fileSize + int64(len(req.Data))
		if totalSize > MaxAppendFileSize {
			log.Warn("swarmfs append file size limit reached", "fileSize", sf.fileSize, "writeSize", len(req.Data))
			return errFileSizeMaxLimitReached
		}

		err := appendToExistingFileInSwarm(sf, req.Data, req.Offset, int64(len(req.Data)))
		if err != nil {
			return err
		}
		resp.Size = len(req.Data)
	} else {
		log.Warn("swarmfs invalid write request", "fileSize", sf.fileSize, "offset", req.Offset)
		return errInvalidOffset
	}
	return nil
}
@ -1,35 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"bazil.org/fuse/fs"
)

var (
	_ fs.Node = (*SwarmDir)(nil)
)

type SwarmRoot struct {
	root *SwarmDir
}

func (filesystem *SwarmRoot) Root() (fs.Node, error) {
	return filesystem.root, nil
}
@ -1,65 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fuse

import (
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/swarm/api"
)

const (
	SwarmFSVersion = "0.1"
	mountTimeout   = time.Second * 5
	unmountTimeout = time.Second * 10
	maxFUSEMounts  = 5
)

var (
	swarmfs     *SwarmFS // Swarm file system singleton
	swarmfsLock sync.Once

	inode     uint64 = 1 // global inode
	inodeLock sync.RWMutex
)

type SwarmFS struct {
	swarmApi     *api.API
	activeMounts map[string]*MountInfo
	swarmFsLock  *sync.RWMutex
}

func NewSwarmFS(api *api.API) *SwarmFS {
	swarmfsLock.Do(func() {
		swarmfs = &SwarmFS{
			swarmApi:     api,
			swarmFsLock:  &sync.RWMutex{},
			activeMounts: map[string]*MountInfo{},
		}
	})
	return swarmfs
}

// NewInode returns a new, unique inode number.
// Inode numbers need to be unique; they are used for caching inside fuse.
func NewInode() uint64 {
	inodeLock.Lock()
	defer inodeLock.Unlock()
	inode++
	return inode
}
@ -1,51 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build !linux,!darwin,!freebsd

package fuse

import (
	"errors"
)

var errNoFUSE = errors.New("FUSE is not supported on this platform")

func isFUSEUnsupportedError(err error) bool {
	return err == errNoFUSE
}

type MountInfo struct {
	MountPoint     string
	StartManifest  string
	LatestManifest string
}

func (self *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	return nil, errNoFUSE
}

func (self *SwarmFS) Unmount(mountpoint string) (bool, error) {
	return false, errNoFUSE
}

func (self *SwarmFS) Listmounts() ([]*MountInfo, error) {
	return nil, errNoFUSE
}

func (self *SwarmFS) Stop() error {
	return nil
}
File diff suppressed because it is too large
@ -1,285 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/log"
)

var (
	errEmptyMountPoint      = errors.New("need non-empty mount point")
	errNoRelativeMountPoint = errors.New("invalid path for mount point (need absolute path)")
	errMaxMountCount        = errors.New("max FUSE mount count reached")
	errMountTimeout         = errors.New("mount timeout")
	errAlreadyMounted       = errors.New("mount point is already serving")
)

func isFUSEUnsupportedError(err error) bool {
	if perr, ok := err.(*os.PathError); ok {
		return perr.Op == "open" && perr.Path == "/dev/fuse"
	}
	return err == fuse.ErrOSXFUSENotFound
}

// MountInfo contains information about every active mount
type MountInfo struct {
	MountPoint     string
	StartManifest  string
	LatestManifest string
	rootDir        *SwarmDir
	fuseConnection *fuse.Conn
	swarmApi       *api.API
	lock           *sync.RWMutex
	serveClose     chan struct{}
}

func NewMountInfo(mhash, mpoint string, sapi *api.API) *MountInfo {
	log.Debug("swarmfs NewMountInfo", "hash", mhash, "mount point", mpoint)
	newMountInfo := &MountInfo{
		MountPoint:     mpoint,
		StartManifest:  mhash,
		LatestManifest: mhash,
		rootDir:        nil,
		fuseConnection: nil,
		swarmApi:       sapi,
		lock:           &sync.RWMutex{},
		serveClose:     make(chan struct{}),
	}
	return newMountInfo
}
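// Usage sketch (added for illustration; not part of the original file): with a
// configured *api.API instance, a mount/unmount cycle through the API defined
// in this file looks roughly like:
//
//	swarmfs := NewSwarmFS(swarmApi)
//	mi, err := swarmfs.Mount(manifestHash, "/mnt/swarm")
//	if err == nil {
//		// read files under /mnt/swarm, then:
//		_, err = swarmfs.Unmount("/mnt/swarm")
//	}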
func (swarmfs *SwarmFS) Mount(mhash, mountpoint string) (*MountInfo, error) {
	log.Info("swarmfs", "mounting hash", mhash, "mount point", mountpoint)
	if mountpoint == "" {
		return nil, errEmptyMountPoint
	}
	if !strings.HasPrefix(mountpoint, "/") {
		return nil, errNoRelativeMountPoint
	}
	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}
	log.Trace("swarmfs mount", "cleanedMountPoint", cleanedMountPoint)

	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	noOfActiveMounts := len(swarmfs.activeMounts)
	log.Debug("swarmfs mount", "# active mounts", noOfActiveMounts)
	if noOfActiveMounts >= maxFUSEMounts {
		return nil, errMaxMountCount
	}

	if _, ok := swarmfs.activeMounts[cleanedMountPoint]; ok {
		return nil, errAlreadyMounted
	}

	log.Trace("swarmfs mount: getting manifest tree")
	_, manifestEntryMap, err := swarmfs.swarmApi.BuildDirectoryTree(context.TODO(), mhash, true)
	if err != nil {
		return nil, err
	}

	log.Trace("swarmfs mount: building mount info")
	mi := NewMountInfo(mhash, cleanedMountPoint, swarmfs.swarmApi)

	dirTree := map[string]*SwarmDir{}
	rootDir := NewSwarmDir("/", mi)
	log.Trace("swarmfs mount", "rootDir", rootDir)
	mi.rootDir = rootDir

	log.Trace("swarmfs mount: traversing manifest map")
	for suffix, entry := range manifestEntryMap {
		if suffix == "" { // an empty suffix means the file has no name, i.e. this is the default entry in a manifest; since files without a name cannot be represented, this entry is skipped
			log.Warn("Manifest has an empty-path (default) entry which will be ignored in FUSE mount.")
			continue
		}
		addr := common.Hex2Bytes(entry.Hash)
		fullpath := "/" + suffix
		basepath := filepath.Dir(fullpath)
		parentDir := rootDir
		dirUntilNow := ""
		paths := strings.Split(basepath, "/")
		for i := range paths {
			if paths[i] != "" {
				thisDir := paths[i]
				dirUntilNow = dirUntilNow + "/" + thisDir

				if _, ok := dirTree[dirUntilNow]; !ok {
					dirTree[dirUntilNow] = NewSwarmDir(dirUntilNow, mi)
					parentDir.directories = append(parentDir.directories, dirTree[dirUntilNow])
					parentDir = dirTree[dirUntilNow]
				} else {
					parentDir = dirTree[dirUntilNow]
				}
			}
		}
		thisFile := NewSwarmFile(basepath, filepath.Base(fullpath), mi)
		thisFile.addr = addr

		parentDir.files = append(parentDir.files, thisFile)
	}

	fconn, err := fuse.Mount(cleanedMountPoint, fuse.FSName("swarmfs"), fuse.VolumeName(mhash))
	if isFUSEUnsupportedError(err) {
		log.Error("swarmfs error - FUSE not installed", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	} else if err != nil {
		fuse.Unmount(cleanedMountPoint)
		log.Error("swarmfs error mounting swarm manifest", "mountpoint", cleanedMountPoint, "err", err)
		return nil, err
	}
	mi.fuseConnection = fconn

	serverr := make(chan error, 1)
	go func() {
		log.Info("swarmfs", "serving hash", mhash, "at", cleanedMountPoint)
		filesys := &SwarmRoot{root: rootDir}
		// start serving the actual file system; see note below
		if err := fs.Serve(fconn, filesys); err != nil {
			log.Warn("swarmfs could not serve the requested hash", "error", err)
			serverr <- err
		}
		mi.serveClose <- struct{}{}
	}()

	/*
		IMPORTANT NOTE: the fs.Serve function is blocking.
		Serve builds up the actual fuse file system by calling the
		Attr functions on each SwarmFile, creating the file inodes;
		specifically, it calls the swarm's LazySectionReader.Size() to set the file size.

		This can take some time, and it appears that if we access the fuse file system
		too early, we can bring the tests to deadlock. The assumption so far is that
		at this point the fuse driver hasn't finished initializing the file system.

		Accessing files too early not only deadlocks the tests, but locks access
		to the fuse file completely, resulting in blocked resources at OS level.
		Even a simple `ls /tmp/testDir/testMountDir` could deadlock in a shell.

		The workaround so far is to wait some time to give the OS enough time to
		initialize the fuse file system. During tests, this seemed to address the issue.

		HOWEVER IT SHOULD BE NOTED THAT THIS MAY ONLY BE AN EFFECT,
		AND THE DEADLOCK MAY BE CAUSED BY SOMETHING ELSE BLOCKING ACCESS DUE TO SOME RACE CONDITION
		(caused in the bazil.org library and/or the SwarmRoot, SwarmDir and SwarmFile implementations)
	*/
	time.Sleep(2 * time.Second)

	timer := time.NewTimer(mountTimeout)
	defer timer.Stop()
	// Check if the mount process has an error to report.
	select {
	case <-timer.C:
		log.Warn("swarmfs timed out mounting over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		err := fuse.Unmount(cleanedMountPoint)
		if err != nil {
			return nil, err
		}
		return nil, errMountTimeout
	case err := <-serverr:
		log.Warn("swarmfs error serving over FUSE", "mountpoint", cleanedMountPoint, "err", err)
		err = fuse.Unmount(cleanedMountPoint)
		return nil, err

	case <-fconn.Ready:
		// this signals that the actual mount point from the fuse.Mount call is ready;
		// it does not signal though that the file system from fs.Serve is actually fully built up
		if err := fconn.MountError; err != nil {
			log.Error("Mounting error from fuse driver: ", "err", err)
			return nil, err
		}
		log.Info("swarmfs now served over FUSE", "manifest", mhash, "mountpoint", cleanedMountPoint)
	}

	timer.Stop()
	swarmfs.activeMounts[cleanedMountPoint] = mi
	return mi, nil
}

func (swarmfs *SwarmFS) Unmount(mountpoint string) (*MountInfo, error) {
	swarmfs.swarmFsLock.Lock()
	defer swarmfs.swarmFsLock.Unlock()

	cleanedMountPoint, err := filepath.Abs(filepath.Clean(mountpoint))
	if err != nil {
		return nil, err
	}

	mountInfo := swarmfs.activeMounts[cleanedMountPoint]

	if mountInfo == nil || mountInfo.MountPoint != cleanedMountPoint {
		return nil, fmt.Errorf("swarmfs %s is not mounted", cleanedMountPoint)
	}
	err = fuse.Unmount(cleanedMountPoint)
|
||||
if err != nil {
|
||||
err1 := externalUnmount(cleanedMountPoint)
|
||||
if err1 != nil {
|
||||
errStr := fmt.Sprintf("swarmfs unmount error: %v", err)
|
||||
log.Warn(errStr)
|
||||
return nil, err1
|
||||
}
|
||||
}
|
||||
|
||||
err = mountInfo.fuseConnection.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
delete(swarmfs.activeMounts, cleanedMountPoint)
|
||||
|
||||
<-mountInfo.serveClose
|
||||
|
||||
succString := fmt.Sprintf("swarmfs unmounting %v succeeded", cleanedMountPoint)
|
||||
log.Info(succString)
|
||||
|
||||
return mountInfo, nil
|
||||
}
|
||||
|
||||
func (swarmfs *SwarmFS) Listmounts() []*MountInfo {
|
||||
swarmfs.swarmFsLock.RLock()
|
||||
defer swarmfs.swarmFsLock.RUnlock()
|
||||
rows := make([]*MountInfo, 0, len(swarmfs.activeMounts))
|
||||
for _, mi := range swarmfs.activeMounts {
|
||||
rows = append(rows, mi)
|
||||
}
|
||||
return rows
|
||||
}
|
||||
|
||||
func (swarmfs *SwarmFS) Stop() bool {
|
||||
for mp := range swarmfs.activeMounts {
|
||||
mountInfo := swarmfs.activeMounts[mp]
|
||||
swarmfs.Unmount(mountInfo.MountPoint)
|
||||
}
|
||||
return true
|
||||
}
|
@ -1,121 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// +build linux darwin freebsd

package fuse

import (
	"context"
	"fmt"
	"os/exec"
	"runtime"

	"github.com/ethereum/go-ethereum/swarm/log"
)

// externalUnmount tries to unmount the mount point with the system's own
// unmount tools, as a fallback when fuse.Unmount fails.
func externalUnmount(mountPoint string) error {
	ctx, cancel := context.WithTimeout(context.Background(), unmountTimeout)
	defer cancel()

	// Try generic umount.
	if err := exec.CommandContext(ctx, "umount", mountPoint).Run(); err == nil {
		return nil
	}
	// Try FUSE-specific commands if umount didn't work.
	switch runtime.GOOS {
	case "darwin":
		return exec.CommandContext(ctx, "diskutil", "umount", mountPoint).Run()
	case "linux":
		return exec.CommandContext(ctx, "fusermount", "-u", mountPoint).Run()
	default:
		return fmt.Errorf("swarmfs unmount: unimplemented")
	}
}

func addFileToSwarm(sf *SwarmFile, content []byte, size int) error {
	fkey, mhash, err := sf.mountInfo.swarmApi.AddFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, content, true)
	if err != nil {
		return err
	}

	sf.lock.Lock()
	defer sf.lock.Unlock()
	sf.addr = fkey
	sf.fileSize = int64(size)

	sf.mountInfo.lock.Lock()
	defer sf.mountInfo.lock.Unlock()
	sf.mountInfo.LatestManifest = mhash

	log.Info("swarmfs added new file:", "fname", sf.name, "new Manifest hash", mhash)
	return nil
}

func removeFileFromSwarm(sf *SwarmFile) error {
	mkey, err := sf.mountInfo.swarmApi.RemoveFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, true)
	if err != nil {
		return err
	}

	sf.mountInfo.lock.Lock()
	defer sf.mountInfo.lock.Unlock()
	sf.mountInfo.LatestManifest = mkey

	log.Info("swarmfs removed file:", "fname", sf.name, "new Manifest hash", mkey)
	return nil
}

func removeDirectoryFromSwarm(sd *SwarmDir) error {
	if len(sd.directories) == 0 && len(sd.files) == 0 {
		return nil
	}

	for _, d := range sd.directories {
		err := removeDirectoryFromSwarm(d)
		if err != nil {
			return err
		}
	}

	for _, f := range sd.files {
		err := removeFileFromSwarm(f)
		if err != nil {
			return err
		}
	}

	return nil
}

func appendToExistingFileInSwarm(sf *SwarmFile, content []byte, offset int64, length int64) error {
	fkey, mhash, err := sf.mountInfo.swarmApi.AppendFile(context.TODO(), sf.mountInfo.LatestManifest, sf.path, sf.name, sf.fileSize, content, sf.addr, offset, length, true)
	if err != nil {
		return err
	}

	sf.lock.Lock()
	defer sf.lock.Unlock()
	sf.addr = fkey
	sf.fileSize = sf.fileSize + int64(len(content))

	sf.mountInfo.lock.Lock()
	defer sf.mountInfo.lock.Unlock()
	sf.mountInfo.LatestManifest = mhash

	log.Info("swarmfs appended file:", "fname", sf.name, "new Manifest hash", mhash)
	return nil
}
@ -1,48 +0,0 @@
package log

import (
	l "github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
)

const (
	// CallDepth is set to 1 in order to influence the reported line number of
	// the log message with 1 skipped stack frame of calling l.Output()
	CallDepth = 1
)

// Warn is a convenient alias for log.Warn with stats
func Warn(msg string, ctx ...interface{}) {
	metrics.GetOrRegisterCounter("warn", nil).Inc(1)
	l.Output(msg, l.LvlWarn, CallDepth, ctx...)
}

// Error is a convenient alias for log.Error with stats
func Error(msg string, ctx ...interface{}) {
	metrics.GetOrRegisterCounter("error", nil).Inc(1)
	l.Output(msg, l.LvlError, CallDepth, ctx...)
}

// Crit is a convenient alias for log.Crit with stats
func Crit(msg string, ctx ...interface{}) {
	metrics.GetOrRegisterCounter("crit", nil).Inc(1)
	l.Output(msg, l.LvlCrit, CallDepth, ctx...)
}

// Info is a convenient alias for log.Info with stats
func Info(msg string, ctx ...interface{}) {
	metrics.GetOrRegisterCounter("info", nil).Inc(1)
	l.Output(msg, l.LvlInfo, CallDepth, ctx...)
}

// Debug is a convenient alias for log.Debug with stats
func Debug(msg string, ctx ...interface{}) {
	metrics.GetOrRegisterCounter("debug", nil).Inc(1)
	l.Output(msg, l.LvlDebug, CallDepth, ctx...)
}

// Trace is a convenient alias for log.Trace with stats
func Trace(msg string, ctx ...interface{}) {
	metrics.GetOrRegisterCounter("trace", nil).Inc(1)
	l.Output(msg, l.LvlTrace, CallDepth, ctx...)
}
@ -1,143 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package metrics

import (
	"os"
	"path/filepath"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/metrics"
	gethmetrics "github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/metrics/influxdb"
	"github.com/ethereum/go-ethereum/swarm/log"
	cli "gopkg.in/urfave/cli.v1"
)

var (
	MetricsEnableInfluxDBExportFlag = cli.BoolFlag{
		Name:  "metrics.influxdb.export",
		Usage: "Enable metrics export/push to an external InfluxDB database",
	}
	MetricsEnableInfluxDBAccountingExportFlag = cli.BoolFlag{
		Name:  "metrics.influxdb.accounting",
		Usage: "Enable accounting metrics export/push to an external InfluxDB database",
	}
	MetricsInfluxDBEndpointFlag = cli.StringFlag{
		Name:  "metrics.influxdb.endpoint",
		Usage: "Metrics InfluxDB endpoint",
		Value: "http://127.0.0.1:8086",
	}
	MetricsInfluxDBDatabaseFlag = cli.StringFlag{
		Name:  "metrics.influxdb.database",
		Usage: "Metrics InfluxDB database",
		Value: "metrics",
	}
	MetricsInfluxDBUsernameFlag = cli.StringFlag{
		Name:  "metrics.influxdb.username",
		Usage: "Metrics InfluxDB username",
		Value: "",
	}
	MetricsInfluxDBPasswordFlag = cli.StringFlag{
		Name:  "metrics.influxdb.password",
		Usage: "Metrics InfluxDB password",
		Value: "",
	}
	// Tags are part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB.
	// For example the `host` tag could be used so that we can group all nodes and average a measurement
	// across all of them, but also so that we can select a specific node and inspect its measurements.
	// https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key
	MetricsInfluxDBTagsFlag = cli.StringFlag{
		Name:  "metrics.influxdb.tags",
		Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements",
		Value: "host=localhost",
	}
)

// Flags holds all command-line flags required for metrics collection.
var Flags = []cli.Flag{
	utils.MetricsEnabledFlag,
	MetricsEnableInfluxDBExportFlag,
	MetricsEnableInfluxDBAccountingExportFlag,
	MetricsInfluxDBEndpointFlag,
	MetricsInfluxDBDatabaseFlag,
	MetricsInfluxDBUsernameFlag,
	MetricsInfluxDBPasswordFlag,
	MetricsInfluxDBTagsFlag,
}

// Setup enables swarm metrics collection and, if the relevant flags are set,
// export to InfluxDB.
func Setup(ctx *cli.Context) {
	if gethmetrics.Enabled {
		log.Info("Enabling swarm metrics collection")
		var (
			endpoint               = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name)
			database               = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name)
			username               = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name)
			password               = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name)
			enableExport           = ctx.GlobalBool(MetricsEnableInfluxDBExportFlag.Name)
			enableAccountingExport = ctx.GlobalBool(MetricsEnableInfluxDBAccountingExportFlag.Name)
			datadir                = ctx.GlobalString("datadir")
		)

		// Start system runtime metrics collection
		go gethmetrics.CollectProcessMetrics(4 * time.Second)

		// Start collecting disk metrics
		go datadirDiskUsage(datadir, 4*time.Second)

		gethmetrics.RegisterRuntimeMemStats(metrics.DefaultRegistry)
		go gethmetrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, 4*time.Second)

		tagsMap := utils.SplitTagsFlag(ctx.GlobalString(MetricsInfluxDBTagsFlag.Name))

		if enableExport {
			log.Info("Enabling swarm metrics export to InfluxDB")
			go influxdb.InfluxDBWithTags(gethmetrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "swarm.", tagsMap)
		}

		if enableAccountingExport {
			log.Info("Exporting swarm accounting metrics to InfluxDB")
			go influxdb.InfluxDBWithTags(gethmetrics.AccountingRegistry, 10*time.Second, endpoint, database, username, password, "accounting.", tagsMap)
		}
	}
}

func datadirDiskUsage(path string, d time.Duration) {
	for range time.Tick(d) {
		bytes, err := dirSize(path)
		if err != nil {
			log.Warn("cannot get disk space", "err", err)
		}
		metrics.GetOrRegisterGauge("datadir.usage", nil).Update(bytes)
	}
}

// dirSize returns the cumulative size in bytes of all regular files under path.
func dirSize(path string) (int64, error) {
	var size int64
	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() {
			size += info.Size()
		}
		return nil
	})
	return size, err
}
@ -1,152 +0,0 @@
## Streaming

Streaming is a new protocol of the swarm bzz bundle of protocols.
This protocol provides the basic logic for chunk-based data flow.
It implements simple retrieve requests and delivery using a priority queue.
A data exchange stream is a directional flow of chunks between peers.
The source of the data chunks is the upstream peer, the receiver is called the
downstream peer. Each streaming protocol defines an outgoing streamer
and an incoming streamer, the former installed on the upstream peer,
the latter on the downstream peer.

Subscribe on StreamerPeer launches an incoming streamer that sends
a subscribe msg upstream. The streamer on the upstream peer
handles the subscribe msg by installing the relevant outgoing streamer.
The modules now engage in a process whereby the upstream peer sends a sequence of hashes of
chunks downstream (OfferedHashesMsg). The downstream peer evaluates which hashes are needed
and gets them delivered by sending back a msg (WantedHashesMsg).
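For orientation, here is a minimal sketch of the two messages in that round. The message names come from the text above; the field layouts shown are assumptions for illustration, not the actual stream package definitions.

```go
// Illustrative only: assumed layouts for the offer/want round.
type OfferedHashesMsg struct {
	Stream string // stream name, e.g. a sync stream for one proximity bin
	From   uint64 // start index of the offered batch
	To     uint64 // end index of the offered batch
	Hashes []byte // concatenated chunk hashes of the batch
}

type WantedHashesMsg struct {
	Stream string
	From   uint64
	To     uint64
	Want   []byte // bitvector: bit i set means hash i of the batch is wanted
}
```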
Historical syncing is supported, though it is currently not the right abstraction:
state is kept across sessions by saving a series of intervals after their last
batch actually arrived.

Live streaming is also supported, by starting the session from the first item
after the subscription.

Provable data exchange: in case a stream represents a swarm document's data layer
or higher level chunks, streaming up to a certain index is always provable. It saves on
sending intermediate chunks.

Using the streamer logic, various stream types are easy to implement:

* light node requests:
  * url lookup with offset
  * document download
  * document upload
* syncing:
  * live session syncing
  * historical syncing
* simple retrieve requests and deliveries
* swarm feeds streams
* receipting for finger pointing
## Syncing

Syncing is the process that makes sure storer nodes end up storing all and only the chunks that are requested from them.

### Requirements

- eventual consistency: every historical chunk should be syncable
- since the same chunk can and will arrive from many peers, network traffic should be
  optimised: only one transfer of data per chunk
- explicit request deliveries should be prioritised higher than recent chunks received
  during the ongoing session, which in turn should be higher than historical chunks
- insured chunks should get receipted for finger pointing litigation; the receipts storage
  should be organised efficiently, and the upstream peer should also be able to find these
  receipts for a deleted chunk easily to refute a challenge
- syncing should be resilient to cut connections: metadata should be persisted that
  keeps track of syncing state across sessions, and historical syncing state should survive restarts
- extra data structures to support syncing should be kept to a minimum
- syncing is not organised separately for chunk types (Swarm feed updates vs regular content chunks)
- various types of streams should have their common logic abstracted

Syncing is now entirely mediated by the localstore, i.e., no processes or memory leaks due to network contention.
When a new chunk is stored, its chunk hash is indexed by proximity bin.
Peers synchronise by getting the chunks that are closer to the downstream peer than to the upstream one;
consequently, peers just sync all stored items for the kademlia bin the receiving peer falls into.
The special case of nearest neighbour sets is handled by the downstream peer
indicating they want to sync all kademlia bins with proximity equal to or higher
than their depth.
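For concreteness, a minimal sketch of the proximity order computation referred to here; the real implementation lives in the swarm/pot package.

```go
// proximity returns the proximity order (PO) of two equal-length addresses:
// the number of leading bits they share.
func proximity(a, b []byte) int {
	for i := range a {
		x := a[i] ^ b[i]
		if x != 0 {
			po := i * 8
			for x&0x80 == 0 { // walk to the highest differing bit
				x <<= 1
				po++
			}
			return po
		}
	}
	return len(a) * 8 // identical addresses: maximal proximity
}
```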
This sync state represents the initial state of a sync connection session.
Retrieval is dictated by downstream peers simply using a special streamer protocol.

Syncing chunks created during the session by the upstream peer is called live session syncing,
while syncing of earlier chunks is historical syncing.

Once the relevant batch of offered hashes is retrieved, the downstream peer looks up all hash segments in its localstore
and sends the upstream peer a message with a bitvector indicating which chunks are
missing. In turn the upstream peer sends the relevant chunk data alongside their indexes.
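The bitvector used in this message is the swarm/network/bitvector type that appears later in this changeset. A minimal sketch of how the downstream side might fill it, with `have` standing in for the localstore lookup:

```go
import "github.com/ethereum/go-ethereum/swarm/network/bitvector"

// wantedBits marks which of the offered hashes are missing locally.
func wantedBits(hashes [][]byte, have func([]byte) bool) ([]byte, error) {
	bv, err := bitvector.New(len(hashes)) // errors on an empty batch
	if err != nil {
		return nil, err
	}
	for i, h := range hashes {
		if !have(h) {
			bv.Set(i, true) // bit i set: chunk i is wanted
		}
	}
	return bv.Bytes(), nil
}
```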
On sending chunks there is a priority queue system. If, while looking up hashes in its localstore,
the downstream peer hits an open request, then a retrieve request is sent immediately to the upstream peer indicating
that no extra round of checks is needed. If another peer's syncer hits the same open request, it is slightly unsafe not to ask
that peer too: if the first one disconnects before delivering, or fails to deliver and therefore gets
disconnected, we should still be able to continue with the other. The minimal redundant traffic coming from such simultaneous
eventualities should be sufficiently rare not to warrant more complex treatment.
Session syncing involves the downstream peer requesting a new state on a bin from the upstream peer.
Using the new state, the range (of chunks) between the previous state and the new one is retrieved
and chunks are requested identically to the historical case. After receiving all the missing chunks
from the new hashes, the downstream peer will request a new range. If this happens before the upstream peer updates to a new state,
we say that session syncing is live, or that the two peers are in sync. In general, the time interval passed from the downstream peer's request up to the current session cursor is a good indication of a permanent (probably increasing) lag.

If there is no historical backlog, and the downstream peer has an acceptable 'last synced' tag, then it is said to be fully synced with the upstream peer.
If a peer is fully synced with all its storer peers, it can advertise itself as globally fully synced.

The downstream peer persists the record of the last synced offset. When the two peers disconnect and
reconnect, syncing can start from there.
This situation however can also happen while historical syncing is not yet complete.
Effectively this means that the peer needs to persist a record of an arbitrary array of offset ranges covered.
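A minimal sketch of such a record of covered ranges; the real bookkeeping lives in the stream package's intervals store, and the types here are assumptions for illustration.

```go
// interval is one covered range of stream offsets, inclusive on both ends.
type interval struct{ From, To uint64 }

// add folds a newly covered range into the set, merging any ranges that
// overlap or are adjacent to it. Ordering is not preserved in this sketch.
func add(ranges []interval, n interval) []interval {
	var out []interval
	for _, r := range ranges {
		if r.To+1 < n.From || n.To+1 < r.From {
			out = append(out, r) // disjoint: keep as-is
			continue
		}
		// overlapping or adjacent: fold r into n
		if r.From < n.From {
			n.From = r.From
		}
		if r.To > n.To {
			n.To = r.To
		}
	}
	return append(out, n)
}
```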
### Delivery requests

Once the appropriate ranges of the hashstream are retrieved and buffered, the downstream peer just scans the hashes,
looks them up in the localstore and, if not found, creates a request entry.
The range is referenced by the chunk index. Alongside the name (indicating the stream, e.g., content chunks for bin 6) and the range,
the downstream peer sends a 128-bit-long bitvector indicating which chunks are needed.
Newly created requests are bound together in a waitgroup which, when done, will prompt sending the next one.
To be able to do checking and storage concurrently, we keep a buffer of one, i.e., we start with two batches of hashes
(see the sketch below). If there is nothing to give, the upstream peer's SetNextBatch is blocking.
Subscription ends with an unsubscribe, which removes the syncer from the map.

Cancelling requests (for instance the late chunks of an erasure batch) should be a channel closed
on the request.

A simple request is also a subscribe.
Different streaming protocols are different p2p protocols with the same message types.
The constructor is the Run function itself, which takes a streamerpeer as argument.
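A minimal sketch of that buffer-of-one pipelining, with all names illustrative: a channel of capacity one keeps a second batch in flight while the first is being checked and stored.

```go
// pipeline overlaps fetching the next batch with processing the current one.
func pipeline(next func() ([]byte, bool), process func([]byte)) {
	batches := make(chan []byte, 1) // capacity 1: two batches in flight
	done := make(chan struct{})
	go func() {
		defer close(done)
		for b := range batches {
			process(b) // look up hashes, request and store missing chunks
		}
	}()
	for {
		b, ok := next() // SetNextBatch-style call; may block if nothing to give
		if !ok {
			break
		}
		batches <- b
	}
	close(batches)
	<-done
}
```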
### Provable streams

The swarm hash over the hash stream has many advantages. It implements a provable data transfer
and provides efficient storage for receipts in the form of inclusion proofs usable for finger pointing litigation.
When challenged on a missing chunk, the upstream peer will provide an inclusion proof of a chunk hash against the state of the
sync stream. In order to be able to generate such an inclusion proof, the upstream peer needs to store the hash index (counting consecutive hash-size segments) alongside the chunk data, and preserve it even when the chunk data is deleted, until the chunk is no longer insured.
If there is no valid insurance on the files, the entry may be deleted.
As long as the chunk is preserved, no takeover proof will be needed since the node can respond to any challenge.
However, once the node needs to delete an insured chunk for capacity reasons, a receipt should be available to
refute the challenge by finger pointing to a downstream peer.
As part of the deletion protocol then, hashes of insured chunks to be removed are pushed to an infinite stream for every bin.

The downstream peer on the other hand needs to make sure that it can only be finger pointed about a chunk it did receive and store.
For this the check of a state should be exhaustive. If historical syncing finishes on one state, all hashes before are covered, with no
surprises. In other words, historical syncing is self verifying. With session syncing however, it is not enough to check going back covering the range from the old offset to the new. Continuity (i.e., that the new state is an extension of the old) needs to be verified: after the downstream peer reads the range into a buffer, it appends the buffer to the last known state at the last known offset and verifies that the resulting hash matches
the latest state. Past intervals of historical syncing are checked via the session root.
The upstream peer signs the states, which downstream peers can use as handover proofs.
Downstream peers sign off on a state together with an initial offset.

Once historical syncing is complete and the session does not lag, the downstream peer only preserves the latest upstream state and stores the signed version.

The upstream peer needs to keep the latest takeover states: each deleted chunk's hash should be covered by a takeover proof of at least one peer. If historical syncing is complete, the upstream peer typically will store only the latest takeover proof from the downstream peer.
Crucially, the structure is totally independent of the number of peers in the bin, so it scales extremely well.
## Implementation

The simplest protocol just involves the upstream peer prefixing the key with the kademlia proximity order (say 0-15 or 0-31)
and simply iterating on the index per bin when syncing with a peer (sketched below).

Priority queues are used for sending chunks, so that user-triggered requests are responded to first, session syncing second, and historical syncing with lower priority.
The request on chunks remains implemented as a dataless entry in the memory store.
The lifecycle of this object should be more carefully thought through, i.e., when it fails to retrieve, it should be removed.
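A minimal sketch of that key scheme, with the helper name illustrative: prefixing by proximity order makes each bin a contiguous, iterable key range in the index.

```go
// binKey prefixes a chunk hash with its proximity order so that all
// chunks of one bin sort together in the index.
func binKey(po uint8, chunkHash []byte) []byte {
	return append([]byte{po}, chunkHash...)
}
```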
@ -1,62 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bitvector

import (
	"errors"
)

var errInvalidLength = errors.New("invalid length")

// BitVector is a fixed-length sequence of bits backed by a byte slice.
type BitVector struct {
	len int
	b   []byte
}

// New creates a new bit vector with the given length of bits.
func New(l int) (bv *BitVector, err error) {
	return NewFromBytes(make([]byte, l/8+1), l)
}

// NewFromBytes creates a bit vector of length l bits on top of the provided byte slice.
func NewFromBytes(b []byte, l int) (bv *BitVector, err error) {
	if l <= 0 {
		return nil, errInvalidLength
	}
	if len(b)*8 < l {
		return nil, errInvalidLength
	}
	return &BitVector{
		len: l,
		b:   b,
	}, nil
}

// Get returns the value of bit i.
func (bv *BitVector) Get(i int) bool {
	bi := i / 8
	return bv.b[bi]&(0x1<<uint(i%8)) != 0
}

// Set sets bit i to value v.
func (bv *BitVector) Set(i int, v bool) {
	bi := i / 8
	cv := bv.Get(i)
	if cv != v {
		bv.b[bi] ^= 0x1 << uint8(i%8)
	}
}

// Bytes returns the underlying byte slice.
func (bv *BitVector) Bytes() []byte {
	return bv.b
}
@ -1,104 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bitvector

import "testing"

func TestBitvectorNew(t *testing.T) {
	_, err := New(0)
	if err != errInvalidLength {
		t.Errorf("expected err %v, got %v", errInvalidLength, err)
	}

	_, err = NewFromBytes(nil, 0)
	if err != errInvalidLength {
		t.Errorf("expected err %v, got %v", errInvalidLength, err)
	}

	_, err = NewFromBytes([]byte{0}, 9)
	if err != errInvalidLength {
		t.Errorf("expected err %v, got %v", errInvalidLength, err)
	}

	_, err = NewFromBytes(make([]byte, 8), 8)
	if err != nil {
		t.Error(err)
	}
}

func TestBitvectorGetSet(t *testing.T) {
	for _, length := range []int{
		1,
		2,
		4,
		8,
		9,
		15,
		16,
	} {
		bv, err := New(length)
		if err != nil {
			t.Errorf("error for length %v: %v", length, err)
		}

		for i := 0; i < length; i++ {
			if bv.Get(i) {
				t.Errorf("expected false for element on index %v", i)
			}
		}

		func() {
			defer func() {
				if err := recover(); err == nil {
					t.Errorf("expecting panic")
				}
			}()
			bv.Get(length + 8)
		}()

		for i := 0; i < length; i++ {
			bv.Set(i, true)
			for j := 0; j < length; j++ {
				if j == i {
					if !bv.Get(j) {
						t.Errorf("element on index %v is not set to true", i)
					}
				} else {
					if bv.Get(j) {
						t.Errorf("element on index %v is not false", j)
					}
				}
			}

			bv.Set(i, false)

			if bv.Get(i) {
				t.Errorf("element on index %v is not set to false", i)
			}
		}
	}
}

func TestBitvectorNewFromBytesGet(t *testing.T) {
	bv, err := NewFromBytes([]byte{8}, 8)
	if err != nil {
		t.Error(err)
	}
	if !bv.Get(3) {
		t.Fatalf("element 3 is not set to true: state %08b", bv.b[0])
	}
}
@ -1,30 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"strings"
)

// LogAddrs formats a list of addresses as a comma-separated list of the
// hex encoding of their first four bytes, for logging.
func LogAddrs(nns [][]byte) string {
	var nnsa []string
	for _, nn := range nns {
		nnsa = append(nnsa, fmt.Sprintf("%08x", nn[:4]))
	}
	return strings.Join(nnsa, ", ")
}
@ -1,220 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"context"
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/swarm/pot"
)

// discovery bzz extension for requesting and relaying node address records

var sortPeers = noSortPeers

// Peer wraps BzzPeer and embeds the Kademlia overlay connectivity driver
type Peer struct {
	*BzzPeer
	kad       *Kademlia
	sentPeers bool            // whether we already sent peers closer to this address
	mtx       sync.RWMutex    // guards peers and depth
	peers     map[string]bool // tracks node records sent to the peer
	depth     uint8           // the proximity order advertised by the remote as depth of saturation
}

// NewPeer constructs a discovery peer
func NewPeer(p *BzzPeer, kad *Kademlia) *Peer {
	d := &Peer{
		kad:     kad,
		BzzPeer: p,
		peers:   make(map[string]bool),
	}
	// record remote as seen so we never send a peer its own record
	d.seen(p.BzzAddr)
	return d
}

// HandleMsg is the message handler that delegates incoming messages
func (d *Peer) HandleMsg(ctx context.Context, msg interface{}) error {
	switch msg := msg.(type) {

	case *peersMsg:
		return d.handlePeersMsg(msg)

	case *subPeersMsg:
		return d.handleSubPeersMsg(msg)

	default:
		return fmt.Errorf("unknown message type: %T", msg)
	}
}

// NotifyDepth sends a message to all connections if the depth of saturation is changed
func NotifyDepth(depth uint8, kad *Kademlia) {
	f := func(val *Peer, po int) bool {
		val.NotifyDepth(depth)
		return true
	}
	kad.EachConn(nil, 255, f)
}

// NotifyPeer informs all peers about a newly added node
func NotifyPeer(p *BzzAddr, k *Kademlia) {
	f := func(val *Peer, po int) bool {
		val.NotifyPeer(p, uint8(po))
		return true
	}
	k.EachConn(p.Address(), 255, f)
}

// NotifyPeer notifies the remote node (recipient) about a peer if
// the peer's PO is within the recipient's advertised depth
// OR the peer is closer to the recipient than self,
// unless already notified during the connection session
func (d *Peer) NotifyPeer(a *BzzAddr, po uint8) {
	// return early if the peer is out of range or has already been sent
	if (po < d.getDepth() && pot.ProxCmp(d.kad.BaseAddr(), d, a) != 1) || d.seen(a) {
		return
	}
	resp := &peersMsg{
		Peers: []*BzzAddr{a},
	}
	go d.Send(context.TODO(), resp)
}

// NotifyDepth sends a subPeersMsg to the receiver notifying them about
// a change in the depth of saturation
func (d *Peer) NotifyDepth(po uint8) {
	go d.Send(context.TODO(), &subPeersMsg{Depth: po})
}

/*
peersMsg is the message to pass peer information.
It is always a response to a peersRequestMsg.

The encoding of a peer address is identical to the devp2p base protocol peers
messages: [IP, Port, NodeID].
Note that a node's FileStore address is not the NodeID but the hash of the NodeID.

TODO:
To mitigate against spurious peers messages, requests should be remembered
and the correctness of responses should be checked.

If the proxBin of peers in the response is incorrect the sender should be
disconnected.
*/

// peersMsg encapsulates an array of peer addresses
// used for communicating about known peers,
// relevant for bootstrapping connectivity and updating peersets
type peersMsg struct {
	Peers []*BzzAddr
}

// String pretty prints a peersMsg
func (msg peersMsg) String() string {
	return fmt.Sprintf("%T: %v", msg, msg.Peers)
}

// handlePeersMsg is called by the protocol when receiving a peerset (for a target address).
// The list of nodes ([]PeerAddr in peersMsg) is added to the overlay db using the
// Register interface method.
func (d *Peer) handlePeersMsg(msg *peersMsg) error {
	// register all addresses
	if len(msg.Peers) == 0 {
		return nil
	}

	for _, a := range msg.Peers {
		d.seen(a)
		NotifyPeer(a, d.kad)
	}
	return d.kad.Register(msg.Peers...)
}

// subPeersMsg communicates the depth of the overlay table of a peer
type subPeersMsg struct {
	Depth uint8
}

// String returns the pretty printer
func (msg subPeersMsg) String() string {
	return fmt.Sprintf("%T: request peers > PO%02d. ", msg, msg.Depth)
}

// handleSubPeersMsg handles an incoming subPeersMsg.
// This message represents the saturation depth of the remote peer;
// saturation depth is the radius within which the peer subscribes to peers.
// The first time this is received, we send peer info on all
// our connected peers that fall within the peer's saturation depth;
// otherwise this depth is just recorded on the peer, so that
// subsequent new connections are sent iff they fall within the radius.
func (d *Peer) handleSubPeersMsg(msg *subPeersMsg) error {
	d.setDepth(msg.Depth)
	// only send peers after the initial subPeersMsg
	if !d.sentPeers {
		var peers []*BzzAddr
		// iterate connections in ascending order of distance from the remote address
		d.kad.EachConn(d.Over(), 255, func(p *Peer, po int) bool {
			// terminate if we are beyond the radius
			if uint8(po) < msg.Depth {
				return false
			}
			if !d.seen(p.BzzAddr) { // seen here just records the peer as sent
				peers = append(peers, p.BzzAddr)
			}
			return true
		})
		// if useful peers are found, send them over
		if len(peers) > 0 {
			go d.Send(context.TODO(), &peersMsg{Peers: sortPeers(peers)})
		}
	}
	d.sentPeers = true
	return nil
}

// seen takes a peer address and checks if it was sent to the peer already;
// if not, it marks the address as sent
func (d *Peer) seen(p *BzzAddr) bool {
	d.mtx.Lock()
	defer d.mtx.Unlock()
	k := string(p.Address())
	if d.peers[k] {
		return true
	}
	d.peers[k] = true
	return false
}

func (d *Peer) getDepth() uint8 {
	d.mtx.RLock()
	defer d.mtx.RUnlock()
	return d.depth
}

func (d *Peer) setDepth(depth uint8) {
	d.mtx.Lock()
	defer d.mtx.Unlock()
	d.depth = depth
}

func noSortPeers(peers []*BzzAddr) []*BzzAddr {
	return peers
}
@ -1,264 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"crypto/ecdsa"
	crand "crypto/rand"
	"encoding/binary"
	"fmt"
	"math/rand"
	"net"
	"sort"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/pot"
)

// TestSubPeersMsg verifies that, after connect, an outgoing subPeersMsg is sent.
func TestSubPeersMsg(t *testing.T) {
	params := NewHiveParams()
	s, pp, err := newHiveTester(params, 1, nil)
	if err != nil {
		t.Fatal(err)
	}

	node := s.Nodes[0]
	raddr := NewAddr(node)
	pp.Register(raddr)

	// start the hive and wait for the connection
	pp.Start(s.Server)
	defer pp.Stop()

	// expect the outgoing subPeersMsg to the peer
	err = s.TestExchanges(p2ptest.Exchange{
		Label: "outgoing subPeersMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg:  &subPeersMsg{Depth: 0},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

const (
	maxPO         = 8 // PO of pivot and control; chosen to test enough cases but not run too long
	maxPeerPO     = 6 // pivot has no peers closer than this to the control peer
	maxPeersPerPO = 3
)

// TestInitialPeersMsg tests if the peersMsg response to an incoming subPeersMsg is correct
func TestInitialPeersMsg(t *testing.T) {
	for po := 0; po < maxPO; po++ {
		for depth := 0; depth < maxPO; depth++ {
			t.Run(fmt.Sprintf("PO=%d,advertised depth=%d", po, depth), func(t *testing.T) {
				testInitialPeersMsg(t, po, depth)
			})
		}
	}
}

// testInitialPeersMsg tests that the correct set of peer info is sent
// to another peer after receiving their subPeersMsg request
func testInitialPeersMsg(t *testing.T, peerPO, peerDepth int) {
	// generate random pivot address
	prvkey, err := crypto.GenerateKey()
	if err != nil {
		t.Fatal(err)
	}

	defer func(orig func([]*BzzAddr) []*BzzAddr) {
		sortPeers = orig
	}(sortPeers)
	sortPeers = testSortPeers
	pivotAddr := pot.NewAddressFromBytes(PrivateKeyToBzzKey(prvkey))
	// generate the control peer's address at peerPO wrt the pivot
	peerAddr := pot.RandomAddressAt(pivotAddr, peerPO)
	// construct kademlia and hive
	to := NewKademlia(pivotAddr[:], NewKadParams())
	hive := NewHive(NewHiveParams(), to, nil)

	// expected addrs in the peersMsg response
	var expBzzAddrs []*BzzAddr
	connect := func(a pot.Address, po int) (addrs []*BzzAddr) {
		n := rand.Intn(maxPeersPerPO)
		for i := 0; i < n; i++ {
			peer, err := newDiscPeer(pot.RandomAddressAt(a, po))
			if err != nil {
				t.Fatal(err)
			}
			hive.On(peer)
			addrs = append(addrs, peer.BzzAddr)
		}
		return addrs
	}
	register := func(a pot.Address, po int) {
		addr := pot.RandomAddressAt(a, po)
		hive.Register(&BzzAddr{OAddr: addr[:]})
	}

	// generate connected and just registered peers
	for po := maxPeerPO; po >= 0; po-- {
		// create a fake connected peer at po from peerAddr
		ons := connect(peerAddr, po)
		// create a fake registered address at po from peerAddr
		register(peerAddr, po)
		// only connected peers at PO >= peerDepth are expected in the response
		if po < peerDepth {
			continue
		}
		expBzzAddrs = append(expBzzAddrs, ons...)
	}

	// add extra connections closer to the pivot than the control peer
	for po := peerPO + 1; po < maxPO; po++ {
		ons := connect(pivotAddr, po)
		if peerDepth <= peerPO {
			expBzzAddrs = append(expBzzAddrs, ons...)
		}
	}

	// create a special bzzBaseTester in which we can associate `enode.ID` to the `bzzAddr` we created above
	s, _, err := newBzzBaseTesterWithAddrs(prvkey, [][]byte{peerAddr[:]}, DiscoverySpec, hive.Run)
	if err != nil {
		t.Fatal(err)
	}
	defer s.Stop()

	// peerID to use in the protocol tester testExchange expect/trigger
	peerID := s.Nodes[0].ID()
	// block until the control peer is found among hive peers
	found := false
	for attempts := 0; attempts < 2000; attempts++ {
		found = hive.Peer(peerID) != nil
		if found {
			break
		}
		time.Sleep(1 * time.Millisecond)
	}

	if !found {
		t.Fatal("timeout waiting for peer connection to start")
	}

	// pivotDepth is the advertised depth of the pivot node we expect in the outgoing subPeersMsg
	pivotDepth := hive.Saturation()
	// the test exchange is as follows:
	// 1. pivot sends to the control peer a `subPeersMsg` advertising its depth (ignored)
	// 2. peer sends to pivot a `subPeersMsg` advertising its own depth (arbitrarily chosen)
	// 3. pivot responds with `peersMsg` with the set of expected peers
	err = s.TestExchanges(
		p2ptest.Exchange{
			Label: "outgoing subPeersMsg",
			Expects: []p2ptest.Expect{
				{
					Code: 1,
					Msg:  &subPeersMsg{Depth: uint8(pivotDepth)},
					Peer: peerID,
				},
			},
		},
		p2ptest.Exchange{
			Label: "trigger subPeersMsg and expect peersMsg",
			Triggers: []p2ptest.Trigger{
				{
					Code: 1,
					Msg:  &subPeersMsg{Depth: uint8(peerDepth)},
					Peer: peerID,
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code:    0,
					Msg:     &peersMsg{Peers: testSortPeers(expBzzAddrs)},
					Peer:    peerID,
					Timeout: 100 * time.Millisecond,
				},
			},
		})

	// for values maxPeerPO < peerPO < maxPO the pivot has no peers to offer to the control peer;
	// in this case, no peersMsg will be sent out, and we would run into a timeout
	if len(expBzzAddrs) == 0 {
		if err != nil {
			if err.Error() != "exchange #1 \"trigger subPeersMsg and expect peersMsg\": timed out" {
				t.Fatalf("expected timeout, got %v", err)
			}
			return
		}
		t.Fatalf("expected timeout, got no error")
	}

	if err != nil {
		t.Fatal(err)
	}
}

func testSortPeers(peers []*BzzAddr) []*BzzAddr {
	comp := func(i, j int) bool {
		vi := binary.BigEndian.Uint64(peers[i].OAddr)
		vj := binary.BigEndian.Uint64(peers[j].OAddr)
		return vi < vj
	}
	sort.Slice(peers, comp)
	return peers
}

// as we are not creating a real node via the protocol,
// we need to create the discovery peer objects for the additional kademlia
// nodes manually
func newDiscPeer(addr pot.Address) (*Peer, error) {
	pKey, err := ecdsa.GenerateKey(crypto.S256(), crand.Reader)
	if err != nil {
		return nil, err
	}
	pubKey := pKey.PublicKey
	nod := enode.NewV4(&pubKey, net.IPv4(127, 0, 0, 1), 0, 0)
	bzzAddr := &BzzAddr{OAddr: addr[:], UAddr: []byte(nod.String())}
	id := nod.ID()
	p2pPeer := p2p.NewPeer(id, id.String(), nil)
	return NewPeer(&BzzPeer{
		Peer:    protocols.NewPeer(p2pPeer, &dummyMsgRW{}, DiscoverySpec),
		BzzAddr: bzzAddr,
	}, nil), nil
}

type dummyMsgRW struct{}

func (d *dummyMsgRW) ReadMsg() (p2p.Msg, error) {
	return p2p.Msg{}, nil
}
func (d *dummyMsgRW) WriteMsg(msg p2p.Msg) error {
	return nil
}
@ -1,93 +0,0 @@
package network

import (
	"fmt"
	"io"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/swarm/log"
)

// ENRAddrEntry is the entry type to store the bzz key in the enode
type ENRAddrEntry struct {
	data []byte
}

// NewENRAddrEntry wraps a byte address as an ENR entry
func NewENRAddrEntry(addr []byte) *ENRAddrEntry {
	return &ENRAddrEntry{
		data: addr,
	}
}

// Address returns the raw address bytes
func (b ENRAddrEntry) Address() []byte {
	return b.data
}

// ENRKey implements enr.Entry
func (b ENRAddrEntry) ENRKey() string {
	return "bzzkey"
}

// EncodeRLP implements rlp.Encoder
func (b ENRAddrEntry) EncodeRLP(w io.Writer) error {
	log.Debug("in encoderlp", "b", b, "p", fmt.Sprintf("%p", &b))
	return rlp.Encode(w, &b.data)
}

// DecodeRLP implements rlp.Decoder
func (b *ENRAddrEntry) DecodeRLP(s *rlp.Stream) error {
	byt, err := s.Bytes()
	if err != nil {
		return err
	}
	b.data = byt
	log.Debug("in decoderlp", "b", b, "p", fmt.Sprintf("%p", &b))
	return nil
}

// ENRLightNodeEntry is the ENR entry flagging a light node
type ENRLightNodeEntry bool

// ENRKey implements enr.Entry
func (b ENRLightNodeEntry) ENRKey() string {
	return "bzzlightnode"
}

// ENRBootNodeEntry is the ENR entry flagging a boot node
type ENRBootNodeEntry bool

// ENRKey implements enr.Entry
func (b ENRBootNodeEntry) ENRKey() string {
	return "bzzbootnode"
}

func getENRBzzPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *protocols.Spec) *BzzPeer {
	var lightnode ENRLightNodeEntry
	var bootnode ENRBootNodeEntry

	// retrieve the ENR record data
	record := p.Node().Record()
	record.Load(&lightnode)
	record.Load(&bootnode)

	// get the address; separate function as long as we need swarm/network:NewAddr() to call it
	addr := getENRBzzAddr(p.Node())

	// build the peer using the retrieved data
	return &BzzPeer{
		Peer:      protocols.NewPeer(p, rw, spec),
		LightNode: bool(lightnode),
		BzzAddr:   addr,
	}
}

func getENRBzzAddr(nod *enode.Node) *BzzAddr {
	var addr ENRAddrEntry

	record := nod.Record()
	record.Load(&addr)

	return &BzzAddr{
		OAddr: addr.data,
		UAddr: []byte(nod.String()),
	}
}
@ -1,336 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p/enode"
|
||||
"github.com/ethereum/go-ethereum/swarm/storage"
|
||||
"github.com/ethereum/go-ethereum/swarm/tracing"
|
||||
olog "github.com/opentracing/opentracing-go/log"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSearchTimeout = 1 * time.Second
|
||||
// maximum number of forwarded requests (hops), to make sure requests are not
|
||||
// forwarded forever in peer loops
|
||||
maxHopCount uint8 = 20
|
||||
)
|
||||
|
||||
// Time to consider peer to be skipped.
|
||||
// Also used in stream delivery.
|
||||
var RequestTimeout = 10 * time.Second
|
||||
|
||||
type RequestFunc func(context.Context, *Request) (*enode.ID, chan struct{}, error)
|
||||
|
||||
// Fetcher is created when a chunk is not found locally. It starts a request handler loop once and
|
||||
// keeps it alive until all active requests are completed. This can happen:
|
||||
// 1. either because the chunk is delivered
|
||||
// 2. or because the requester cancelled/timed out
|
||||
// Fetcher self destroys itself after it is completed.
|
||||
// TODO: cancel all forward requests after termination
|
||||
type Fetcher struct {
|
||||
protoRequestFunc RequestFunc // request function fetcher calls to issue retrieve request for a chunk
|
||||
addr storage.Address // the address of the chunk to be fetched
|
||||
offerC chan *enode.ID // channel of sources (peer node id strings)
|
||||
requestC chan uint8 // channel for incoming requests (with the hopCount value in it)
|
||||
searchTimeout time.Duration
|
||||
skipCheck bool
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
type Request struct {
|
||||
Addr storage.Address // chunk address
|
||||
Source *enode.ID // nodeID of peer to request from (can be nil)
|
||||
SkipCheck bool // whether to offer the chunk first or deliver directly
|
||||
peersToSkip *sync.Map // peers not to request chunk from (only makes sense if source is nil)
|
||||
HopCount uint8 // number of forwarded requests (hops)
|
||||
}
|
||||
|
||||
// NewRequest returns a new instance of Request based on chunk address skip check and
|
||||
// a map of peers to skip.
|
||||
func NewRequest(addr storage.Address, skipCheck bool, peersToSkip *sync.Map) *Request {
|
||||
return &Request{
|
||||
Addr: addr,
|
||||
SkipCheck: skipCheck,
|
||||
peersToSkip: peersToSkip,
|
||||
}
|
||||
}
|
||||
|
||||
// SkipPeer reports whether the peer with nodeID should not be requested to deliver a chunk.
// Peers to skip are kept per Request and for a time period of RequestTimeout.
// This function is used in the stream package in Delivery.RequestFromPeers to optimize
// requests for chunks.
func (r *Request) SkipPeer(nodeID string) bool {
	val, ok := r.peersToSkip.Load(nodeID)
	if !ok {
		return false
	}
	t, ok := val.(time.Time)
	if ok && time.Now().After(t.Add(RequestTimeout)) {
		// deadline expired
		r.peersToSkip.Delete(nodeID)
		return false
	}
	return true
}
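
// exampleSkipPeerUsage is an illustrative sketch and was not part of the
// original file: it shows how a caller can use SkipPeer to pick a delivery
// peer. Entries expire after RequestTimeout, so a peer that failed to deliver
// becomes eligible again later; a non-time.Time entry is skipped permanently.
func exampleSkipPeerUsage(r *Request, candidates []string) (chosen string, ok bool) {
	for _, id := range candidates {
		if r.SkipPeer(id) {
			continue // requested recently (or skipped permanently), try the next one
		}
		return id, true
	}
	return "", false
}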
// FetcherFactory is initialised with a request function and can create fetchers
type FetcherFactory struct {
	request   RequestFunc
	skipCheck bool
}

// NewFetcherFactory takes a request function and a skip check parameter and creates a FetcherFactory
func NewFetcherFactory(request RequestFunc, skipCheck bool) *FetcherFactory {
	return &FetcherFactory{
		request:   request,
		skipCheck: skipCheck,
	}
}
// New constructs a new Fetcher for the given chunk. Peers in peersToSkip
// are not requested to deliver the given chunk. peersToSkip should always
// contain the peers which are actively requesting this chunk, to make sure we
// don't request the chunk back from them.
// The created Fetcher is started and returned.
func (f *FetcherFactory) New(ctx context.Context, source storage.Address, peers *sync.Map) storage.NetFetcher {
	fetcher := NewFetcher(ctx, source, f.request, f.skipCheck)
	go fetcher.run(peers)
	return fetcher
}
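
// exampleFactoryUsage is an illustrative sketch and was not part of the
// original file: it wires a FetcherFactory to the hypothetical exampleRequest
// function above and creates a started fetcher for one chunk address.
func exampleFactoryUsage(ctx context.Context, addr storage.Address) storage.NetFetcher {
	factory := NewFetcherFactory(exampleRequest, false)
	peersToSkip := &sync.Map{} // would contain peers already requesting this chunk
	return factory.New(ctx, addr, peersToSkip)
}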
// NewFetcher creates a new Fetcher for the given chunk address using the given request function.
func NewFetcher(ctx context.Context, addr storage.Address, rf RequestFunc, skipCheck bool) *Fetcher {
	return &Fetcher{
		addr:             addr,
		protoRequestFunc: rf,
		offerC:           make(chan *enode.ID),
		requestC:         make(chan uint8),
		searchTimeout:    defaultSearchTimeout,
		skipCheck:        skipCheck,
		ctx:              ctx,
	}
}
// Offer is called when an upstream peer offers the chunk via syncing as part of `OfferedHashesMsg`
// and the node does not have the chunk locally.
func (f *Fetcher) Offer(source *enode.ID) {
	// First we need this select to make sure that we return if the context is done
	select {
	case <-f.ctx.Done():
		return
	default:
	}

	// The next select alone would not guarantee that we return if the context is done, it could
	// potentially push to offerC instead if offerC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
	select {
	case f.offerC <- source:
	case <-f.ctx.Done():
	}
}
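
// exampleGuardedSend is an illustrative sketch and was not part of the
// original file: it isolates the two-select pattern used by Offer and Request.
// With a single select, if both the send and ctx.Done() are ready, Go chooses
// a case via uniform pseudo-random selection (step 2 in the Go spec on select
// statements), so the send could still go through after cancellation. Checking
// ctx.Done() first with a default case guarantees we bail out once the
// context is done.
func exampleGuardedSend(ctx context.Context, c chan<- int, v int) bool {
	select {
	case <-ctx.Done():
		return false // context already done: never attempt the send
	default:
	}
	select {
	case c <- v:
		return true
	case <-ctx.Done():
		return false
	}
}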
// Request is called when an upstream peer requests the chunk as part of `RetrieveRequestMsg`,
// or from a local request through FileStore, and the node does not have the chunk locally.
func (f *Fetcher) Request(hopCount uint8) {
	// First we need this select to make sure that we return if the context is done
	select {
	case <-f.ctx.Done():
		return
	default:
	}

	if hopCount >= maxHopCount {
		log.Debug("fetcher request hop count limit reached", "hops", hopCount)
		return
	}

	// The next select alone would not guarantee that we return if the context is done, it could
	// potentially push to requestC instead if requestC is available (see number 2 in https://golang.org/ref/spec#Select_statements)
	select {
	case f.requestC <- hopCount + 1:
	case <-f.ctx.Done():
	}
}
// run keeps the Fetcher alive within the lifecycle of the passed context
func (f *Fetcher) run(peers *sync.Map) {
	var (
		doRequest bool             // determines if retrieval is initiated in the current iteration
		wait      *time.Timer      // timer for search timeout
		waitC     <-chan time.Time // timer channel
		sources   []*enode.ID      // known sources, i.e. peers that offered the chunk
		requested bool             // true if the chunk was actually requested
		hopCount  uint8
	)
	gone := make(chan *enode.ID) // channel to signal that a peer we requested from disconnected

	// loop that keeps the fetching process alive:
	// after every request a timer is set. If it goes off we request again from another peer.
	// Note that the previous request is still alive and has the chance to deliver, so
	// requesting again extends the search, i.e.
	// if a peer we requested from is gone we issue a new request, so the number of active
	// requests never decreases
	for {
		select {

		// incoming offer
		case source := <-f.offerC:
			log.Trace("new source", "peer addr", source, "request addr", f.addr)
			// 1) the chunk is offered by a syncing peer
			// add to known sources
			sources = append(sources, source)
			// launch a request to the source iff the chunk was requested (not just expected because it's offered by a syncing peer)
			doRequest = requested

		// incoming request
		case hopCount = <-f.requestC:
			// 2) chunk is requested, set requested flag
			// launch a request iff none has been launched yet
			doRequest = !requested
			log.Trace("new request", "request addr", f.addr, "doRequest", doRequest)
			requested = true

		// peer we requested from is gone. fall back to another
		// and remove the peer from the peers map
		case id := <-gone:
			peers.Delete(id.String())
			doRequest = requested
			log.Trace("peer gone", "peer id", id.String(), "request addr", f.addr, "doRequest", doRequest)

		// search timeout: too much time passed since the last request,
		// extend the search to a new peer if we can find one
		case <-waitC:
			doRequest = requested
			log.Trace("search timed out: requesting", "request addr", f.addr, "doRequest", doRequest)

		// the Fetcher's context is done, we can quit
		case <-f.ctx.Done():
			log.Trace("terminate fetcher", "request addr", f.addr)
			// TODO: send cancellations to all peers left over in peers map (i.e., those we requested from)
			return
		}

		// need to issue a new request
		if doRequest {
			var err error
			sources, err = f.doRequest(gone, peers, sources, hopCount)
			if err != nil {
				log.Info("unable to request", "request addr", f.addr, "err", err)
			}
		}

		// if the wait timer is not created yet, create it; otherwise reset it
		if requested {
			if wait == nil {
				wait = time.NewTimer(f.searchTimeout)
				defer wait.Stop()
				waitC = wait.C
			} else {
				// stop the timer and drain the channel if it was not drained earlier
				if !wait.Stop() {
					select {
					case <-wait.C:
					default:
					}
				}
				// reset the timer to go off after searchTimeout
				wait.Reset(f.searchTimeout)
			}
		}
		doRequest = false
	}
}
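
// exampleResetTimer is an illustrative sketch and was not part of the original
// file: it isolates the timer reuse idiom from the run loop above. Before
// Reset, a timer must be stopped, and its channel drained if it has already
// fired without being received, otherwise a stale tick could be read after
// the reset (see the time.Timer documentation).
func exampleResetTimer(t *time.Timer, d time.Duration) {
	if !t.Stop() {
		select {
		case <-t.C: // drain the stale tick
		default: // the tick was already consumed by the receiver
		}
	}
	t.Reset(d)
}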
// doRequest attempts to find a peer to request the chunk from
// * first it tries to request explicitly from peers that are known to have offered the chunk
// * if there are no such peers (available) it tries to request it from a peer closest to the chunk address
//   excluding those in the peersToSkip map
// * if no such peer is found an error is returned
//
// if a request is successful,
// * the peer's address is added to the set of peers to skip
// * the peer's address is removed from prospective sources, and
// * a goroutine is started that reports on the gone channel if the peer is disconnected (or terminated its streamer)
func (f *Fetcher) doRequest(gone chan *enode.ID, peersToSkip *sync.Map, sources []*enode.ID, hopCount uint8) ([]*enode.ID, error) {
	var i int
	var sourceID *enode.ID
	var quit chan struct{}

	req := &Request{
		Addr:        f.addr,
		SkipCheck:   f.skipCheck,
		peersToSkip: peersToSkip,
		HopCount:    hopCount,
	}

	foundSource := false
	// iterate over known sources
	for i = 0; i < len(sources); i++ {
		req.Source = sources[i]
		var err error
		log.Trace("fetcher.doRequest", "request addr", f.addr, "peer", req.Source.String())
		sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
		if err == nil {
			// remove the peer from known sources
			// Note: we can modify sources although we are looping on it, because we break from the loop immediately
			sources = append(sources[:i], sources[i+1:]...)
			foundSource = true
			break
		}
	}

	// if there are no known sources, or none are available, we try to request from the closest node
	if !foundSource {
		req.Source = nil
		var err error
		sourceID, quit, err = f.protoRequestFunc(f.ctx, req)
		if err != nil {
			// no peers found to request from
			return sources, err
		}
	}
	// add the peer to the set of peers to skip from now on
	peersToSkip.Store(sourceID.String(), time.Now())

	// if the quit channel is closed, it indicates that the source peer we requested from
	// disconnected or terminated its streamer.
	// Here we start a goroutine that watches this channel and reports the source peer on the gone channel;
	// this goroutine quits when the fetcher's context is done, to prevent a goroutine leak.
	go func() {
		select {
		case <-quit:
			gone <- sourceID
		case <-f.ctx.Done():
		}

		// finish the request span
		spanId := fmt.Sprintf("stream.send.request.%v.%v", *sourceID, req.Addr)
		span := tracing.ShiftSpanByKey(spanId)

		if span != nil {
			span.LogFields(olog.String("finish", "from doRequest"))
			span.Finish()
		}
	}()
	return sources, nil
}
@ -1,476 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"context"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

var requestedPeerID = enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
var sourcePeerID = enode.HexID("99d8594b52298567d2ca3f4c441a5ba0140ee9245e26460d01102a52773c73b9")
// mockRequester pushes every request to the requestC channel when its doRequest function is called
type mockRequester struct {
	requestC  chan *Request   // when a request is coming it is pushed to requestC
	waitTimes []time.Duration // with waitTimes[i] you can define how long to wait on the i-th request (optional)
	count     int             // counts the number of requests
	quitC     chan struct{}
}

func newMockRequester(waitTimes ...time.Duration) *mockRequester {
	return &mockRequester{
		requestC:  make(chan *Request),
		waitTimes: waitTimes,
		quitC:     make(chan struct{}),
	}
}
func (m *mockRequester) doRequest(ctx context.Context, request *Request) (*enode.ID, chan struct{}, error) {
	waitTime := time.Duration(0)
	if m.count < len(m.waitTimes) {
		waitTime = m.waitTimes[m.count]
		m.count++
	}
	time.Sleep(waitTime)
	m.requestC <- request

	// if there is a Source in the request use that, if not use the global requestedPeerID
	source := request.Source
	if source == nil {
		source = &requestedPeerID
	}
	return source, m.quitC, nil
}
// TestFetcherSingleRequest creates a Fetcher using mockRequester, and runs it with a sample set of peers to skip.
// mockRequester pushes a Request on a channel every time the request function is called. Using
// this channel we test if calling Fetcher.Request calls the request function, and whether it uses
// the correct peers to skip which we provided for the fetcher.run function.
func TestFetcherSingleRequest(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	peers := []string{"a", "b", "c", "d"}
	peersToSkip := &sync.Map{}
	for _, p := range peers {
		peersToSkip.Store(p, time.Now())
	}

	go fetcher.run(peersToSkip)

	fetcher.Request(0)

	select {
	case request := <-requester.requestC:
		// request should contain all peers from peersToSkip provided to the fetcher
		for _, p := range peers {
			if _, ok := request.peersToSkip.Load(p); !ok {
				t.Fatalf("request.peersToSkip misses peer")
			}
		}

		// the source peer should eventually be added to peersToSkip as well
		time.Sleep(100 * time.Millisecond)
		if _, ok := request.peersToSkip.Load(requestedPeerID.String()); !ok {
			t.Fatalf("request.peersToSkip does not contain peer returned by the request function")
		}

		// hopCount in the forwarded request should be incremented
		if request.HopCount != 1 {
			t.Fatalf("Expected request.HopCount 1 got %v", request.HopCount)
		}

	// fetch should trigger a request; if it doesn't happen in time, the test should fail
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetch timeout")
	}
}
// TestFetcherCancelStopsFetcher tests that a cancelled fetcher does not initiate further requests even if its fetch function is called
func TestFetcherCancelStopsFetcher(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	// we start the fetcher, and then we immediately cancel the context
	go fetcher.run(peersToSkip)
	cancel()

	// we call Request on the fetcher whose context is already cancelled
	fetcher.Request(0)

	// fetcher should not initiate a request; we can only check by waiting a bit and making sure no request happens
	select {
	case <-requester.requestC:
		t.Fatalf("cancelled fetcher initiated request")
	case <-time.After(200 * time.Millisecond):
	}
}
// TestFetcherCancelStopsRequest tests that calling Request on a fetcher with a cancelled context does not initiate a request
func TestFetcherCancelStopsRequest(t *testing.T) {
	t.Skip("since context is now per fetcher, this test is likely redundant")

	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	// we start the fetcher with an active context
	go fetcher.run(peersToSkip)

	// we call Request
	fetcher.Request(0)

	// fetcher should not initiate a request; we can only check by waiting a bit and making sure no request happens
	select {
	case <-requester.requestC:
		t.Fatalf("cancelled fetch function initiated request")
	case <-time.After(200 * time.Millisecond):
	}

	// if there is another Request while the context is active, there should be a request, because the fetcher itself is not cancelled
	fetcher.Request(0)

	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("expected request")
	}
}
// TestFetcherOfferUsesSource tests Fetcher Offer behavior.
// In this case there should be one (and only one) request initiated from the source peer, and the
// source nodeID should appear in the peersToSkip map.
func TestFetcherOfferUsesSource(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	// start the fetcher
	go fetcher.run(peersToSkip)

	// call the Offer function with the source peer
	fetcher.Offer(&sourcePeerID)

	// fetcher should not initiate a request yet
	select {
	case <-requester.requestC:
		t.Fatalf("fetcher initiated request")
	case <-time.After(200 * time.Millisecond):
	}

	// call Request after the Offer
	fetcher.Request(0)

	// there should be exactly 1 request coming from fetcher
	var request *Request
	select {
	case request = <-requester.requestC:
		if *request.Source != sourcePeerID {
			t.Fatalf("Expected source id %v got %v", sourcePeerID, request.Source)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetcher did not initiate request")
	}

	select {
	case <-requester.requestC:
		t.Fatalf("Fetcher number of requests expected 1 got 2")
	case <-time.After(200 * time.Millisecond):
	}

	// the source peer should eventually be added to peersToSkip
	time.Sleep(100 * time.Millisecond)
	if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
		t.Fatalf("SourcePeerId not added to peersToSkip")
	}
}
// TestFetcherOfferAfterRequestUsesSourceFromContext tests that when a source is offered
// after a request has already been made, the next request is sent to that source
// and the source is added to peersToSkip.
func TestFetcherOfferAfterRequestUsesSourceFromContext(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	// start the fetcher
	go fetcher.run(peersToSkip)

	// call Request first
	fetcher.Request(0)

	// there should be a request coming from fetcher
	var request *Request
	select {
	case request = <-requester.requestC:
		if request.Source != nil {
			t.Fatalf("Incorrect source peer id, expected nil got %v", request.Source)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetcher did not initiate request")
	}

	// after the Request call Offer
	fetcher.Offer(&sourcePeerID)

	// there should be a request coming from fetcher
	select {
	case request = <-requester.requestC:
		if *request.Source != sourcePeerID {
			t.Fatalf("Incorrect source peer id, expected %v got %v", sourcePeerID, request.Source)
		}
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetcher did not initiate request")
	}

	// the source peer should eventually be added to peersToSkip
	time.Sleep(100 * time.Millisecond)
	if _, ok := request.peersToSkip.Load(sourcePeerID.String()); !ok {
		t.Fatalf("SourcePeerId not added to peersToSkip")
	}
}
// TestFetcherRetryOnTimeout tests that fetch retries after searchTimeout has passed
func TestFetcherRetryOnTimeout(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)
	// set searchTimeout to a low value so the test is quicker
	fetcher.searchTimeout = 250 * time.Millisecond

	peersToSkip := &sync.Map{}

	// start the fetcher
	go fetcher.run(peersToSkip)

	// call the fetch function with an active context
	fetcher.Request(0)

	// after 100ms the first request should have been initiated
	time.Sleep(100 * time.Millisecond)

	select {
	case <-requester.requestC:
	default:
		t.Fatalf("fetch did not initiate request")
	}

	// after another 100ms no new request should be initiated, because the search timeout is 250ms
	time.Sleep(100 * time.Millisecond)

	select {
	case <-requester.requestC:
		t.Fatalf("unexpected request from fetcher")
	default:
	}

	// after another 300ms the search timeout is over, there should be a new request
	time.Sleep(300 * time.Millisecond)

	select {
	case <-requester.requestC:
	default:
		t.Fatalf("fetch did not retry request")
	}
}
// TestFetcherFactory creates a FetcherFactory and checks that the factory really creates and starts
// a Fetcher when New is called. We test the fetching functionality just by checking whether
// a request is initiated when Request is called on the returned fetcher.
func TestFetcherFactory(t *testing.T) {
	requester := newMockRequester(100 * time.Millisecond)
	addr := make([]byte, 32)
	fetcherFactory := NewFetcherFactory(requester.doRequest, false)

	peersToSkip := &sync.Map{}

	fetcher := fetcherFactory.New(context.Background(), addr, peersToSkip)

	fetcher.Request(0)

	// check that the created fetcher really starts and initiates a request
	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("fetch timeout")
	}
}
// TestFetcherRequestQuitRetriesRequest tests that the fetcher retries the request
// when the peer it requested from closes its quit channel (i.e. goes away).
func TestFetcherRequestQuitRetriesRequest(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	// make the searchTimeout long so that the request is surely not
	// retried because of a timeout
	fetcher.searchTimeout = 10 * time.Second

	peersToSkip := &sync.Map{}

	go fetcher.run(peersToSkip)

	fetcher.Request(0)

	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("request is not initiated")
	}

	close(requester.quitC)

	select {
	case <-requester.requestC:
	case <-time.After(200 * time.Millisecond):
		t.Fatalf("request is not initiated after failed request")
	}
}
// TestRequestSkipPeer checks that the SkipPeer function skips a provided peer
// and does not skip an unknown one.
func TestRequestSkipPeer(t *testing.T) {
	addr := make([]byte, 32)
	peers := []enode.ID{
		enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8"),
		enode.HexID("99d8594b52298567d2ca3f4c441a5ba0140ee9245e26460d01102a52773c73b9"),
	}

	peersToSkip := new(sync.Map)
	peersToSkip.Store(peers[0].String(), time.Now())
	r := NewRequest(addr, false, peersToSkip)

	if !r.SkipPeer(peers[0].String()) {
		t.Errorf("peer not skipped")
	}

	if r.SkipPeer(peers[1].String()) {
		t.Errorf("peer skipped")
	}
}
// TestRequestSkipPeerExpired checks that a peer to skip is no longer skipped
// after RequestTimeout has passed.
func TestRequestSkipPeerExpired(t *testing.T) {
	addr := make([]byte, 32)
	peer := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

	// set RequestTimeout to a low value and reset it after the test
	defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
	RequestTimeout = 250 * time.Millisecond

	peersToSkip := new(sync.Map)
	peersToSkip.Store(peer.String(), time.Now())
	r := NewRequest(addr, false, peersToSkip)

	if !r.SkipPeer(peer.String()) {
		t.Errorf("peer not skipped")
	}

	time.Sleep(500 * time.Millisecond)

	if r.SkipPeer(peer.String()) {
		t.Errorf("peer skipped")
	}
}
// TestRequestSkipPeerPermanent checks that a peer is skipped permanently, even
// after RequestTimeout has passed, if the value stored for it in the
// peersToSkip map is not a time.Time.
func TestRequestSkipPeerPermanent(t *testing.T) {
	addr := make([]byte, 32)
	peer := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")

	// set RequestTimeout to a low value and reset it after the test
	defer func(t time.Duration) { RequestTimeout = t }(RequestTimeout)
	RequestTimeout = 250 * time.Millisecond

	peersToSkip := new(sync.Map)
	peersToSkip.Store(peer.String(), true)
	r := NewRequest(addr, false, peersToSkip)

	if !r.SkipPeer(peer.String()) {
		t.Errorf("peer not skipped")
	}

	time.Sleep(500 * time.Millisecond)

	if !r.SkipPeer(peer.String()) {
		t.Errorf("peer not skipped")
	}
}
// TestFetcherMaxHopCount tests that a request whose hopCount has already
// reached maxHopCount does not result in a retrieve request being initiated.
func TestFetcherMaxHopCount(t *testing.T) {
	requester := newMockRequester()
	addr := make([]byte, 32)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	fetcher := NewFetcher(ctx, addr, requester.doRequest, true)

	peersToSkip := &sync.Map{}

	go fetcher.run(peersToSkip)

	// if hopCount is already at max no request should be initiated
	fetcher.Request(maxHopCount)

	select {
	case <-requester.requestC:
		t.Fatalf("fetcher initiated request above the hop count limit")
	case <-time.After(200 * time.Millisecond):
	}
}
@ -1,251 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/state"
)
/*
Hive is the logistic manager of the swarm.

When the hive is started, a forever loop is launched that
asks the kademlia node table
to suggest peers to bootstrap connectivity.
*/

// HiveParams holds the config options for the hive
type HiveParams struct {
	Discovery             bool  // whether to enable peer discovery
	PeersBroadcastSetSize uint8 // how many peers to use when relaying
	MaxPeersPerRequest    uint8 // max size for peer address batches
	KeepAliveInterval     time.Duration
}
// NewHiveParams returns hive config with default values
func NewHiveParams() *HiveParams {
	return &HiveParams{
		Discovery:             true,
		PeersBroadcastSetSize: 3,
		MaxPeersPerRequest:    5,
		KeepAliveInterval:     500 * time.Millisecond,
	}
}
// Hive manages network connections of the swarm node
type Hive struct {
	*HiveParams                   // settings
	*Kademlia                     // the overlay connectivity driver
	Store       state.Store       // storage interface to save peers across sessions
	addPeer     func(*enode.Node) // server callback to connect to a peer
	// bookkeeping
	lock   sync.Mutex
	peers  map[enode.ID]*BzzPeer
	ticker *time.Ticker
}
// NewHive constructs a new hive
// HiveParams: config parameters
// Kademlia: connectivity driver using a network topology
// StateStore: to save peers across sessions
func NewHive(params *HiveParams, kad *Kademlia, store state.Store) *Hive {
	return &Hive{
		HiveParams: params,
		Kademlia:   kad,
		Store:      store,
		peers:      make(map[enode.ID]*BzzPeer),
	}
}
// Start starts the hive; it receives the p2p.Server only at startup.
// The server is used to connect to a peer based on its NodeID or enode URL;
// these are called on the p2p.Server which runs on the node.
func (h *Hive) Start(server *p2p.Server) error {
	log.Info("Starting hive", "baseaddr", fmt.Sprintf("%x", h.BaseAddr()[:4]))
	// if a state store is specified, load peers to prepopulate the overlay address book
	if h.Store != nil {
		log.Info("Detected an existing store, trying to load peers")
		if err := h.loadPeers(); err != nil {
			log.Error(fmt.Sprintf("%08x hive encountered an error trying to load peers", h.BaseAddr()[:4]))
			return err
		}
	}
	// assign the p2p.Server#AddPeer function to connect to peers
	h.addPeer = server.AddPeer
	// ticker to keep the hive alive
	h.ticker = time.NewTicker(h.KeepAliveInterval)
	// this loop is doing bootstrapping and maintains a healthy table
	go h.connect()
	return nil
}
// Stop terminates the update loop and saves the peers
func (h *Hive) Stop() error {
	log.Info(fmt.Sprintf("%08x hive stopping, saving peers", h.BaseAddr()[:4]))
	h.ticker.Stop()
	if h.Store != nil {
		if err := h.savePeers(); err != nil {
			return fmt.Errorf("could not save peers to persistence store: %v", err)
		}
		if err := h.Store.Close(); err != nil {
			return fmt.Errorf("could not close file handle to persistence store: %v", err)
		}
	}
	log.Info(fmt.Sprintf("%08x hive stopped, dropping peers", h.BaseAddr()[:4]))
	h.EachConn(nil, 255, func(p *Peer, _ int) bool {
		log.Info(fmt.Sprintf("%08x dropping peer %08x", h.BaseAddr()[:4], p.Address()[:4]))
		p.Drop()
		return true
	})

	log.Info(fmt.Sprintf("%08x all peers dropped", h.BaseAddr()[:4]))
	return nil
}
// connect is a forever loop:
// at each iteration it asks the overlay driver to suggest the most preferred peer to connect to,
// and advertises the saturation depth if it changed
func (h *Hive) connect() {
	for range h.ticker.C {

		addr, depth, changed := h.SuggestPeer()
		if h.Discovery && changed {
			NotifyDepth(uint8(depth), h.Kademlia)
		}
		if addr == nil {
			continue
		}

		log.Trace(fmt.Sprintf("%08x hive connect() suggested %08x", h.BaseAddr()[:4], addr.Address()[:4]))
		under, err := enode.ParseV4(string(addr.Under()))
		if err != nil {
			log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err))
			continue
		}
		log.Trace(fmt.Sprintf("%08x attempt to connect to bee %08x", h.BaseAddr()[:4], addr.Address()[:4]))
		h.addPeer(under)
	}
}
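
// exampleHiveLifecycle is an illustrative sketch and was not part of the
// original file: the typical lifecycle of a Hive. A nil state store is valid,
// peers are then simply not persisted across sessions; baseAddr and server
// are assumed to come from the caller's node setup.
func exampleHiveLifecycle(baseAddr []byte, server *p2p.Server) error {
	hive := NewHive(NewHiveParams(), NewKademlia(baseAddr, NewKadParams()), nil)
	if err := hive.Start(server); err != nil {
		return err
	}
	// ... run the node; the connect loop keeps suggesting peers meanwhile
	return hive.Stop()
}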
// Run is the protocol run function that is executed for each peer connection
func (h *Hive) Run(p *BzzPeer) error {
	h.trackPeer(p)
	defer h.untrackPeer(p)

	dp := NewPeer(p, h.Kademlia)
	depth, changed := h.On(dp)
	// if we want discovery, advertise change of depth
	if h.Discovery {
		if changed {
			// if depth changed, send to all peers
			NotifyDepth(depth, h.Kademlia)
		} else {
			// otherwise just send depth to the new peer
			dp.NotifyDepth(depth)
		}
		NotifyPeer(p.BzzAddr, h.Kademlia)
	}
	defer h.Off(dp)
	return dp.Run(dp.HandleMsg)
}
func (h *Hive) trackPeer(p *BzzPeer) {
	h.lock.Lock()
	h.peers[p.ID()] = p
	h.lock.Unlock()
}

func (h *Hive) untrackPeer(p *BzzPeer) {
	h.lock.Lock()
	delete(h.peers, p.ID())
	h.lock.Unlock()
}
// NodeInfo function is used by the p2p.server RPC interface to display
// protocol specific node information
func (h *Hive) NodeInfo() interface{} {
	return h.String()
}

// PeerInfo function is used by the p2p.server RPC interface to display
// protocol specific information about any connected peer, referred to by its NodeID
func (h *Hive) PeerInfo(id enode.ID) interface{} {
	p := h.Peer(id)

	if p == nil {
		return nil
	}
	addr := NewAddr(p.Node())
	return struct {
		OAddr hexutil.Bytes
		UAddr hexutil.Bytes
	}{
		OAddr: addr.OAddr,
		UAddr: addr.UAddr,
	}
}
// Peer returns a bzz peer from the Hive. If there is no peer
// with the provided enode id, a nil value is returned.
func (h *Hive) Peer(id enode.ID) *BzzPeer {
	h.lock.Lock()
	defer h.lock.Unlock()

	return h.peers[id]
}
// loadPeers loads the persisted peer addresses from the state store
// and registers them with the kademlia address book.
func (h *Hive) loadPeers() error {
	var as []*BzzAddr
	err := h.Store.Get("peers", &as)
	if err != nil {
		if err == state.ErrNotFound {
			log.Info(fmt.Sprintf("hive %08x: no persisted peers found", h.BaseAddr()[:4]))
			return nil
		}
		return err
	}
	log.Info(fmt.Sprintf("hive %08x: peers loaded", h.BaseAddr()[:4]))

	return h.Register(as...)
}
// savePeers persists the known peer addresses to the state store.
func (h *Hive) savePeers() error {
	var peers []*BzzAddr
	h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int) bool {
		if pa == nil {
			log.Warn(fmt.Sprintf("empty addr: %v", i))
			return true
		}
		log.Trace("saving peer", "peer", pa)
		peers = append(peers, pa)
		return true
	})
	if err := h.Store.Put("peers", peers); err != nil {
		return fmt.Errorf("could not save peers: %v", err)
	}
	return nil
}
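
// examplePeerPersistence is an illustrative sketch and was not part of the
// original file: the round-trip behind savePeers and loadPeers. A DB-backed
// state store encodes the []*BzzAddr slice under the "peers" key, and the
// next session reads it back before calling Register.
func examplePeerPersistence(dir string, addrs []*BzzAddr) error {
	store, err := state.NewDBStore(dir)
	if err != nil {
		return err
	}
	defer store.Close()
	if err := store.Put("peers", addrs); err != nil {
		return err
	}
	var loaded []*BzzAddr
	return store.Get("peers", &loaded)
}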
@ -1,177 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"io/ioutil"
	"os"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/state"
)
func newHiveTester(params *HiveParams, n int, store state.Store) (*bzzTester, *Hive, error) {
	// setup
	prvkey, err := crypto.GenerateKey()
	if err != nil {
		return nil, nil, err
	}
	addr := PrivateKeyToBzzKey(prvkey)
	to := NewKademlia(addr, NewKadParams())
	pp := NewHive(params, to, store) // hive

	bt, err := newBzzBaseTester(n, prvkey, DiscoverySpec, pp.Run)
	if err != nil {
		return nil, nil, err
	}
	return bt, pp, nil
}
// TestRegisterAndConnect verifies that the protocol runs successfully
// and that the peer connection exists afterwards
func TestRegisterAndConnect(t *testing.T) {
	params := NewHiveParams()
	s, pp, err := newHiveTester(params, 1, nil)
	if err != nil {
		t.Fatal(err)
	}

	node := s.Nodes[0]
	raddr := NewAddr(node)
	pp.Register(raddr)

	// start the hive
	err = pp.Start(s.Server)
	if err != nil {
		t.Fatal(err)
	}
	defer pp.Stop()

	// both the hive connect and the disconnect check have time delays,
	// therefore we need to verify that the peer is connected,
	// so that we are sure that the disconnect timeout doesn't complete
	// before the hive connect method has run at least once
	timeout := time.After(time.Second)
	for {
		select {
		case <-timeout:
			t.Fatalf("expected connection")
		default:
		}
		i := 0
		pp.Kademlia.EachConn(nil, 256, func(addr *Peer, po int) bool {
			i++
			return true
		})
		if i > 0 {
			break
		}
		time.Sleep(time.Millisecond)
	}

	// check that the connection actually exists;
	// the timeout error means no disconnection events
	// were received within a certain timeout
	err = s.TestDisconnected(&p2ptest.Disconnect{
		Peer:  s.Nodes[0].ID(),
		Error: nil,
	})

	if err == nil || err.Error() != "timed out waiting for peers to disconnect" {
		t.Fatalf("expected no disconnection event")
	}
}
// TestHiveStatePersistance creates a protocol simulation with n peers for a node.
// After the protocols complete, the node is shut down and the state is stored.
// Another simulation is created, where 0 nodes are created, but where the stored state is passed.
// The test succeeds if all the peers from the stored state are known after the protocols of the
// second simulation have completed.
//
// Actual connectivity is not in scope for this test, as the peers loaded from state are not known to
// the simulation; the test only verifies that the peers are known to the node.
func TestHiveStatePersistance(t *testing.T) {
	dir, err := ioutil.TempDir("", "hive_test_store")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	const peersCount = 5

	startHive := func(t *testing.T, dir string) (h *Hive, cleanupFunc func()) {
		store, err := state.NewDBStore(dir)
		if err != nil {
			t.Fatal(err)
		}

		params := NewHiveParams()
		params.Discovery = false

		prvkey, err := crypto.GenerateKey()
		if err != nil {
			t.Fatal(err)
		}

		h = NewHive(params, NewKademlia(PrivateKeyToBzzKey(prvkey), NewKadParams()), store)
		s := p2ptest.NewProtocolTester(prvkey, 0, func(p *p2p.Peer, rw p2p.MsgReadWriter) error { return nil })

		if err := h.Start(s.Server); err != nil {
			t.Fatal(err)
		}

		cleanupFunc = func() {
			err := h.Stop()
			if err != nil {
				t.Fatal(err)
			}

			s.Stop()
		}
		return h, cleanupFunc
	}

	h1, cleanup1 := startHive(t, dir)
	peers := make(map[string]bool)
	for i := 0; i < peersCount; i++ {
		raddr := RandomAddr()
		h1.Register(raddr)
		peers[raddr.String()] = true
	}
	cleanup1()

	// start the hive again and check that we know of all expected peers
	h2, cleanup2 := startHive(t, dir)
	cleanup2()

	i := 0
	h2.Kademlia.EachAddr(nil, 256, func(addr *BzzAddr, po int) bool {
		delete(peers, addr.String())
		i++
		return true
	})
	if i != peersCount {
		t.Fatalf("invalid number of entries: got %v, want %v", i, peersCount)
	}
	if len(peers) != 0 {
		t.Fatalf("%d peers left over: %v", len(peers), peers)
	}
}
@ -1,911 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"bytes"
	"fmt"
	"math/rand"
	"strings"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/pot"
	sv "github.com/ethereum/go-ethereum/swarm/version"
)
/*

Taking the proximity order relative to a fixed point x classifies the points in
the space (n byte long byte sequences) into bins. Items in each bin are at
most half as distant from x as items in the previous bin. Given a sample of
uniformly distributed items (a hash function over arbitrary sequence) the
proximity scale maps onto a series of subsets with cardinalities on a negative
exponential scale.

It also has the property that any two items belonging to the same bin are at
most half as distant from each other as they are from x.

If we think of a random sample of items in the bins as connections in a network of
interconnected nodes, then relative proximity can serve as the basis for local
decisions for graph traversal where the task is to find a route between two
points. Since in every hop the finite distance halves, there is
a guaranteed constant maximum limit on the number of hops needed to reach one
node from the other.
*/

var Pof = pot.DefaultPof(256)
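
// exampleProximityOrder is an illustrative sketch and was not part of the
// original file: proximity order (PO) is the number of leading bits two
// equal-length addresses share. For instance, 0b1010... and 0b1000... agree
// on their first two bits, so their PO is 2; each extra shared bit at least
// halves the distance between the addresses.
func exampleProximityOrder(a, b []byte) int {
	for i := range a {
		x := a[i] ^ b[i]
		if x != 0 {
			po := i * 8
			for x&0x80 == 0 { // count the equal leading bits within this byte
				x <<= 1
				po++
			}
			return po
		}
	}
	return len(a) * 8 // identical addresses: maximal proximity
}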
// KadParams holds the config params for Kademlia
type KadParams struct {
	// adjustable parameters
	MaxProxDisplay    int   // number of rows the table shows
	NeighbourhoodSize int   // nearest neighbour core minimum cardinality
	MinBinSize        int   // minimum number of peers in a row
	MaxBinSize        int   // maximum number of peers in a row before pruning
	RetryInterval     int64 // initial interval before a peer is first redialed
	RetryExponent     int   // exponent to multiply retry intervals with
	MaxRetries        int   // maximum number of redial attempts
	// function to sanction or prevent suggesting a peer
	Reachable func(*BzzAddr) bool `json:"-"`
}
// NewKadParams returns a params struct with default values
func NewKadParams() *KadParams {
	return &KadParams{
		MaxProxDisplay:    16,
		NeighbourhoodSize: 2,
		MinBinSize:        2,
		MaxBinSize:        4,
		RetryInterval:     4200000000, // 4.2 sec
		MaxRetries:        42,
		RetryExponent:     2,
	}
}
// Kademlia is a table of live peers and a db of known peers (node records)
type Kademlia struct {
	lock       sync.RWMutex
	*KadParams                 // Kademlia configuration parameters
	base       []byte          // immutable base address of the table
	addrs      *pot.Pot        // pot container for known peer addresses
	conns      *pot.Pot        // pot container for live peer connections
	depth      uint8           // stores the last current depth of saturation
	nDepth     int             // stores the last neighbourhood depth
	nDepthMu   sync.RWMutex    // protects neighbourhood depth nDepth
	nDepthSig  []chan struct{} // signals when neighbourhood depth nDepth is changed
}
// NewKademlia creates a Kademlia table for base address addr
// with parameters as in params.
// If params is nil, it uses default values.
func NewKademlia(addr []byte, params *KadParams) *Kademlia {
	if params == nil {
		params = NewKadParams()
	}
	return &Kademlia{
		base:      addr,
		KadParams: params,
		addrs:     pot.NewPot(nil, 0),
		conns:     pot.NewPot(nil, 0),
	}
}
// entry represents a Kademlia table entry (an extension of BzzAddr)
type entry struct {
	*BzzAddr
	conn    *Peer
	seenAt  time.Time
	retries int
}

// newEntry creates a kademlia peer record from a *BzzAddr
func newEntry(p *BzzAddr) *entry {
	return &entry{
		BzzAddr: p,
		seenAt:  time.Now(),
	}
}
// Label is a short tag for the entry for debug
func Label(e *entry) string {
	return fmt.Sprintf("%s (%d)", e.Hex()[:4], e.retries)
}

// Hex is the hexadecimal serialisation of the entry address
func (e *entry) Hex() string {
	return fmt.Sprintf("%x", e.Address())
}
// Register enters each address as a kademlia peer record into the
// database of known peer addresses
func (k *Kademlia) Register(peers ...*BzzAddr) error {
	k.lock.Lock()
	defer k.lock.Unlock()

	metrics.GetOrRegisterCounter("kad.register", nil).Inc(1)

	var known, size int
	for _, p := range peers {
		log.Trace("kademlia trying to register", "addr", p)
		// error if self received, peer should know better
		// and should be punished for this
		if bytes.Equal(p.Address(), k.base) {
			return fmt.Errorf("add peers: %x is self", k.base)
		}
		var found bool
		k.addrs, _, found, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
			// if not found
			if v == nil {
				log.Trace("registering new peer", "addr", p)
				// insert new offline peer into addrs
				return newEntry(p)
			}

			e := v.(*entry)

			// if underlay address is different, still add
			if !bytes.Equal(e.BzzAddr.UAddr, p.UAddr) {
				log.Trace("underlay addr is different, so add again", "new", p, "old", e.BzzAddr)
				// insert new offline peer into addrs
				return newEntry(p)
			}

			return v
		})
		if found {
			known++
		}
		size++
	}

	k.setNeighbourhoodDepth()
	return nil
}
// SuggestPeer returns an unconnected peer address as a peer suggestion for connection
func (k *Kademlia) SuggestPeer() (suggestedPeer *BzzAddr, saturationDepth int, changed bool) {
	k.lock.Lock()
	defer k.lock.Unlock()

	metrics.GetOrRegisterCounter("kad.suggestpeer", nil).Inc(1)

	radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
	// collect undersaturated bins in ascending order of number of connected peers
	// and from shallow to deep (ascending order of PO);
	// insert them in a map of bin arrays, keyed with the number of connected peers
	saturation := make(map[int][]int)
	var lastPO int       // the last non-empty PO bin in the iteration
	saturationDepth = -1 // the deepest PO such that all shallower bins have >= k.MinBinSize peers
	var pastDepth bool   // whether the PO of the iteration is >= depth
	k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
		// process skipped empty bins
		for ; lastPO < po; lastPO++ {
			// find the lowest unsaturated bin
			if saturationDepth == -1 {
				saturationDepth = lastPO
			}
			// if there is an empty bin, depth is surely passed
			pastDepth = true
			saturation[0] = append(saturation[0], lastPO)
		}
		lastPO = po + 1
		// past radius, depth is surely passed
		if po >= radius {
			pastDepth = true
		}
		// beyond depth the bin is treated as unsaturated even if size >= k.MinBinSize,
		// in order to achieve full connectivity to all neighbours
		if pastDepth && size >= k.MinBinSize {
			size = k.MinBinSize - 1
		}
		// process non-empty unsaturated bins
		if size < k.MinBinSize {
			// find the lowest unsaturated bin
			if saturationDepth == -1 {
				saturationDepth = po
			}
			saturation[size] = append(saturation[size], po)
		}
		return true
	})
	// to trigger peer requests for peers closer than the closest connection, include
	// all bins from the nearest connection up to the nearest address as unsaturated
	var nearestAddrAt int
	k.addrs.EachNeighbour(k.base, Pof, func(_ pot.Val, po int) bool {
		nearestAddrAt = po
		return false
	})
	// including bins as size 0 has the effect that requesting connection
	// is prioritised over non-empty shallower bins
	for ; lastPO <= nearestAddrAt; lastPO++ {
		saturation[0] = append(saturation[0], lastPO)
	}
	// all PO bins are saturated, i.e. minsize >= k.MinBinSize, no peer suggested
	if len(saturation) == 0 {
		return nil, 0, false
	}
	// find the first callable peer in the address book,
	// starting from the bins with smallest size, proceeding from shallow to deep;
	// for each bin (up until the neighbourhood radius) we find callable candidate peers
	for size := 0; size < k.MinBinSize && suggestedPeer == nil; size++ {
		bins, ok := saturation[size]
		if !ok {
			// no bin with this size
			continue
		}
		cur := 0
		curPO := bins[0]
		k.addrs.EachBin(k.base, Pof, curPO, func(po, _ int, f func(func(pot.Val) bool) bool) bool {
			curPO = bins[cur]
			// find the next bin that has size size
			if curPO == po {
				cur++
			} else {
				// skip bins that have no addresses
				for ; cur < len(bins) && curPO < po; cur++ {
					curPO = bins[cur]
				}
				if po < curPO {
					cur--
					return true
				}
				// stop if there are no addresses
				if curPO < po {
					return false
				}
			}
			// curPO found
			// find a callable peer out of the addresses in the unsaturated bin
			// stop if found
			f(func(val pot.Val) bool {
				e := val.(*entry)
				if k.callable(e) {
					suggestedPeer = e.BzzAddr
					return false
				}
				return true
			})
			return cur < len(bins) && suggestedPeer == nil
		})
	}

	if uint8(saturationDepth) < k.depth {
		k.depth = uint8(saturationDepth)
		return suggestedPeer, saturationDepth, true
	}
	return suggestedPeer, 0, false
}
// On inserts the peer as a kademlia peer into the live peers
func (k *Kademlia) On(p *Peer) (uint8, bool) {
	k.lock.Lock()
	defer k.lock.Unlock()

	metrics.GetOrRegisterCounter("kad.on", nil).Inc(1)

	var ins bool
	k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(v pot.Val) pot.Val {
		// if not found live
		if v == nil {
			ins = true
			// insert new online peer into conns
			return p
		}
		// found among live peers, do nothing
		return v
	})
	if ins && !p.BzzPeer.LightNode {
		a := newEntry(p.BzzAddr)
		a.conn = p
		// insert new online peer into addrs
		k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
			return a
		})
	}
	// calculate if the depth of saturation changed
	depth := uint8(k.saturation())
	var changed bool
	if depth != k.depth {
		changed = true
		k.depth = depth
	}
	k.setNeighbourhoodDepth()
	return k.depth, changed
}
// setNeighbourhoodDepth calculates the neighbourhood depth with depthForPot,
// sets it to nDepth and sends a signal to every nDepthSig channel.
func (k *Kademlia) setNeighbourhoodDepth() {
	nDepth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
	var changed bool
	k.nDepthMu.Lock()
	if nDepth != k.nDepth {
		k.nDepth = nDepth
		changed = true
	}
	k.nDepthMu.Unlock()

	if len(k.nDepthSig) > 0 && changed {
		for _, c := range k.nDepthSig {
			// Every nDepthSig channel has a buffer capacity of 1,
			// so every receiver will get the signal even if the
			// select statement has the default case to avoid blocking.
			select {
			case c <- struct{}{}:
			default:
			}
		}
	}
}
// NeighbourhoodDepth returns the value calculated by the depthForPot function
// in the setNeighbourhoodDepth method.
func (k *Kademlia) NeighbourhoodDepth() int {
	k.nDepthMu.RLock()
	defer k.nDepthMu.RUnlock()
	return k.nDepth
}
// SubscribeToNeighbourhoodDepthChange returns a channel that signals
// when the neighbourhood depth value changes. The current neighbourhood depth
// is returned by the NeighbourhoodDepth method. The returned function unsubscribes
// the channel from signaling and releases the resources; it is safe
// to call it multiple times.
func (k *Kademlia) SubscribeToNeighbourhoodDepthChange() (c <-chan struct{}, unsubscribe func()) {
	channel := make(chan struct{}, 1)
	var closeOnce sync.Once

	k.lock.Lock()
	defer k.lock.Unlock()

	k.nDepthSig = append(k.nDepthSig, channel)

	unsubscribe = func() {
		k.lock.Lock()
		defer k.lock.Unlock()

		for i, c := range k.nDepthSig {
			if c == channel {
				k.nDepthSig = append(k.nDepthSig[:i], k.nDepthSig[i+1:]...)
				break
			}
		}

		closeOnce.Do(func() { close(channel) })
	}

	return channel, unsubscribe
}
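
// exampleDepthSubscriber is an illustrative sketch and was not part of the
// original file: it consumes depth-change signals. Since each channel has a
// buffer of one, a slow consumer coalesces bursts of changes into a single
// wake-up instead of blocking the kademlia.
func exampleDepthSubscriber(k *Kademlia, quit chan struct{}) {
	c, unsubscribe := k.SubscribeToNeighbourhoodDepthChange()
	defer unsubscribe()
	for {
		select {
		case _, ok := <-c:
			if !ok {
				return // channel closed by unsubscribe
			}
			log.Trace("neighbourhood depth changed", "depth", k.NeighbourhoodDepth())
		case <-quit:
			return
		}
	}
}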
// Off removes a peer from among live peers
func (k *Kademlia) Off(p *Peer) {
	k.lock.Lock()
	defer k.lock.Unlock()
	var del bool
	if !p.BzzPeer.LightNode {
		k.addrs, _, _, _ = pot.Swap(k.addrs, p, Pof, func(v pot.Val) pot.Val {
			// v cannot be nil, must check otherwise we overwrite entry
			if v == nil {
				panic(fmt.Sprintf("connected peer not found %v", p))
			}
			del = true
			return newEntry(p.BzzAddr)
		})
	} else {
		del = true
	}

	if del {
		k.conns, _, _, _ = pot.Swap(k.conns, p, Pof, func(_ pot.Val) pot.Val {
			// v cannot be nil, but no need to check
			return nil
		})
		k.setNeighbourhoodDepth()
	}
}
// ListKnown returns all known peer addresses.
func (k *Kademlia) ListKnown() []*BzzAddr {
	res := []*BzzAddr{}

	k.addrs.Each(func(val pot.Val) bool {
		e := val.(*entry)
		res = append(res, e.BzzAddr)
		return true
	})

	return res
}
// EachConn is an iterator with args (base, po, f) applies f to each live peer
|
||||
// that has proximity order po or less as measured from the base
|
||||
// if base is nil, kademlia base address is used
|
||||
func (k *Kademlia) EachConn(base []byte, o int, f func(*Peer, int) bool) {
|
||||
k.lock.RLock()
|
||||
defer k.lock.RUnlock()
|
||||
k.eachConn(base, o, f)
|
||||
}
|
||||
|
||||
func (k *Kademlia) eachConn(base []byte, o int, f func(*Peer, int) bool) {
|
||||
if len(base) == 0 {
|
||||
base = k.base
|
||||
}
|
||||
k.conns.EachNeighbour(base, Pof, func(val pot.Val, po int) bool {
|
||||
if po > o {
|
||||
return true
|
||||
}
|
||||
return f(val.(*Peer), po)
|
||||
})
|
||||
}
|
||||
|
||||
// EachAddr called with (base, po, f) is an iterator applying f to each known peer
|
||||
// that has proximity order o or less as measured from the base
|
||||
// if base is nil, kademlia base address is used
|
||||
func (k *Kademlia) EachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
|
||||
k.lock.RLock()
|
||||
defer k.lock.RUnlock()
|
||||
k.eachAddr(base, o, f)
|
||||
}
|
||||
|
||||
func (k *Kademlia) eachAddr(base []byte, o int, f func(*BzzAddr, int) bool) {
|
||||
if len(base) == 0 {
|
||||
base = k.base
|
||||
}
|
||||
k.addrs.EachNeighbour(base, Pof, func(val pot.Val, po int) bool {
|
||||
if po > o {
|
||||
return true
|
||||
}
|
||||
return f(val.(*entry).BzzAddr, po)
|
||||
})
|
||||
}
|
||||
|
||||
// neighbourhoodRadiusForPot returns the neighbourhood radius of the kademlia
|
||||
// neighbourhood radius encloses the nearest neighbour set with size >= neighbourhoodSize
|
||||
// i.e., neighbourhood radius is the deepest PO such that all bins not shallower altogether
|
||||
// contain at least neighbourhoodSize connected peers
|
||||
// if there is altogether less than neighbourhoodSize peers connected, it returns 0
|
||||
// caller must hold the lock
|
||||
func neighbourhoodRadiusForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
|
||||
if p.Size() <= neighbourhoodSize {
|
||||
return 0
|
||||
}
|
||||
// total number of peers in iteration
|
||||
var size int
|
||||
f := func(v pot.Val, i int) bool {
|
||||
// po == 256 means that addr is the pivot address(self)
|
||||
if i == 256 {
|
||||
return true
|
||||
}
|
||||
size++
|
||||
|
||||
// this means we have all nn-peers.
|
||||
// depth is by default set to the bin of the farthest nn-peer
|
||||
if size == neighbourhoodSize {
|
||||
depth = i
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
p.EachNeighbour(pivotAddr, Pof, f)
|
||||
return depth
|
||||
}
|
||||
|
||||
// depthForPot returns the depth for the pot
|
||||
// depth is the radius of the minimal extension of nearest neighbourhood that
|
||||
// includes all empty PO bins. I.e., depth is the deepest PO such that
|
||||
// - it is not deeper than neighbourhood radius
|
||||
// - all bins shallower than depth are not empty
|
||||
// caller must hold the lock
|
||||
func depthForPot(p *pot.Pot, neighbourhoodSize int, pivotAddr []byte) (depth int) {
|
||||
if p.Size() <= neighbourhoodSize {
|
||||
return 0
|
||||
}
|
||||
// determining the depth is a two-step process
|
||||
// first we find the proximity bin of the shallowest of the neighbourhoodSize peers
|
||||
// the numeric value of depth cannot be higher than this
|
||||
maxDepth := neighbourhoodRadiusForPot(p, neighbourhoodSize, pivotAddr)
|
||||
|
||||
// the second step is to test for empty bins in order from shallowest to deepest
|
||||
// if an empty bin is found, this will be the actual depth
|
||||
// we stop iterating if we hit the maxDepth determined in the first step
|
||||
p.EachBin(pivotAddr, Pof, 0, func(po int, _ int, f func(func(pot.Val) bool) bool) bool {
|
||||
if po == depth {
|
||||
if maxDepth == depth {
|
||||
return false
|
||||
}
|
||||
depth++
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
return depth
|
||||
}
|
||||
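
// Worked example (illustrative, assuming NeighbourhoodSize = 2): a node with
// connected peers only in bins 0, 3 and 4.
//
//	radius := neighbourhoodRadiusForPot(conns, 2, base) // 3: bins >= 3 hold the two nearest peers
//	depth := depthForPot(conns, 2, base)                // 1: bin 0 is occupied, bin 1 is the first empty bin
//
// The radius caps the depth; the first empty bin below the radius lowers it.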

// callable decides if an address entry represents a callable peer
func (k *Kademlia) callable(e *entry) bool {
	// not callable if peer is live or exceeded maxRetries
	if e.conn != nil || e.retries > k.MaxRetries {
		return false
	}
	// calculate the allowed number of retries based on time elapsed since last seen
	timeAgo := int64(time.Since(e.seenAt))
	div := int64(k.RetryExponent)
	div += (150000 - rand.Int63n(300000)) * div / 1000000
	var retries int
	for delta := timeAgo; delta > k.RetryInterval; delta /= div {
		retries++
	}
	// this is never called concurrently, so safe to increment
	// peer can be retried again
	if retries < e.retries {
		log.Trace(fmt.Sprintf("%08x: %v long time since last try (at %v) needed before retry %v, wait only warrants %v", k.BaseAddr()[:4], e, timeAgo, e.retries, retries))
		return false
	}
	// function to sanction or prevent suggesting a peer
	if k.Reachable != nil && !k.Reachable(e.BzzAddr) {
		log.Trace(fmt.Sprintf("%08x: peer %v is temporarily not callable", k.BaseAddr()[:4], e))
		return false
	}
	e.retries++
	log.Trace(fmt.Sprintf("%08x: peer %v is callable", k.BaseAddr()[:4], e))

	return true
}
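
// Backoff sketch (illustrative): with RetryInterval = 1s and RetryExponent = 2
// (jittered by roughly ±15% above), the elapsed time warrants one retry after
// ~1s, two after ~2s, three after ~4s, and so on:
//
//	allowed := 0
//	for delta := int64(time.Since(seenAt)); delta > retryInterval; delta /= retryExponent {
//		allowed++
//	}
//	callable := allowed >= usedRetries
//
// seenAt, retryInterval, retryExponent and usedRetries are hypothetical locals
// standing in for the entry fields and kademlia parameters used above.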

// BaseAddr returns the kademlia base address
func (k *Kademlia) BaseAddr() []byte {
	return k.base
}

// String returns kademlia table + kaddb table displayed with ascii
func (k *Kademlia) String() string {
	k.lock.RLock()
	defer k.lock.RUnlock()
	return k.string()
}

// string returns kademlia table + kaddb table displayed with ascii
// caller must hold the lock
func (k *Kademlia) string() string {
	wsrow := "                          " // padding used to right-pad live rows to a fixed width
	var rows []string

	rows = append(rows, "=========================================================================")
	if len(sv.GitCommit) > 0 {
		rows = append(rows, fmt.Sprintf("commit hash: %s", sv.GitCommit))
	}
	rows = append(rows, fmt.Sprintf("%v KΛÐΞMLIΛ hive: queen's address: %x", time.Now().UTC().Format(time.UnixDate), k.BaseAddr()))
	rows = append(rows, fmt.Sprintf("population: %d (%d), NeighbourhoodSize: %d, MinBinSize: %d, MaxBinSize: %d", k.conns.Size(), k.addrs.Size(), k.NeighbourhoodSize, k.MinBinSize, k.MaxBinSize))

	liverows := make([]string, k.MaxProxDisplay)
	peersrows := make([]string, k.MaxProxDisplay)

	depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
	rest := k.conns.Size()
	k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
		var rowlen int
		if po >= k.MaxProxDisplay {
			po = k.MaxProxDisplay - 1
		}
		row := []string{fmt.Sprintf("%2d", size)}
		rest -= size
		f(func(val pot.Val) bool {
			e := val.(*Peer)
			row = append(row, fmt.Sprintf("%x", e.Address()[:2]))
			rowlen++
			return rowlen < 4
		})
		r := strings.Join(row, " ")
		r = r + wsrow
		liverows[po] = r[:31]
		return true
	})

	k.addrs.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
		var rowlen int
		if po >= k.MaxProxDisplay {
			po = k.MaxProxDisplay - 1
		}
		if size < 0 {
			panic(fmt.Sprintf("negative bin size: %d", size))
		}
		row := []string{fmt.Sprintf("%2d", size)}
		// we are displaying live peers too
		f(func(val pot.Val) bool {
			e := val.(*entry)
			row = append(row, Label(e))
			rowlen++
			return rowlen < 4
		})
		peersrows[po] = strings.Join(row, " ")
		return true
	})

	for i := 0; i < k.MaxProxDisplay; i++ {
		if i == depth {
			rows = append(rows, fmt.Sprintf("============ DEPTH: %d ==========================================", i))
		}
		left := liverows[i]
		right := peersrows[i]
		if len(left) == 0 {
			left = " 0 "
		}
		if len(right) == 0 {
			right = " 0"
		}
		rows = append(rows, fmt.Sprintf("%03d %v | %v", i, left, right))
	}
	rows = append(rows, "=========================================================================")
	return "\n" + strings.Join(rows, "\n")
}

// PeerPot keeps info about expected nearest neighbours
// used for testing only
// TODO move to separate testing tools file
type PeerPot struct {
	NNSet       [][]byte
	PeersPerBin []int
}

// NewPeerPotMap creates a map of PeerPot records, keyed by the hexadecimal
// representation of each address.
// The NeighbourhoodSize of the passed kademlia is used.
// used for testing only
// TODO move to separate testing tools file
func NewPeerPotMap(neighbourhoodSize int, addrs [][]byte) map[string]*PeerPot {

	// create a table of all nodes for health check
	np := pot.NewPot(nil, 0)
	for _, addr := range addrs {
		np, _, _ = pot.Add(np, addr, Pof)
	}
	ppmap := make(map[string]*PeerPot)

	// generate an all-knowing source of truth for connections
	// for every kademlia passed
	for i, a := range addrs {

		// actual kademlia depth
		depth := depthForPot(np, neighbourhoodSize, a)

		// all nn-peers
		var nns [][]byte
		peersPerBin := make([]int, depth)

		// iterate through the neighbours, going from the deepest to the shallowest
		np.EachNeighbour(a, Pof, func(val pot.Val, po int) bool {
			addr := val.([]byte)
			// po == 256 means that addr is the pivot address (self)
			// we do not include self in the map
			if po == 256 {
				return true
			}
			// append any neighbors found
			// a neighbor is any peer in or deeper than the depth
			if po >= depth {
				nns = append(nns, addr)
			} else {
				// for peers < depth, we just count the number in each bin
				// the bin is the index of the slice
				peersPerBin[po]++
			}
			return true
		})

		log.Trace(fmt.Sprintf("%x PeerPotMap NNS: %s, peersPerBin", addrs[i][:4], LogAddrs(nns)))
		ppmap[common.Bytes2Hex(a)] = &PeerPot{
			NNSet:       nns,
			PeersPerBin: peersPerBin,
		}
	}
	return ppmap
}
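
// Usage sketch (illustrative; addrs is hypothetical test data):
//
//	ppmap := NewPeerPotMap(2, addrs)
//	pp := ppmap[common.Bytes2Hex(addrs[0])]
//	// pp.NNSet holds the peers this node is expected to know and connect to;
//	// pp.PeersPerBin[po] counts the available peers in each bin shallower than depth.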

// Saturation returns the smallest po value in which the node has fewer than MinBinSize peers.
// If the iterator reaches the neighbourhood radius, the last bin + 1 is returned.
func (k *Kademlia) Saturation() int {
	k.lock.RLock()
	defer k.lock.RUnlock()

	return k.saturation()
}

func (k *Kademlia) saturation() int {
	prev := -1
	radius := neighbourhoodRadiusForPot(k.conns, k.NeighbourhoodSize, k.base)
	k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {
		prev++
		if po >= radius {
			return false
		}
		return prev == po && size >= k.MinBinSize
	})
	if prev < 0 {
		return 0
	}
	return prev
}

// isSaturated returns true if the kademlia is considered saturated, or false if not.
// It does so by collecting the unsaturated bins, i.e. the bins with fewer than
// k.MinBinSize connections. A bin only counts as unsaturated if the PeerPot
// actually has peers for it (peersPerBin): if no peer exists for a given bin,
// no connection could ever be established, and from the all-knowing view this
// is relevant because no more peers will ever appear in that bin.
func (k *Kademlia) isSaturated(peersPerBin []int, depth int) bool {
	// depth could be calculated from k, but as this is called from `GetHealthInfo()`,
	// the depth has already been calculated, so we can require it as a parameter

	// early check for depth
	if depth != len(peersPerBin) {
		return false
	}
	unsaturatedBins := make([]int, 0)
	k.conns.EachBin(k.base, Pof, 0, func(po, size int, f func(func(val pot.Val) bool) bool) bool {

		if po >= depth {
			return false
		}
		log.Trace("peers per bin", "peersPerBin[po]", peersPerBin[po], "po", po)
		// if there are actually peers in the PeerPot who could fulfill k.MinBinSize
		if size < k.MinBinSize && size < peersPerBin[po] {
			log.Trace("connections for po", "po", po, "size", size)
			unsaturatedBins = append(unsaturatedBins, po)
		}
		return true
	})

	log.Trace("list of unsaturated bins", "unsaturatedBins", unsaturatedBins)
	return len(unsaturatedBins) == 0
}

// knowNeighbours tests if all neighbours in the peerpot
// are found among the peers known to the kademlia.
// It is used in the Healthy function for testing only.
// TODO move to separate testing tools file
func (k *Kademlia) knowNeighbours(addrs [][]byte) (got bool, n int, missing [][]byte) {
	pm := make(map[string]bool)
	depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
	// create a map with all peers at depth and deeper known in the kademlia
	k.eachAddr(nil, 255, func(p *BzzAddr, po int) bool {
		// in order deepest to shallowest compared to the kademlia base address
		// all bins (except self) are included (0 <= bin <= 255)
		if po < depth {
			return false
		}
		pk := common.Bytes2Hex(p.Address())
		pm[pk] = true
		return true
	})

	// iterate through nearest neighbors in the peerpot map;
	// if we can't find the neighbor in the map we created above
	// then we don't know all our neighbors
	// (which sadly is all too common in modern society)
	var gots int
	var culprits [][]byte
	for _, p := range addrs {
		pk := common.Bytes2Hex(p)
		if pm[pk] {
			gots++
		} else {
			log.Trace(fmt.Sprintf("%08x: known nearest neighbour %s not found", k.base, pk))
			culprits = append(culprits, p)
		}
	}
	return gots == len(addrs), gots, culprits
}

// connectedNeighbours tests if all neighbours in the peerpot
// are currently connected in the kademlia.
// It is used in the Healthy function for testing only.
func (k *Kademlia) connectedNeighbours(peers [][]byte) (got bool, n int, missing [][]byte) {
	pm := make(map[string]bool)

	// create a map with all peers at depth and deeper that are connected in the kademlia,
	// in order deepest to shallowest compared to the kademlia base address;
	// all bins (except self) are included (0 <= bin <= 255)
	depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)
	k.eachConn(nil, 255, func(p *Peer, po int) bool {
		if po < depth {
			return false
		}
		pk := common.Bytes2Hex(p.Address())
		pm[pk] = true
		return true
	})

	// iterate through nearest neighbors in the peerpot map;
	// if we can't find the neighbor in the map we created above
	// then we don't know all our neighbors
	var gots int
	var culprits [][]byte
	for _, p := range peers {
		pk := common.Bytes2Hex(p)
		if pm[pk] {
			gots++
		} else {
			log.Trace(fmt.Sprintf("%08x: ExpNN: %s not found", k.base, pk))
			culprits = append(culprits, p)
		}
	}
	return gots == len(peers), gots, culprits
}

// Health state of the Kademlia
// used for testing only
type Health struct {
	KnowNN           bool     // whether node knows all its neighbours
	CountKnowNN      int      // number of neighbours known
	MissingKnowNN    [][]byte // which neighbours we should have known but we don't
	ConnectNN        bool     // whether node is connected to all its neighbours
	CountConnectNN   int      // number of neighbours connected to
	MissingConnectNN [][]byte // which neighbours we should have been connected to but we're not
	// Saturated: true if every bin < depth has at least MinBinSize connections,
	// or, where fewer than MinBinSize peers are available in a bin, connections to all of them
	Saturated bool
	Hive      string
}

// GetHealthInfo reports the health state of the kademlia connectivity.
//
// The PeerPot argument provides an all-knowing view of the network.
// The resulting Health object is the result of comparisons between
// the actual composition of the kademlia in question (the receiver) and
// what it SHOULD have been, taking everything we know about the network into consideration.
//
// used for testing only
func (k *Kademlia) GetHealthInfo(pp *PeerPot) *Health {
	k.lock.RLock()
	defer k.lock.RUnlock()
	if len(pp.NNSet) < k.NeighbourhoodSize {
		log.Warn("peerpot NNSet < NeighbourhoodSize")
	}
	gotnn, countgotnn, culpritsgotnn := k.connectedNeighbours(pp.NNSet)
	knownn, countknownn, culpritsknownn := k.knowNeighbours(pp.NNSet)
	depth := depthForPot(k.conns, k.NeighbourhoodSize, k.base)

	// check saturation
	saturated := k.isSaturated(pp.PeersPerBin, depth)

	log.Trace(fmt.Sprintf("%08x: healthy: knowNNs: %v, gotNNs: %v, saturated: %v\n", k.base, knownn, gotnn, saturated))
	return &Health{
		KnowNN:           knownn,
		CountKnowNN:      countknownn,
		MissingKnowNN:    culpritsknownn,
		ConnectNN:        gotnn,
		CountConnectNN:   countgotnn,
		MissingConnectNN: culpritsgotnn,
		Saturated:        saturated,
		Hive:             k.string(),
	}
}

// Healthy returns the strict interpretation of `Healthy` given a `Health` struct.
// Definition of strict health: all conditions must be true:
// - we know at least one peer
// - we know all neighbours
// - we are connected to all known neighbours
// - the kademlia is saturated
func (h *Health) Healthy() bool {
	return h.KnowNN && h.ConnectNN && h.CountKnowNN > 0 && h.Saturated
}
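
// Usage sketch (illustrative; ppmap and the kademlia k are assumed to come
// from test setup such as NewPeerPotMap above):
//
//	pp := ppmap[common.Bytes2Hex(k.BaseAddr())]
//	h := k.GetHealthInfo(pp)
//	if !h.Healthy() {
//		log.Warn("node unhealthy", "missingConns", len(h.MissingConnectNN), "saturated", h.Saturated)
//	}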
@ -1,672 +0,0 @@

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/swarm/pot"
)

func init() {
	h := log.LvlFilterHandler(log.LvlWarn, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))
	log.Root().SetHandler(h)
}

func testKadPeerAddr(s string) *BzzAddr {
	a := pot.NewAddressFromString(s)
	return &BzzAddr{OAddr: a, UAddr: a}
}

func newTestKademliaParams() *KadParams {
	params := NewKadParams()
	params.MinBinSize = 2
	params.NeighbourhoodSize = 2
	return params
}

type testKademlia struct {
	*Kademlia
	t *testing.T
}

func newTestKademlia(t *testing.T, b string) *testKademlia {
	base := pot.NewAddressFromString(b)
	return &testKademlia{
		Kademlia: NewKademlia(base, newTestKademliaParams()),
		t:        t,
	}
}

func (tk *testKademlia) newTestKadPeer(s string, lightNode bool) *Peer {
	return NewPeer(&BzzPeer{BzzAddr: testKadPeerAddr(s), LightNode: lightNode}, tk.Kademlia)
}

func (tk *testKademlia) On(ons ...string) {
	for _, s := range ons {
		tk.Kademlia.On(tk.newTestKadPeer(s, false))
	}
}

func (tk *testKademlia) Off(offs ...string) {
	for _, s := range offs {
		tk.Kademlia.Off(tk.newTestKadPeer(s, false))
	}
}

func (tk *testKademlia) Register(regs ...string) {
	var as []*BzzAddr
	for _, s := range regs {
		as = append(as, testKadPeerAddr(s))
	}
	err := tk.Kademlia.Register(as...)
	if err != nil {
		panic(err.Error())
	}
}
// tests the validity of neighborhood depth calculations
//
// in particular, it tests that if there are one or more consecutive
// empty bins above the farthest "nearest neighbor-peer" then
// the depth should be set at the farthest of those empty bins
//
// TODO: Make test adapt to change in NeighbourhoodSize
func TestNeighbourhoodDepth(t *testing.T) {
	baseAddressBytes := RandomAddr().OAddr
	kad := NewKademlia(baseAddressBytes, NewKadParams())

	baseAddress := pot.NewAddressFromBytes(baseAddressBytes)

	// generate the peers
	var peers []*Peer
	for i := 0; i < 7; i++ {
		addr := pot.RandomAddressAt(baseAddress, i)
		peers = append(peers, newTestDiscoveryPeer(addr, kad))
	}
	var sevenPeers []*Peer
	for i := 0; i < 2; i++ {
		addr := pot.RandomAddressAt(baseAddress, 7)
		sevenPeers = append(sevenPeers, newTestDiscoveryPeer(addr, kad))
	}

	testNum := 0
	// first try with empty kademlia
	depth := kad.NeighbourhoodDepth()
	if depth != 0 {
		t.Fatalf("%d expected depth 0, was %d", testNum, depth)
	}
	testNum++

	// add one peer on 7
	kad.On(sevenPeers[0])
	depth = kad.NeighbourhoodDepth()
	if depth != 0 {
		t.Fatalf("%d expected depth 0, was %d", testNum, depth)
	}
	testNum++

	// add a second on 7
	kad.On(sevenPeers[1])
	depth = kad.NeighbourhoodDepth()
	if depth != 0 {
		t.Fatalf("%d expected depth 0, was %d", testNum, depth)
	}
	testNum++

	// add from 0 to 6
	for i, p := range peers {
		kad.On(p)
		depth = kad.NeighbourhoodDepth()
		if depth != i+1 {
			t.Fatalf("%d.%d expected depth %d, was %d", i+1, testNum, i+1, depth)
		}
	}
	testNum++

	kad.Off(sevenPeers[1])
	depth = kad.NeighbourhoodDepth()
	if depth != 6 {
		t.Fatalf("%d expected depth 6, was %d", testNum, depth)
	}
	testNum++

	kad.Off(peers[4])
	depth = kad.NeighbourhoodDepth()
	if depth != 4 {
		t.Fatalf("%d expected depth 4, was %d", testNum, depth)
	}
	testNum++

	kad.Off(peers[3])
	depth = kad.NeighbourhoodDepth()
	if depth != 3 {
		t.Fatalf("%d expected depth 3, was %d", testNum, depth)
	}
	testNum++
}
// TestHighMinBinSize tests that the saturation function also works
// if MinBinSize is > 2, the connection count is < k.MinBinSize
// and there are more peers available than connected
func TestHighMinBinSize(t *testing.T) {
	// a function to test for different MinBinSize values
	testKad := func(minBinSize int) {
		// create a test kademlia
		tk := newTestKademlia(t, "11111111")
		// set its MinBinSize to the desired value
		tk.KadParams.MinBinSize = minBinSize

		// add a couple of peers (so we have NN and depth)
		tk.On("00000000") // bin 0
		tk.On("11100000") // bin 3
		tk.On("11110000") // bin 4

		first := "10000000" // add a first peer at bin 1
		tk.Register(first)  // register it
		// we now have one registered peer at bin 1;
		// iterate and connect one peer at each iteration;
		// should be unhealthy until, at iteration minBinSize - 1,
		// we connect the still-unconnected registered peer
		for i := 1; i < minBinSize; i++ {
			peer := fmt.Sprintf("1000%b", 8|i)
			tk.On(peer)
			if i == minBinSize-1 {
				tk.On(first)
				tk.checkHealth(true)
				return
			}
			tk.checkHealth(false)
		}
	}
	// test MinBinSizes of 3 to 5
	testMinBinSizes := []int{3, 4, 5}
	for _, k := range testMinBinSizes {
		testKad(k)
	}
}
// TestHealthStrict tests the strictest definition of health:
// whether we are connected to all the neighbours we know of
func TestHealthStrict(t *testing.T) {

	// base address is all ones
	// no peers
	// unhealthy (and lonely)
	tk := newTestKademlia(t, "11111111")
	tk.checkHealth(false)

	// know one peer but not connected
	// unhealthy
	tk.Register("11100000")
	tk.checkHealth(false)

	// know one peer and connected
	// healthy
	tk.On("11100000")
	tk.checkHealth(true)

	// know two peers, only one connected
	// unhealthy
	tk.Register("11111100")
	tk.checkHealth(false)

	// know two peers and connected to both
	// healthy
	tk.On("11111100")
	tk.checkHealth(true)

	// know three peers, connected to the two deepest
	// unhealthy: known peer in bin 0 not connected
	tk.Register("00000000")
	tk.checkHealth(false)

	// know three peers, connected to all three
	// healthy
	tk.On("00000000")
	tk.checkHealth(true)

	// add fourth peer deeper than current depth
	// unhealthy
	tk.Register("11110000")
	tk.checkHealth(false)

	// connected to three deepest peers
	// healthy
	tk.On("11110000")
	tk.checkHealth(true)

	// add additional peer in same bin as deepest peer
	// unhealthy
	tk.Register("11111101")
	tk.checkHealth(false)

	// four deepest of five peers connected
	// healthy
	tk.On("11111101")
	tk.checkHealth(true)

	// add additional peer in bin 0
	// unhealthy: unsaturated bin 0, 2 known but 1 connected
	tk.Register("00000001")
	tk.checkHealth(false)

	// connect second in bin 0
	// healthy
	tk.On("00000001")
	tk.checkHealth(true)

	// add peer in bin 1
	// unhealthy, as it is known but not connected
	tk.Register("10000000")
	tk.checkHealth(false)

	// connect peer in bin 1
	// depth change, is now 1
	// healthy, 1 peer in bin 1 known and connected
	tk.On("10000000")
	tk.checkHealth(true)

	// add second peer in bin 1
	// unhealthy, as it is known but not connected
	tk.Register("10000001")
	tk.checkHealth(false)

	// connect second peer in bin 1
	// healthy
	tk.On("10000001")
	tk.checkHealth(true)

	// connect third peer in bin 1
	// healthy
	tk.On("10000011")
	tk.checkHealth(true)

	// add peer in bin 2
	// unhealthy, no depth change
	tk.Register("11000000")
	tk.checkHealth(false)

	// connect peer in bin 2
	// depth change - as we already have peers in bins 3 and 4,
	// we have contiguous bins, no bin < po 5 is empty -> depth 5
	// healthy, every bin < depth has the max available peers,
	// even if they are < MinBinSize
	tk.On("11000000")
	tk.checkHealth(true)

	// add peer in bin 2
	// unhealthy, peer bin is below depth 5 but
	// has more available peers (2) than connected ones (1)
	// --> unsaturated
	tk.Register("11000011")
	tk.checkHealth(false)
}
func (tk *testKademlia) checkHealth(expectHealthy bool) {
	tk.t.Helper()
	kid := common.Bytes2Hex(tk.BaseAddr())
	addrs := [][]byte{tk.BaseAddr()}
	tk.EachAddr(nil, 255, func(addr *BzzAddr, po int) bool {
		addrs = append(addrs, addr.Address())
		return true
	})

	pp := NewPeerPotMap(tk.NeighbourhoodSize, addrs)
	healthParams := tk.GetHealthInfo(pp[kid])

	// definition of health, all conditions must be true:
	// - we know at least one peer
	// - we know all neighbours
	// - we are connected to all known neighbours
	health := healthParams.Healthy()
	if expectHealthy != health {
		tk.t.Fatalf("expected kademlia health %v, is %v\n%v", expectHealthy, health, tk.String())
	}
}

func (tk *testKademlia) checkSuggestPeer(expAddr string, expDepth int, expChanged bool) {
	tk.t.Helper()
	addr, depth, changed := tk.SuggestPeer()
	log.Trace("suggestPeer return", "addr", addr, "depth", depth, "changed", changed)
	if binStr(addr) != expAddr {
		tk.t.Fatalf("incorrect peer address suggested. expected %v, got %v", expAddr, binStr(addr))
	}
	if depth != expDepth {
		tk.t.Fatalf("incorrect saturation depth suggested. expected %v, got %v", expDepth, depth)
	}
	if changed != expChanged {
		tk.t.Fatalf("expected depth change = %v, got %v", expChanged, changed)
	}
}

func binStr(a *BzzAddr) string {
	if a == nil {
		return "<nil>"
	}
	return pot.ToBin(a.Address())[:8]
}
func TestSuggestPeerFindPeers(t *testing.T) {
	tk := newTestKademlia(t, "00000000")
	tk.On("00100000")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.On("00010000")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.On("10000000", "10000001")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.On("01000000")
	tk.Off("10000001")
	tk.checkSuggestPeer("10000001", 0, true)

	tk.On("00100001")
	tk.Off("01000000")
	tk.checkSuggestPeer("01000000", 0, false)

	// second time the disconnected peer is not callable
	// with a reasonably set RetryInterval
	tk.checkSuggestPeer("<nil>", 0, false)

	// on and off again, peer callable again
	tk.On("01000000")
	tk.Off("01000000")
	tk.checkSuggestPeer("01000000", 0, false)

	tk.On("01000000", "10000001")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.Register("00010001")
	tk.checkSuggestPeer("00010001", 0, false)

	tk.On("00010001")
	tk.Off("01000000")
	tk.checkSuggestPeer("01000000", 0, false)

	tk.On("01000000")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.Register("01000001")
	tk.checkSuggestPeer("01000001", 0, false)

	tk.On("01000001")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.Register("10000010", "01000010", "00100010")
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.Register("00010010")
	tk.checkSuggestPeer("00010010", 0, false)

	tk.Off("00100001")
	tk.checkSuggestPeer("00100010", 2, true)

	tk.Off("01000001")
	tk.checkSuggestPeer("01000010", 1, true)

	tk.checkSuggestPeer("01000001", 0, false)
	tk.checkSuggestPeer("00100001", 0, false)
	tk.checkSuggestPeer("<nil>", 0, false)

	tk.On("01000001", "00100001")
	tk.Register("10000100", "01000100", "00100100")
	tk.Register("00000100", "00000101", "00000110")
	tk.Register("00000010", "00000011", "00000001")

	tk.checkSuggestPeer("00000110", 0, false)
	tk.checkSuggestPeer("00000101", 0, false)
	tk.checkSuggestPeer("00000100", 0, false)
	tk.checkSuggestPeer("00000011", 0, false)
	tk.checkSuggestPeer("00000010", 0, false)
	tk.checkSuggestPeer("00000001", 0, false)
	tk.checkSuggestPeer("<nil>", 0, false)
}
// a node should stay in the address book if it's removed from the kademlia
func TestOffEffectingAddressBookNormalNode(t *testing.T) {
	tk := newTestKademlia(t, "00000000")
	// peer added to kademlia
	tk.On("01000000")
	// peer should be in the address book
	if tk.addrs.Size() != 1 {
		t.Fatal("known peer addresses should contain 1 entry")
	}
	// peer should be among live connections
	if tk.conns.Size() != 1 {
		t.Fatal("live peers should contain 1 entry")
	}
	// remove peer from kademlia
	tk.Off("01000000")
	// peer should be in the address book
	if tk.addrs.Size() != 1 {
		t.Fatal("known peer addresses should contain 1 entry")
	}
	// peer should not be among live connections
	if tk.conns.Size() != 0 {
		t.Fatal("live peers should contain 0 entries")
	}
}

// a light node should not be in the address book
func TestOffEffectingAddressBookLightNode(t *testing.T) {
	tk := newTestKademlia(t, "00000000")
	// light node peer added to kademlia
	tk.Kademlia.On(tk.newTestKadPeer("01000000", true))
	// peer should not be in the address book
	if tk.addrs.Size() != 0 {
		t.Fatal("known peer addresses should contain 0 entries")
	}
	// peer should be among live connections
	if tk.conns.Size() != 1 {
		t.Fatal("live peers should contain 1 entry")
	}
	// remove peer from kademlia
	tk.Kademlia.Off(tk.newTestKadPeer("01000000", true))
	// peer should not be in the address book
	if tk.addrs.Size() != 0 {
		t.Fatal("known peer addresses should contain 0 entries")
	}
	// peer should not be among live connections
	if tk.conns.Size() != 0 {
		t.Fatal("live peers should contain 0 entries")
	}
}
func TestSuggestPeerRetries(t *testing.T) {
	tk := newTestKademlia(t, "00000000")
	tk.RetryInterval = int64(300 * time.Millisecond) // cycle
	tk.MaxRetries = 50
	tk.RetryExponent = 2
	sleep := func(n int) {
		ts := tk.RetryInterval
		for i := 1; i < n; i++ {
			ts *= int64(tk.RetryExponent)
		}
		time.Sleep(time.Duration(ts))
	}

	tk.Register("01000000")
	tk.On("00000001", "00000010")
	tk.checkSuggestPeer("01000000", 0, false)

	tk.checkSuggestPeer("<nil>", 0, false)

	sleep(1)
	tk.checkSuggestPeer("01000000", 0, false)

	tk.checkSuggestPeer("<nil>", 0, false)

	sleep(1)
	tk.checkSuggestPeer("01000000", 0, false)

	tk.checkSuggestPeer("<nil>", 0, false)

	sleep(2)
	tk.checkSuggestPeer("01000000", 0, false)

	tk.checkSuggestPeer("<nil>", 0, false)

	sleep(2)
	tk.checkSuggestPeer("<nil>", 0, false)
}

func TestKademliaHiveString(t *testing.T) {
	tk := newTestKademlia(t, "00000000")
	tk.On("01000000", "00100000")
	tk.Register("10000000", "10000001")
	tk.MaxProxDisplay = 8
	h := tk.String()
	expH := "\n=========================================================================\nMon Feb 27 12:10:28 UTC 2017 KΛÐΞMLIΛ hive: queen's address: 0000000000000000000000000000000000000000000000000000000000000000\npopulation: 2 (4), NeighbourhoodSize: 2, MinBinSize: 2, MaxBinSize: 4\n============ DEPTH: 0 ==========================================\n000 0 | 2 8100 (0) 8000 (0)\n001 1 4000 | 1 4000 (0)\n002 1 2000 | 1 2000 (0)\n003 0 | 0\n004 0 | 0\n005 0 | 0\n006 0 | 0\n007 0 | 0\n========================================================================="
	if expH[104:] != h[104:] {
		t.Fatalf("incorrect hive output. expected %v, got %v", expH, h)
	}
}

func newTestDiscoveryPeer(addr pot.Address, kad *Kademlia) *Peer {
	rw := &p2p.MsgPipeRW{}
	p := p2p.NewPeer(enode.ID{}, "foo", []p2p.Cap{})
	pp := protocols.NewPeer(p, rw, &protocols.Spec{})
	bp := &BzzPeer{
		Peer: pp,
		BzzAddr: &BzzAddr{
			OAddr: addr.Bytes(),
			UAddr: []byte(fmt.Sprintf("%x", addr[:])),
		},
	}
	return NewPeer(bp, kad)
}
// TestKademlia_SubscribeToNeighbourhoodDepthChange checks that correct
// signaling is made over SubscribeToNeighbourhoodDepthChange channels
// when the neighbourhood depth changes.
func TestKademlia_SubscribeToNeighbourhoodDepthChange(t *testing.T) {

	testSignal := func(t *testing.T, k *testKademlia, prevDepth int, c <-chan struct{}) (newDepth int) {
		t.Helper()

		select {
		case _, ok := <-c:
			if !ok {
				t.Error("closed signal channel")
			}
			newDepth = k.NeighbourhoodDepth()
			if prevDepth == newDepth {
				t.Error("depth not changed")
			}
			return newDepth
		case <-time.After(2 * time.Second):
			t.Error("timeout")
		}
		return newDepth
	}

	t.Run("single subscription", func(t *testing.T) {
		k := newTestKademlia(t, "00000000")

		c, u := k.SubscribeToNeighbourhoodDepthChange()
		defer u()

		depth := k.NeighbourhoodDepth()

		k.On("11111101", "01000000", "10000000", "00000010")

		testSignal(t, k, depth, c)
	})

	t.Run("multiple subscriptions", func(t *testing.T) {
		k := newTestKademlia(t, "00000000")

		c1, u1 := k.SubscribeToNeighbourhoodDepthChange()
		defer u1()

		c2, u2 := k.SubscribeToNeighbourhoodDepthChange()
		defer u2()

		depth := k.NeighbourhoodDepth()

		k.On("11111101", "01000000", "10000000", "00000010")

		testSignal(t, k, depth, c1)

		testSignal(t, k, depth, c2)
	})

	t.Run("multiple changes", func(t *testing.T) {
		k := newTestKademlia(t, "00000000")

		c, u := k.SubscribeToNeighbourhoodDepthChange()
		defer u()

		depth := k.NeighbourhoodDepth()

		k.On("11111101", "01000000", "10000000", "00000010")

		depth = testSignal(t, k, depth, c)

		k.On("11111101", "01000010", "10000010", "00000110")

		testSignal(t, k, depth, c)
	})

	t.Run("no depth change", func(t *testing.T) {
		k := newTestKademlia(t, "00000000")

		c, u := k.SubscribeToNeighbourhoodDepthChange()
		defer u()

		// does not trigger the depth change
		k.On("11111101")

		select {
		case _, ok := <-c:
			if !ok {
				t.Error("closed signal channel")
			}
			t.Error("signal received")
		case <-time.After(1 * time.Second):
			// all fine
		}
	})

	t.Run("no new peers", func(t *testing.T) {
		k := newTestKademlia(t, "00000000")

		changeC, unsubscribe := k.SubscribeToNeighbourhoodDepthChange()
		defer unsubscribe()

		select {
		case _, ok := <-changeC:
			if !ok {
				t.Error("closed signal channel")
			}
			t.Error("signal received")
		case <-time.After(1 * time.Second):
			// all fine
		}
	})
}
@ -1,105 +0,0 @@

package network

import (
	"crypto/ecdsa"
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// BzzAddr implements the PeerAddr interface
type BzzAddr struct {
	OAddr []byte
	UAddr []byte
}

// Address implements OverlayPeer interface to be used in Overlay.
func (a *BzzAddr) Address() []byte {
	return a.OAddr
}

// Over returns the overlay address.
func (a *BzzAddr) Over() []byte {
	return a.OAddr
}

// Under returns the underlay address.
func (a *BzzAddr) Under() []byte {
	return a.UAddr
}

// ID returns the node identifier in the underlay.
func (a *BzzAddr) ID() enode.ID {
	n, err := enode.ParseV4(string(a.UAddr))
	if err != nil {
		return enode.ID{}
	}
	return n.ID()
}

// Update updates the underlay address of a peer record
func (a *BzzAddr) Update(na *BzzAddr) *BzzAddr {
	return &BzzAddr{a.OAddr, na.UAddr}
}

// String pretty prints the address
func (a *BzzAddr) String() string {
	return fmt.Sprintf("%x <%s>", a.OAddr, a.UAddr)
}

// RandomAddr is a utility method generating an address from a public key
func RandomAddr() *BzzAddr {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic("unable to generate key")
	}
	node := enode.NewV4(&key.PublicKey, net.IP{127, 0, 0, 1}, 30303, 30303)
	return NewAddr(node)
}

// NewAddr constructs a BzzAddr from a node record.
func NewAddr(node *enode.Node) *BzzAddr {
	return &BzzAddr{OAddr: node.ID().Bytes(), UAddr: []byte(node.String())}
}

func PrivateKeyToBzzKey(prvKey *ecdsa.PrivateKey) []byte {
	pubkeyBytes := crypto.FromECDSAPub(&prvKey.PublicKey)
	return crypto.Keccak256Hash(pubkeyBytes).Bytes()
}
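
// Usage sketch (illustrative): deriving both address spaces for a node. The
// overlay address (OAddr) is what kademlia proximity is measured on; the
// underlay address (UAddr) is the enode URL used to dial the peer.
//
//	key, _ := crypto.GenerateKey()
//	node := enode.NewV4(&key.PublicKey, net.IP{127, 0, 0, 1}, 30303, 30303)
//	addr := NewAddr(node)             // OAddr = node ID bytes, UAddr = enode URL
//	bzzKey := PrivateKeyToBzzKey(key) // Keccak256 hash of the public key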

type EnodeParams struct {
	PrivateKey *ecdsa.PrivateKey
	EnodeKey   *ecdsa.PrivateKey
	Lightnode  bool
	Bootnode   bool
}

func NewEnodeRecord(params *EnodeParams) (*enr.Record, error) {

	if params.PrivateKey == nil {
		return nil, fmt.Errorf("all param private keys must be defined")
	}

	bzzkeybytes := PrivateKeyToBzzKey(params.PrivateKey)

	var record enr.Record
	record.Set(NewENRAddrEntry(bzzkeybytes))
	record.Set(ENRLightNodeEntry(params.Lightnode))
	record.Set(ENRBootNodeEntry(params.Bootnode))
	return &record, nil
}

func NewEnode(params *EnodeParams) (*enode.Node, error) {
	record, err := NewEnodeRecord(params)
	if err != nil {
		return nil, err
	}
	err = enode.SignV4(record, params.EnodeKey)
	if err != nil {
		return nil, fmt.Errorf("ENR create fail: %v", err)
	}
	return enode.New(enode.V4ID{}, record)
}
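
// Usage sketch (illustrative; both keys are hypothetical test keys):
//
//	swarmKey, _ := crypto.GenerateKey()
//	p2pKey, _ := crypto.GenerateKey()
//	node, err := NewEnode(&EnodeParams{
//		PrivateKey: swarmKey, // yields the bzz address entry of the record
//		EnodeKey:   p2pKey,   // signs the record
//		Lightnode:  true,
//	})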
@ -1,263 +0,0 @@

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/rpc"
)

var (
	currentNetworkID int
	cnt              int
	nodeMap          map[int][]enode.ID
	kademlias        map[enode.ID]*Kademlia
)

const (
	NumberOfNets = 4
	MaxTimeout   = 15 * time.Second
)

func init() {
	flag.Parse()
	rand.Seed(time.Now().Unix())
}

/*
Run the network ID test.
The test creates one simulations.Network instance and
a number of nodes, then connects the nodes with each other in this network.

Each node gets a network ID assigned according to the number of networks.
The number of network IDs is arbitrary; using more than one merely helps
exclude false positives.

Nodes should only connect with other nodes with the same network ID.
After the setup phase, the test checks on each node that it has the
expected node connections (excluding those not sharing the network ID).
*/
func TestNetworkID(t *testing.T) {
	log.Debug("Start test")
	//arbitrarily set the number of nodes. It could be any number
	numNodes := 24
	//the nodeMap maps all nodes (slice value) with the same network ID (key)
	nodeMap = make(map[int][]enode.ID)
	//set up the network and connect nodes
	net, err := setupNetwork(numNodes)
	if err != nil {
		t.Fatalf("Error setting up network: %v", err)
	}
	//let's sleep to ensure all nodes are connected
	time.Sleep(1 * time.Second)
	// shut down the network to avoid race conditions
	// on accessing the kademlias global map while network nodes
	// are accepting messages
	net.Shutdown()
	//for each group sharing the same network ID...
	for _, netIDGroup := range nodeMap {
		log.Trace("netIDGroup size", "size", len(netIDGroup))
		//...check that the size of each node's kademlia is as expected;
		//the assumption is that it should be the size of the group minus 1 (the node itself)
		for _, node := range netIDGroup {
			if kademlias[node].addrs.Size() != len(netIDGroup)-1 {
				t.Fatalf("Kademlia does not have the expected number of peers. Kademlia size: %d, expected size: %d", kademlias[node].addrs.Size(), len(netIDGroup)-1)
			}
			kademlias[node].EachAddr(nil, 0, func(addr *BzzAddr, _ int) bool {
				found := false
				for _, nd := range netIDGroup {
					if bytes.Equal(kademlias[nd].BaseAddr(), addr.Address()) {
						found = true
					}
				}
				if !found {
					t.Fatalf("Expected node not found for node %s", node.String())
				}
				return true
			})
		}
	}
	log.Info("Test terminated successfully")
}

// setupNetwork sets up a simulated network with bzz/discovery services and
// connects every node to all previously created ones.
func setupNetwork(numnodes int) (net *simulations.Network, err error) {
	log.Debug("Setting up network")
	quitC := make(chan struct{})
	errc := make(chan error)
	nodes := make([]*simulations.Node, numnodes)
	if numnodes < 16 {
		return nil, fmt.Errorf("Minimum sixteen nodes in network")
	}
	adapter := adapters.NewSimAdapter(newServices())
	//create the network
	net = simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "NetworkIdTestNet",
		DefaultService: "bzz",
	})
	log.Debug("Creating networks and nodes")

	var connCount int

	//create nodes and connect them to each other
	for i := 0; i < numnodes; i++ {
		log.Trace("iteration: ", "i", i)
		nodeconf := adapters.RandomNodeConfig()
		nodes[i], err = net.NewNodeWithConfig(nodeconf)
		if err != nil {
			return nil, fmt.Errorf("error creating node %d: %v", i, err)
		}
		err = net.Start(nodes[i].ID())
		if err != nil {
			return nil, fmt.Errorf("error starting node %d: %v", i, err)
		}
		client, err := nodes[i].Client()
		if err != nil {
			return nil, fmt.Errorf("create node %d rpc client fail: %v", i, err)
		}
		//now set up and start event watching in order to know when we can upload
		ctx, watchCancel := context.WithTimeout(context.Background(), MaxTimeout)
		defer watchCancel()
		watchSubscriptionEvents(ctx, nodes[i].ID(), client, errc, quitC)
		//on every iteration we connect to all previous ones
		for k := i - 1; k >= 0; k-- {
			connCount++
			log.Debug(fmt.Sprintf("Connecting node %d with node %d; connection count is %d", i, k, connCount))
			err = net.Connect(nodes[i].ID(), nodes[k].ID())
			if err != nil {
				if !strings.Contains(err.Error(), "already connected") {
					return nil, fmt.Errorf("error connecting nodes: %v", err)
				}
			}
		}
	}
	//now wait until the expected number of subscriptions has been reached;
	//`watchSubscriptionEvents` will write a `nil` value to errc
	for err := range errc {
		if err != nil {
			return nil, err
		}
		//`nil` received, decrement count
		connCount--
		log.Trace("count down", "cnt", connCount)
		//all subscriptions received
		if connCount == 0 {
			close(quitC)
			break
		}
	}
	log.Debug("Network setup phase terminated")
	return net, nil
}

func newServices() adapters.Services {
	kademlias = make(map[enode.ID]*Kademlia)
	kademlia := func(id enode.ID) *Kademlia {
		if k, ok := kademlias[id]; ok {
			return k
		}
		params := NewKadParams()
		params.NeighbourhoodSize = 2
		params.MaxBinSize = 3
		params.MinBinSize = 1
		params.MaxRetries = 1000
		params.RetryExponent = 2
		params.RetryInterval = 1000000
		kademlias[id] = NewKademlia(id[:], params)
		return kademlias[id]
	}
	return adapters.Services{
		"bzz": func(ctx *adapters.ServiceContext) (node.Service, error) {
			addr := NewAddr(ctx.Config.Node())
			hp := NewHiveParams()
			hp.Discovery = false
			cnt++
			//assign the network ID
			currentNetworkID = cnt % NumberOfNets
			if ok := nodeMap[currentNetworkID]; ok == nil {
				nodeMap[currentNetworkID] = make([]enode.ID, 0)
			}
			//add this node to the group sharing the same network ID
			nodeMap[currentNetworkID] = append(nodeMap[currentNetworkID], ctx.Config.ID)
			log.Debug("current network ID:", "id", currentNetworkID)
			config := &BzzConfig{
				OverlayAddr:  addr.Over(),
				UnderlayAddr: addr.Under(),
				HiveParams:   hp,
				NetworkID:    uint64(currentNetworkID),
			}
			return NewBzz(config, kademlia(ctx.Config.ID), nil, nil, nil), nil
		},
	}
}

func watchSubscriptionEvents(ctx context.Context, id enode.ID, client *rpc.Client, errc chan error, quitC chan struct{}) {
	events := make(chan *p2p.PeerEvent)
	sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
	if err != nil {
		log.Error(err.Error())
		errc <- fmt.Errorf("error getting peer events for node %v: %s", id, err)
		return
	}
	go func() {
		defer func() {
			sub.Unsubscribe()
			log.Trace("watch subscription events: unsubscribe", "id", id)
		}()

		for {
			select {
			case <-quitC:
				return
			case <-ctx.Done():
				select {
				case errc <- ctx.Err():
				case <-quitC:
				}
				return
			case e := <-events:
				if e.Type == p2p.PeerEventTypeAdd {
					errc <- nil
				}
			case err := <-sub.Err():
				if err != nil {
					select {
					case errc <- fmt.Errorf("error getting peer events for node %v: %v", id, err):
					case <-quitC:
					}
					return
				}
			}
		}
	}()
}
@ -1,118 +0,0 @@

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// package priorityqueue implements a channel-based priority queue
// over arbitrary types. It provides an autopop loop applying a function
// to the items, always respecting their priority. The structure is only
// quasi-consistent, i.e., if a lower priority item is autopopped, it is
// guaranteed that there was a point when no higher priority item was
// present; it is not guaranteed that there was any point where the lower
// priority item was present but the higher was not.

package priorityqueue

import (
	"context"
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

var (
	ErrContention = errors.New("contention")

	errBadPriority = errors.New("bad priority")

	wakey = struct{}{}
)

// PriorityQueue is the basic structure
type PriorityQueue struct {
	Queues []chan interface{}
	wakeup chan struct{}
}

// New is the constructor for PriorityQueue
func New(n int, l int) *PriorityQueue {
	var queues = make([]chan interface{}, n)
	for i := range queues {
		queues[i] = make(chan interface{}, l)
	}
	return &PriorityQueue{
		Queues: queues,
		wakeup: make(chan struct{}, 1),
	}
}

// Run is a forever loop popping items from the queues
func (pq *PriorityQueue) Run(ctx context.Context, f func(interface{})) {
	top := len(pq.Queues) - 1
	p := top
READ:
	for {
		q := pq.Queues[p]
		select {
		case <-ctx.Done():
			return
		case x := <-q:
			val := x.(struct {
				v interface{}
				t time.Time
			})
			f(val.v)
			metrics.GetOrRegisterResettingTimer("pq.run", nil).UpdateSince(val.t)
			p = top
		default:
			if p > 0 {
				p--
				continue READ
			}
			p = top
			select {
			case <-ctx.Done():
				return
			case <-pq.wakeup:
			}
		}
	}
}

// Push pushes an item to the queue specified by the priority argument.
// It never blocks: if the target queue is full, it returns ErrContention.
func (pq *PriorityQueue) Push(x interface{}, p int) error {
	if p < 0 || p >= len(pq.Queues) {
		return errBadPriority
	}
	val := struct {
		v interface{}
		t time.Time
	}{
		x,
		time.Now(),
	}
	select {
	case pq.Queues[p] <- val:
	default:
		return ErrContention
	}
	select {
	case pq.wakeup <- wakey:
	default:
	}
	return nil
}
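
// Usage sketch (illustrative; handle, msg and ctx are hypothetical): three
// priority levels with queue length 4. Run consumes from the highest
// non-empty priority first; Push never blocks and reports contention when
// the target queue is full.
//
//	pq := New(3, 4)
//	go pq.Run(ctx, func(v interface{}) { handle(v) })
//	if err := pq.Push(msg, 2); err == ErrContention {
//		// queue 2 is full; the caller decides whether to drop or retry
//	}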
@ -1,97 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
package priorityqueue
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPriorityQueue(t *testing.T) {
|
||||
var results []string
|
||||
wg := sync.WaitGroup{}
|
||||
pq := New(3, 2)
|
||||
wg.Add(1)
|
||||
go pq.Run(context.Background(), func(v interface{}) {
|
||||
results = append(results, v.(string))
|
||||
wg.Done()
|
||||
})
|
||||
pq.Push("2.0", 2)
|
||||
wg.Wait()
|
||||
if results[0] != "2.0" {
|
||||
t.Errorf("expected first result %q, got %q", "2.0", results[0])
|
||||
}
|
||||
|
||||
Loop:
|
||||
for i, tc := range []struct {
|
||||
priorities []int
|
||||
values []string
|
||||
results []string
|
||||
errors []error
|
||||
}{
|
||||
{
|
||||
priorities: []int{0},
|
||||
values: []string{""},
|
||||
results: []string{""},
|
||||
},
|
||||
{
|
||||
priorities: []int{0, 1},
|
||||
values: []string{"0.0", "1.0"},
|
||||
results: []string{"1.0", "0.0"},
|
||||
},
|
||||
{
|
||||
priorities: []int{1, 0},
|
||||
values: []string{"1.0", "0.0"},
|
||||
results: []string{"1.0", "0.0"},
|
||||
},
|
||||
{
|
||||
priorities: []int{0, 1, 1},
|
||||
values: []string{"0.0", "1.0", "1.1"},
|
||||
results: []string{"1.0", "1.1", "0.0"},
|
||||
},
|
||||
{
|
||||
priorities: []int{0, 0, 0},
|
||||
values: []string{"0.0", "0.0", "0.1"},
|
||||
errors: []error{nil, nil, ErrContention},
|
||||
},
|
||||
} {
|
||||
var results []string
|
||||
wg := sync.WaitGroup{}
|
||||
pq := New(3, 2)
|
||||
wg.Add(len(tc.values))
|
||||
for j, value := range tc.values {
|
||||
err := pq.Push(value, tc.priorities[j])
|
||||
if tc.errors != nil && err != tc.errors[j] {
|
||||
t.Errorf("expected push error %v, got %v", tc.errors[j], err)
|
||||
continue Loop
|
||||
}
|
||||
if err != nil {
|
||||
continue Loop
|
||||
}
|
||||
}
|
||||
go pq.Run(context.Background(), func(v interface{}) {
|
||||
results = append(results, v.(string))
|
||||
wg.Done()
|
||||
})
|
||||
wg.Wait()
|
||||
for k, result := range tc.results {
|
||||
if results[k] != result {
|
||||
t.Errorf("test case %v: expected %v element %q, got %q", i, k, result, results[k])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
@ -1,335 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/state"
)

const (
	DefaultNetworkID = 4
	// timeout for waiting
	bzzHandshakeTimeout = 3000 * time.Millisecond
)

var DefaultTestNetworkID = rand.Uint64()

// BzzSpec is the spec of the generic swarm handshake
var BzzSpec = &protocols.Spec{
	Name:       "bzz",
	Version:    9,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		HandshakeMsg{},
	},
}

// DiscoverySpec is the spec for the bzz discovery subprotocols
var DiscoverySpec = &protocols.Spec{
	Name:       "hive",
	Version:    8,
	MaxMsgSize: 10 * 1024 * 1024,
	Messages: []interface{}{
		peersMsg{},
		subPeersMsg{},
	},
}

// BzzConfig captures the config params used by the hive
type BzzConfig struct {
	OverlayAddr  []byte // base address of the overlay network
	UnderlayAddr []byte // node's underlay address
	HiveParams   *HiveParams
	NetworkID    uint64
	LightNode    bool
	BootnodeMode bool
}

// Bzz is the swarm protocol bundle
type Bzz struct {
	*Hive
	NetworkID    uint64
	LightNode    bool
	localAddr    *BzzAddr
	mtx          sync.Mutex
	handshakes   map[enode.ID]*HandshakeMsg
	streamerSpec *protocols.Spec
	streamerRun  func(*BzzPeer) error
}

// NewBzz is the swarm protocol constructor.
// Arguments:
// * bzz config
// * overlay driver
// * peer store
func NewBzz(config *BzzConfig, kad *Kademlia, store state.Store, streamerSpec *protocols.Spec, streamerRun func(*BzzPeer) error) *Bzz {
	bzz := &Bzz{
		Hive:         NewHive(config.HiveParams, kad, store),
		NetworkID:    config.NetworkID,
		LightNode:    config.LightNode,
		localAddr:    &BzzAddr{config.OverlayAddr, config.UnderlayAddr},
		handshakes:   make(map[enode.ID]*HandshakeMsg),
		streamerRun:  streamerRun,
		streamerSpec: streamerSpec,
	}

	if config.BootnodeMode {
		bzz.streamerRun = nil
		bzz.streamerSpec = nil
	}

	return bzz
}

// UpdateLocalAddr updates the underlay address of the running node
func (b *Bzz) UpdateLocalAddr(byteaddr []byte) *BzzAddr {
	b.localAddr = b.localAddr.Update(&BzzAddr{
		UAddr: byteaddr,
		OAddr: b.localAddr.OAddr,
	})
	return b.localAddr
}

// NodeInfo returns the node's overlay address
func (b *Bzz) NodeInfo() interface{} {
	return b.localAddr.Address()
}

// Protocols returns the protocols swarm offers.
// Bzz implements the node.Service interface:
// * handshake/hive
// * discovery
func (b *Bzz) Protocols() []p2p.Protocol {
	protocol := []p2p.Protocol{
		{
			Name:     BzzSpec.Name,
			Version:  BzzSpec.Version,
			Length:   BzzSpec.Length(),
			Run:      b.runBzz,
			NodeInfo: b.NodeInfo,
		},
		{
			Name:     DiscoverySpec.Name,
			Version:  DiscoverySpec.Version,
			Length:   DiscoverySpec.Length(),
			Run:      b.RunProtocol(DiscoverySpec, b.Hive.Run),
			NodeInfo: b.Hive.NodeInfo,
			PeerInfo: b.Hive.PeerInfo,
		},
	}
	if b.streamerSpec != nil && b.streamerRun != nil {
		protocol = append(protocol, p2p.Protocol{
			Name:    b.streamerSpec.Name,
			Version: b.streamerSpec.Version,
			Length:  b.streamerSpec.Length(),
			Run:     b.RunProtocol(b.streamerSpec, b.streamerRun),
		})
	}
	return protocol
}

// APIs returns the APIs offered by bzz:
// * hive
// Bzz implements the node.Service interface.
func (b *Bzz) APIs() []rpc.API {
	return []rpc.API{{
		Namespace: "hive",
		Version:   "3.0",
		Service:   b.Hive,
	}}
}

// RunProtocol is a wrapper for swarm subprotocols.
// It returns a p2p protocol run function that can be assigned to the p2p.Protocol#Run field.
// Arguments:
// * p2p protocol spec
// * run function taking a BzzPeer as argument;
//   this run function is meant to block for the duration of the protocol session;
//   on return the session is terminated and the peer is disconnected.
// The protocol waits until the bzz handshake is negotiated;
// the overlay address on the BzzPeer is set from the remote handshake.
func (b *Bzz) RunProtocol(spec *protocols.Spec, run func(*BzzPeer) error) func(*p2p.Peer, p2p.MsgReadWriter) error {
	return func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
		// wait for the bzz protocol to perform the handshake
		handshake, _ := b.GetOrCreateHandshake(p.ID())
		defer b.removeHandshake(p.ID())
		select {
		case <-handshake.done:
		case <-time.After(bzzHandshakeTimeout):
			return fmt.Errorf("%08x: %s protocol timeout waiting for handshake on %08x", b.BaseAddr()[:4], spec.Name, p.ID().Bytes()[:4])
		}
		if handshake.err != nil {
			return fmt.Errorf("%08x: %s protocol closed: %v", b.BaseAddr()[:4], spec.Name, handshake.err)
		}
		// the handshake has succeeded so construct the BzzPeer and run the protocol
		peer := &BzzPeer{
			Peer:       protocols.NewPeer(p, rw, spec),
			BzzAddr:    handshake.peerAddr,
			lastActive: time.Now(),
			LightNode:  handshake.LightNode,
		}

		log.Debug("peer created", "addr", handshake.peerAddr.String())

		return run(peer)
	}
}

// performHandshake implements the negotiation of the bzz handshake
// shared among swarm subprotocols
func (b *Bzz) performHandshake(p *protocols.Peer, handshake *HandshakeMsg) error {
	ctx, cancel := context.WithTimeout(context.Background(), bzzHandshakeTimeout)
	defer func() {
		close(handshake.done)
		cancel()
	}()
	rsh, err := p.Handshake(ctx, handshake, b.checkHandshake)
	if err != nil {
		handshake.err = err
		return err
	}
	handshake.peerAddr = rsh.(*HandshakeMsg).Addr
	handshake.LightNode = rsh.(*HandshakeMsg).LightNode
	return nil
}

// runBzz is the p2p protocol run function for the bzz base protocol
// that negotiates the bzz handshake
func (b *Bzz) runBzz(p *p2p.Peer, rw p2p.MsgReadWriter) error {
	handshake, _ := b.GetOrCreateHandshake(p.ID())
	if !<-handshake.init {
		return fmt.Errorf("%08x: bzz already started on peer %08x", b.localAddr.Over()[:4], p.ID().Bytes()[:4])
	}
	close(handshake.init)
	defer b.removeHandshake(p.ID())
	peer := protocols.NewPeer(p, rw, BzzSpec)
	err := b.performHandshake(peer, handshake)
	if err != nil {
		log.Warn(fmt.Sprintf("%08x: handshake failed with remote peer %08x: %v", b.localAddr.Over()[:4], p.ID().Bytes()[:4], err))

		return err
	}
	// fail if we get another handshake
	msg, err := rw.ReadMsg()
	if err != nil {
		return err
	}
	msg.Discard()
	return errors.New("received multiple handshakes")
}

// BzzPeer is the bzz protocol view of a protocols.Peer (itself an extension of p2p.Peer).
// It implements the Peer interface and all interfaces Peer implements: Addr, OverlayPeer.
type BzzPeer struct {
	*protocols.Peer           // represents the connection for online peers
	*BzzAddr                  // remote address -> implements Addr interface = protocols.Peer
	lastActive time.Time      // updated whenever mutexes are released
	LightNode  bool
}

func NewBzzPeer(p *protocols.Peer) *BzzPeer {
	return &BzzPeer{Peer: p, BzzAddr: NewAddr(p.Node())}
}

// ID returns the peer's underlay node identifier.
func (p *BzzPeer) ID() enode.ID {
	// This is here to resolve a method tie: both protocols.Peer and BzzAddr are embedded
	// into the struct and provide ID(). The protocols.Peer version is faster, ensure it
	// gets used.
	return p.Peer.ID()
}

/*
Handshake

* Version: 8 byte integer version of the protocol
* NetworkID: 8 byte integer network identifier
* Addr: the address advertised by the node including underlay and overlay connections
*/
type HandshakeMsg struct {
	Version   uint64
	NetworkID uint64
	Addr      *BzzAddr
	LightNode bool

	// peerAddr is the address received in the peer handshake
	peerAddr *BzzAddr

	init chan bool
	done chan struct{}
	err  error
}

// String pretty prints the handshake
func (bh *HandshakeMsg) String() string {
	return fmt.Sprintf("Handshake: Version: %v, NetworkID: %v, Addr: %v, LightNode: %v, peerAddr: %v", bh.Version, bh.NetworkID, bh.Addr, bh.LightNode, bh.peerAddr)
}

// checkHandshake validates the remote handshake message against the local
// protocol version and network id
func (b *Bzz) checkHandshake(hs interface{}) error {
	rhs := hs.(*HandshakeMsg)
	if rhs.NetworkID != b.NetworkID {
		return fmt.Errorf("network id mismatch %d (!= %d)", rhs.NetworkID, b.NetworkID)
	}
	if rhs.Version != uint64(BzzSpec.Version) {
		return fmt.Errorf("version mismatch %d (!= %d)", rhs.Version, BzzSpec.Version)
	}
	return nil
}

// removeHandshake removes the handshake for the peer with peerID
// from the bzz handshake store
func (b *Bzz) removeHandshake(peerID enode.ID) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	delete(b.handshakes, peerID)
}

// GetOrCreateHandshake returns the bzz handshake that the remote peer with peerID sent,
// creating it first if it does not exist yet
func (b *Bzz) GetOrCreateHandshake(peerID enode.ID) (*HandshakeMsg, bool) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	handshake, found := b.handshakes[peerID]
	if !found {
		handshake = &HandshakeMsg{
			Version:   uint64(BzzSpec.Version),
			NetworkID: b.NetworkID,
			Addr:      b.localAddr,
			LightNode: b.LightNode,
			init:      make(chan bool, 1),
			done:      make(chan struct{}),
		}
		// when the handshake is first created for a remote peer
		// it is initialised with the init signal
		handshake.init <- true
		b.handshakes[peerID] = handshake
	}

	return handshake, found
}
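
// Illustrative wiring sketch (not part of the original file; the addresses
// and parameters are placeholders): constructing the bzz protocol bundle and
// obtaining its subprotocols for registration on a p2p server.
//
//	config := &BzzConfig{
//		OverlayAddr:  overlayAddr,  // placeholder overlay address
//		UnderlayAddr: underlayAddr, // placeholder underlay address
//		HiveParams:   NewHiveParams(),
//		NetworkID:    DefaultNetworkID,
//	}
//	kad := NewKademlia(overlayAddr, NewKadParams())
//	bzz := NewBzz(config, kad, nil, nil, nil) // no state store or streamer in this sketch
//	protos := bzz.Protocols()                 // register these on a p2p.Server
//	_ = protos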
@ -1,343 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package network

import (
	"crypto/ecdsa"
	"flag"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
	"github.com/ethereum/go-ethereum/p2p/protocols"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/pot"
)

const (
	TestProtocolVersion = 9
)

var TestProtocolNetworkID = DefaultTestNetworkID

var (
	loglevel = flag.Int("loglevel", 2, "verbosity of logs")
)

func init() {
	flag.Parse()
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
}

func HandshakeMsgExchange(lhs, rhs *HandshakeMsg, id enode.ID) []p2ptest.Exchange {
	return []p2ptest.Exchange{
		{
			Expects: []p2ptest.Expect{
				{
					Code: 0,
					Msg:  lhs,
					Peer: id,
				},
			},
		},
		{
			Triggers: []p2ptest.Trigger{
				{
					Code: 0,
					Msg:  rhs,
					Peer: id,
				},
			},
		},
	}
}

func newBzzBaseTester(n int, prvkey *ecdsa.PrivateKey, spec *protocols.Spec, run func(*BzzPeer) error) (*bzzTester, error) {
	var addrs [][]byte
	for i := 0; i < n; i++ {
		addr := pot.RandomAddress()
		addrs = append(addrs, addr[:])
	}
	pt, _, err := newBzzBaseTesterWithAddrs(prvkey, addrs, spec, run)
	return pt, err
}

func newBzzBaseTesterWithAddrs(prvkey *ecdsa.PrivateKey, addrs [][]byte, spec *protocols.Spec, run func(*BzzPeer) error) (*bzzTester, [][]byte, error) {
	n := len(addrs)
	cs := make(map[enode.ID]chan bool)
	var csMu sync.Mutex

	srv := func(p *BzzPeer) error {
		defer func() {
			csMu.Lock()
			defer csMu.Unlock()
			if cs[p.ID()] != nil {
				close(cs[p.ID()])
			}
		}()
		return run(p)
	}
	mu := &sync.Mutex{}
	nodeToAddr := make(map[enode.ID][]byte)
	protocol := func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
		mu.Lock()
		nodeToAddr[p.ID()] = addrs[0]
		mu.Unlock()
		bzzAddr := &BzzAddr{addrs[0], []byte(p.Node().String())}
		addrs = addrs[1:]
		return srv(&BzzPeer{Peer: protocols.NewPeer(p, rw, spec), BzzAddr: bzzAddr})
	}

	s := p2ptest.NewProtocolTester(prvkey, n, protocol)
	var record enr.Record
	bzzKey := PrivateKeyToBzzKey(prvkey)
	record.Set(NewENRAddrEntry(bzzKey))
	err := enode.SignV4(&record, prvkey)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to generate ENR: %v", err)
	}
	nod, err := enode.New(enode.V4ID{}, &record)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to create enode: %v", err)
	}
	addr := getENRBzzAddr(nod)

	csMu.Lock()
	for _, node := range s.Nodes {
		log.Warn("node", "node", node)
		cs[node.ID()] = make(chan bool)
	}
	csMu.Unlock()

	var nodeAddrs [][]byte
	pt := &bzzTester{
		addr:           addr,
		ProtocolTester: s,
		cs:             cs,
	}
	mu.Lock()
	for _, n := range pt.Nodes {
		nodeAddrs = append(nodeAddrs, nodeToAddr[n.ID()])
	}
	mu.Unlock()

	return pt, nodeAddrs, nil
}

type bzzTester struct {
	*p2ptest.ProtocolTester
	addr *BzzAddr
	cs   map[enode.ID]chan bool
	bzz  *Bzz
}

func newBzz(addr *BzzAddr, lightNode bool) *Bzz {
	config := &BzzConfig{
		OverlayAddr:  addr.Over(),
		UnderlayAddr: addr.Under(),
		HiveParams:   NewHiveParams(),
		NetworkID:    DefaultTestNetworkID,
		LightNode:    lightNode,
	}
	kad := NewKademlia(addr.OAddr, NewKadParams())
	bzz := NewBzz(config, kad, nil, nil, nil)
	return bzz
}

func newBzzHandshakeTester(n int, prvkey *ecdsa.PrivateKey, lightNode bool) (*bzzTester, error) {

	var record enr.Record
	bzzkey := PrivateKeyToBzzKey(prvkey)
	record.Set(NewENRAddrEntry(bzzkey))
	record.Set(ENRLightNodeEntry(lightNode))
	err := enode.SignV4(&record, prvkey)
	if err != nil {
		return nil, err
	}
	nod, err := enode.New(enode.V4ID{}, &record)
	// check the previously ignored error from enode.New
	if err != nil {
		return nil, err
	}
	addr := getENRBzzAddr(nod)

	bzz := newBzz(addr, lightNode)

	pt := p2ptest.NewProtocolTester(prvkey, n, bzz.runBzz)

	return &bzzTester{
		addr:           addr,
		ProtocolTester: pt,
		bzz:            bzz,
	}, nil
}

// should test handshakes in one exchange? parallelisation
func (s *bzzTester) testHandshake(lhs, rhs *HandshakeMsg, disconnects ...*p2ptest.Disconnect) error {
	if err := s.TestExchanges(HandshakeMsgExchange(lhs, rhs, rhs.Addr.ID())...); err != nil {
		return err
	}

	if len(disconnects) > 0 {
		return s.TestDisconnected(disconnects...)
	}

	// If we don't expect disconnect, ensure peers remain connected
	err := s.TestDisconnected(&p2ptest.Disconnect{
		Peer:  s.Nodes[0].ID(),
		Error: nil,
	})

	if err == nil {
		return fmt.Errorf("Unexpected peer disconnect")
	}

	if err.Error() != "timed out waiting for peers to disconnect" {
		return err
	}

	return nil
}

func correctBzzHandshake(addr *BzzAddr, lightNode bool) *HandshakeMsg {
	return &HandshakeMsg{
		Version:   TestProtocolVersion,
		NetworkID: TestProtocolNetworkID,
		Addr:      addr,
		LightNode: lightNode,
	}
}

func TestBzzHandshakeNetworkIDMismatch(t *testing.T) {
	lightNode := false
	prvkey, err := crypto.GenerateKey()
	if err != nil {
		t.Fatal(err)
	}
	s, err := newBzzHandshakeTester(1, prvkey, lightNode)
	if err != nil {
		t.Fatal(err)
	}
	defer s.Stop()
	node := s.Nodes[0]

	err = s.testHandshake(
		correctBzzHandshake(s.addr, lightNode),
		&HandshakeMsg{Version: TestProtocolVersion, NetworkID: 321, Addr: NewAddr(node)},
		&p2ptest.Disconnect{Peer: node.ID(), Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): network id mismatch 321 (!= %v)", TestProtocolNetworkID)},
	)

	if err != nil {
		t.Fatal(err)
	}
}

func TestBzzHandshakeVersionMismatch(t *testing.T) {
	lightNode := false
	prvkey, err := crypto.GenerateKey()
	if err != nil {
		t.Fatal(err)
	}
	s, err := newBzzHandshakeTester(1, prvkey, lightNode)
	if err != nil {
		t.Fatal(err)
	}
	defer s.Stop()
	node := s.Nodes[0]

	err = s.testHandshake(
		correctBzzHandshake(s.addr, lightNode),
		&HandshakeMsg{Version: 0, NetworkID: TestProtocolNetworkID, Addr: NewAddr(node)},
		&p2ptest.Disconnect{Peer: node.ID(), Error: fmt.Errorf("Handshake error: Message handler error: (msg code 0): version mismatch 0 (!= %d)", TestProtocolVersion)},
	)

	if err != nil {
		t.Fatal(err)
	}
}

func TestBzzHandshakeSuccess(t *testing.T) {
	lightNode := false
	prvkey, err := crypto.GenerateKey()
	if err != nil {
		t.Fatal(err)
	}
	s, err := newBzzHandshakeTester(1, prvkey, lightNode)
	if err != nil {
		t.Fatal(err)
	}
	defer s.Stop()
	node := s.Nodes[0]

	err = s.testHandshake(
		correctBzzHandshake(s.addr, lightNode),
		&HandshakeMsg{Version: TestProtocolVersion, NetworkID: TestProtocolNetworkID, Addr: NewAddr(node)},
	)

	if err != nil {
		t.Fatal(err)
	}
}

func TestBzzHandshakeLightNode(t *testing.T) {
	var lightNodeTests = []struct {
		name      string
		lightNode bool
	}{
		{"on", true},
		{"off", false},
	}

	for _, test := range lightNodeTests {
		t.Run(test.name, func(t *testing.T) {
			prvkey, err := crypto.GenerateKey()
			if err != nil {
				t.Fatal(err)
			}
			pt, err := newBzzHandshakeTester(1, prvkey, false)
			if err != nil {
				t.Fatal(err)
			}
			defer pt.Stop()

			node := pt.Nodes[0]
			addr := NewAddr(node)

			err = pt.testHandshake(
				correctBzzHandshake(pt.addr, false),
				&HandshakeMsg{Version: TestProtocolVersion, NetworkID: TestProtocolNetworkID, Addr: addr, LightNode: test.lightNode},
			)

			if err != nil {
				t.Fatal(err)
			}

			select {

			case <-pt.bzz.handshakes[node.ID()].done:
				if pt.bzz.handshakes[node.ID()].LightNode != test.lightNode {
					t.Fatalf("peer LightNode flag is %v, should be %v", pt.bzz.handshakes[node.ID()].LightNode, test.lightNode)
				}
			case <-time.After(10 * time.Second):
				t.Fatal("test timeout")
			}
		})
	}
}
@ -1,79 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import "github.com/ethereum/go-ethereum/p2p/enode"

// BucketKey is the type that should be used for keys in simulation buckets.
type BucketKey string

// NodeItem returns an item set in ServiceFunc function for a particular node.
func (s *Simulation) NodeItem(id enode.ID, key interface{}) (value interface{}, ok bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if _, ok := s.buckets[id]; !ok {
		return nil, false
	}
	return s.buckets[id].Load(key)
}

// SetNodeItem sets a new item associated with the node with provided NodeID.
// Buckets should be used to avoid managing separate simulation global state.
func (s *Simulation) SetNodeItem(id enode.ID, key interface{}, value interface{}) {
	s.mu.Lock()
	defer s.mu.Unlock()

	s.buckets[id].Store(key, value)
}

// NodesItems returns a map of items from all nodes that are all set under the
// same BucketKey.
func (s *Simulation) NodesItems(key interface{}) (values map[enode.ID]interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	ids := s.NodeIDs()
	values = make(map[enode.ID]interface{}, len(ids))
	for _, id := range ids {
		if _, ok := s.buckets[id]; !ok {
			continue
		}
		if v, ok := s.buckets[id].Load(key); ok {
			values[id] = v
		}
	}
	return values
}

// UpNodesItems returns a map of items with the same BucketKey from all nodes that are up.
func (s *Simulation) UpNodesItems(key interface{}) (values map[enode.ID]interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	ids := s.UpNodeIDs()
	values = make(map[enode.ID]interface{})
	for _, id := range ids {
		if _, ok := s.buckets[id]; !ok {
			continue
		}
		if v, ok := s.buckets[id].Load(key); ok {
			values[id] = v
		}
	}
	return values
}
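
// Illustrative usage sketch (not part of the original file; the key and value
// are examples only): storing a per-node item and reading it back. Callers
// own the type assertion on the returned interface{} value.
//
//	const counterKey = BucketKey("counter")
//	sim.SetNodeItem(nodeID, counterKey, 42)
//	if v, ok := sim.NodeItem(nodeID, counterKey); ok {
//		n := v.(int) // panics if another type was stored under this key
//		_ = n
//	}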
@ -1,155 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
	"sync"
	"testing"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

// TestServiceBucket tests all bucket functionality using subtests.
// It constructs a simulation of two nodes by adding items to their buckets
// in the ServiceFunc constructor, then by SetNodeItem. Testing UpNodesItems
// is done by stopping one node and validating availability of its items.
func TestServiceBucket(t *testing.T) {
	testKey := "Key"
	testValue := "Value"

	sim := New(map[string]ServiceFunc{
		"noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
			b.Store(testKey, testValue+ctx.Config.ID.String())
			return newNoopService(), nil, nil
		},
	})
	defer sim.Close()

	id1, err := sim.AddNode()
	if err != nil {
		t.Fatal(err)
	}

	id2, err := sim.AddNode()
	if err != nil {
		t.Fatal(err)
	}

	t.Run("ServiceFunc bucket Store", func(t *testing.T) {
		v, ok := sim.NodeItem(id1, testKey)
		if !ok {
			t.Fatal("bucket item not found")
		}
		s, ok := v.(string)
		if !ok {
			t.Fatal("bucket item value is not string")
		}
		if s != testValue+id1.String() {
			t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
		}

		v, ok = sim.NodeItem(id2, testKey)
		if !ok {
			t.Fatal("bucket item not found")
		}
		s, ok = v.(string)
		if !ok {
			t.Fatal("bucket item value is not string")
		}
		if s != testValue+id2.String() {
			t.Fatalf("expected %q, got %q", testValue+id2.String(), s)
		}
	})

	customKey := "anotherKey"
	customValue := "anotherValue"

	t.Run("SetNodeItem", func(t *testing.T) {
		sim.SetNodeItem(id1, customKey, customValue)

		v, ok := sim.NodeItem(id1, customKey)
		if !ok {
			t.Fatal("bucket item not found")
		}
		s, ok := v.(string)
		if !ok {
			t.Fatal("bucket item value is not string")
		}
		if s != customValue {
			t.Fatalf("expected %q, got %q", customValue, s)
		}

		_, ok = sim.NodeItem(id2, customKey)
		if ok {
			t.Fatal("bucket item should not be found")
		}
	})

	if err := sim.StopNode(id2); err != nil {
		t.Fatal(err)
	}

	t.Run("UpNodesItems", func(t *testing.T) {
		items := sim.UpNodesItems(testKey)

		v, ok := items[id1]
		if !ok {
			t.Errorf("node 1 item not found")
		}
		s, ok := v.(string)
		if !ok {
			t.Fatal("node 1 item value is not string")
		}
		if s != testValue+id1.String() {
			t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
		}

		_, ok = items[id2]
		if ok {
			t.Errorf("node 2 item should not be found")
		}
	})

	t.Run("NodeItems", func(t *testing.T) {
		items := sim.NodesItems(testKey)

		v, ok := items[id1]
		if !ok {
			t.Errorf("node 1 item not found")
		}
		s, ok := v.(string)
		if !ok {
			t.Fatal("node 1 item value is not string")
		}
		if s != testValue+id1.String() {
			t.Fatalf("expected %q, got %q", testValue+id1.String(), s)
		}

		v, ok = items[id2]
		if !ok {
			t.Errorf("node 2 item not found")
		}
		s, ok = v.(string)
		if !ok {
			t.Fatal("node 2 item value is not string")
		}
		if s != testValue+id2.String() {
			t.Fatalf("expected %q, got %q", testValue+id2.String(), s)
		}
	})
}
@ -1,217 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
	"context"
	"sync"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
)

// PeerEvent is the type of the channel returned by Simulation.PeerEvents.
type PeerEvent struct {
	// NodeID is the ID of node that the event is caught on.
	NodeID enode.ID
	// PeerID is the ID of the peer node that the event is caught on.
	PeerID enode.ID
	// Event is the event that is caught.
	Event *simulations.Event
	// Error is the error that may have happened during event watching.
	Error error
}

// PeerEventsFilter defines a filter on PeerEvents: only events matching the
// defined properties are passed through. Use PeerEventsFilter methods to set
// the required options.
type PeerEventsFilter struct {
	eventType simulations.EventType

	connUp *bool

	msgReceive *bool
	protocol   *string
	msgCode    *uint64
}

// NewPeerEventsFilter returns a new PeerEventsFilter instance.
func NewPeerEventsFilter() *PeerEventsFilter {
	return &PeerEventsFilter{}
}

// Connect sets the filter to events when two nodes connect.
func (f *PeerEventsFilter) Connect() *PeerEventsFilter {
	f.eventType = simulations.EventTypeConn
	b := true
	f.connUp = &b
	return f
}

// Drop sets the filter to events when two nodes disconnect.
func (f *PeerEventsFilter) Drop() *PeerEventsFilter {
	f.eventType = simulations.EventTypeConn
	b := false
	f.connUp = &b
	return f
}

// ReceivedMessages sets the filter to only messages that are received.
func (f *PeerEventsFilter) ReceivedMessages() *PeerEventsFilter {
	f.eventType = simulations.EventTypeMsg
	b := true
	f.msgReceive = &b
	return f
}

// SentMessages sets the filter to only messages that are sent.
func (f *PeerEventsFilter) SentMessages() *PeerEventsFilter {
	f.eventType = simulations.EventTypeMsg
	b := false
	f.msgReceive = &b
	return f
}

// Protocol sets the filter to only one message protocol.
func (f *PeerEventsFilter) Protocol(p string) *PeerEventsFilter {
	f.eventType = simulations.EventTypeMsg
	f.protocol = &p
	return f
}

// MsgCode sets the filter to only one msg code.
func (f *PeerEventsFilter) MsgCode(c uint64) *PeerEventsFilter {
	f.eventType = simulations.EventTypeMsg
	f.msgCode = &c
	return f
}
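
// Illustrative sketch (not part of the original file): the filter setters
// above return the receiver, so they can be chained into one expression.
// The protocol name and message code here are examples only.
//
//	filter := NewPeerEventsFilter().ReceivedMessages().Protocol("hive").MsgCode(1)
//	_ = filter // pass to Simulation.PeerEvents as a variadic argument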

// PeerEvents returns a channel of events that are captured by the admin
// peerEvents subscription on nodes with the provided NodeIDs. Additional
// filters can be set to ignore events that are not relevant.
func (s *Simulation) PeerEvents(ctx context.Context, ids []enode.ID, filters ...*PeerEventsFilter) <-chan PeerEvent {
	eventC := make(chan PeerEvent)

	// wait group to make sure all subscriptions to admin peerEvents are established
	// before this function returns.
	var subsWG sync.WaitGroup
	for _, id := range ids {
		s.shutdownWG.Add(1)
		subsWG.Add(1)
		go func(id enode.ID) {
			defer s.shutdownWG.Done()

			events := make(chan *simulations.Event)
			sub := s.Net.Events().Subscribe(events)
			defer sub.Unsubscribe()

			subsWG.Done()

			for {
				select {
				case <-ctx.Done():
					if err := ctx.Err(); err != nil {
						select {
						case eventC <- PeerEvent{NodeID: id, Error: err}:
						case <-s.Done():
						}
					}
					return
				case <-s.Done():
					return
				case e := <-events:
					// ignore control events
					if e.Control {
						continue
					}
					match := len(filters) == 0 // if there are no filters match all events
					for _, f := range filters {
						if f.eventType == simulations.EventTypeConn && e.Conn != nil {
							if *f.connUp != e.Conn.Up {
								continue
							}
							// all connection filter parameters matched, break the loop
							match = true
							break
						}
						if f.eventType == simulations.EventTypeMsg && e.Msg != nil {
							if f.msgReceive != nil && *f.msgReceive != e.Msg.Received {
								continue
							}
							if f.protocol != nil && *f.protocol != e.Msg.Protocol {
								continue
							}
							if f.msgCode != nil && *f.msgCode != e.Msg.Code {
								continue
							}
							// all message filter parameters matched, break the loop
							match = true
							break
						}
					}
					var peerID enode.ID
					switch e.Type {
					case simulations.EventTypeConn:
						peerID = e.Conn.One
						if peerID == id {
							peerID = e.Conn.Other
						}
					case simulations.EventTypeMsg:
						peerID = e.Msg.One
						if peerID == id {
							peerID = e.Msg.Other
						}
					}
					if match {
						select {
						case eventC <- PeerEvent{NodeID: id, PeerID: peerID, Event: e}:
						case <-ctx.Done():
							if err := ctx.Err(); err != nil {
								select {
								case eventC <- PeerEvent{NodeID: id, PeerID: peerID, Error: err}:
								case <-s.Done():
								}
							}
							return
						case <-s.Done():
							return
						}
					}
				case err := <-sub.Err():
					if err != nil {
						select {
						case eventC <- PeerEvent{NodeID: id, Error: err}:
						case <-ctx.Done():
							if err := ctx.Err(); err != nil {
								select {
								case eventC <- PeerEvent{NodeID: id, Error: err}:
								case <-s.Done():
								}
							}
							return
						case <-s.Done():
							return
						}
					}
				}
			}
		}(id)
	}

	// wait for all subscriptions to be established
	subsWG.Wait()
	return eventC
}
@ -1,107 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
	"context"
	"sync"
	"testing"
	"time"
)

// TestPeerEvents creates a simulation, adds two nodes,
// registers for peer events, connects the nodes in a chain
// and waits for the number of connection events to
// be received.
func TestPeerEvents(t *testing.T) {
	sim := New(noopServiceFuncMap)
	defer sim.Close()

	_, err := sim.AddNodes(2)
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	events := sim.PeerEvents(ctx, sim.NodeIDs())

	// two nodes -> two connection events
	expectedEventCount := 2

	var wg sync.WaitGroup
	wg.Add(expectedEventCount)

	go func() {
		for e := range events {
			if e.Error != nil {
				if e.Error == context.Canceled {
					return
				}
				t.Error(e.Error)
				continue
			}
			wg.Done()
		}
	}()

	err = sim.Net.ConnectNodesChain(sim.NodeIDs())
	if err != nil {
		t.Fatal(err)
	}

	wg.Wait()
}

func TestPeerEventsTimeout(t *testing.T) {
	sim := New(noopServiceFuncMap)
	defer sim.Close()

	_, err := sim.AddNodes(2)
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	events := sim.PeerEvents(ctx, sim.NodeIDs())

	done := make(chan struct{})
	errC := make(chan error)
	go func() {
		for e := range events {
			if e.Error == context.Canceled {
				return
			}
			if e.Error == context.DeadlineExceeded {
				close(done)
				return
			} else {
				errC <- e.Error
			}
		}
	}()

	select {
	case <-time.After(time.Second):
		t.Fatal("no context deadline received")
	case err := <-errC:
		t.Fatal(err)
	case <-done:
		// all good, context deadline detected
	}
}
@ -1,141 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation_test

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
)

// Every node can have a Kademlia associated using the node bucket under
// the BucketKeyKademlia key. This allows using WaitTillHealthy to block until
// all nodes have their Kademlias healthy.
func ExampleSimulation_WaitTillHealthy() {

	sim := simulation.New(map[string]simulation.ServiceFunc{
		"bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
			addr := network.NewAddr(ctx.Config.Node())
			hp := network.NewHiveParams()
			hp.Discovery = false
			config := &network.BzzConfig{
				OverlayAddr:  addr.Over(),
				UnderlayAddr: addr.Under(),
				HiveParams:   hp,
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			// store the kademlia in the node's bucket under BucketKeyKademlia
			// so that it can be found by the WaitTillHealthy method.
			b.Store(simulation.BucketKeyKademlia, kad)
			return network.NewBzz(config, kad, nil, nil, nil), nil, nil
		},
	})
	defer sim.Close()

	_, err := sim.AddNodesAndConnectRing(10)
	if err != nil {
		// handle error properly...
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	ill, err := sim.WaitTillHealthy(ctx)
	if err != nil {
		// inspect the latest detected not healthy kademlias
		for id, kad := range ill {
			fmt.Println("Node", id)
			fmt.Println(kad.String())
		}
		// handle error...
	}

	// continue with the test

}

// Watch all peer events in the simulation network, by receiving from a channel.
func ExampleSimulation_PeerEvents() {
	sim := simulation.New(nil)
	defer sim.Close()

	events := sim.PeerEvents(context.Background(), sim.NodeIDs())

	go func() {
		for e := range events {
			if e.Error != nil {
				log.Error("peer event", "err", e.Error)
				continue
			}
			log.Info("peer event", "node", e.NodeID, "peer", e.PeerID, "type", e.Event.Type)
		}
	}()
}

// Detect when a node drops a peer.
func ExampleSimulation_PeerEvents_disconnections() {
	sim := simulation.New(nil)
	defer sim.Close()

	disconnections := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().Drop(),
	)

	go func() {
		for d := range disconnections {
			if d.Error != nil {
				log.Error("peer drop", "err", d.Error)
				continue
			}
			log.Warn("peer drop", "node", d.NodeID, "peer", d.PeerID)
		}
	}()
}

// Watch multiple types of events or messages. In this case, they differ only
// by MsgCode, but filters can be set for different types or protocols, too.
func ExampleSimulation_PeerEvents_multipleFilters() {
	sim := simulation.New(nil)
	defer sim.Close()

	msgs := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		// Watch when bzz messages 1 and 4 are received.
		simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("bzz").MsgCode(1),
		simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("bzz").MsgCode(4),
	)

	go func() {
		for m := range msgs {
			if m.Error != nil {
				log.Error("bzz message", "err", m.Error)
				continue
			}
			log.Info("bzz message", "node", m.NodeID, "peer", m.PeerID)
		}
	}()
}
@ -1,68 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
	"fmt"
	"net/http"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/simulations"
)

// Package defaults.
var (
	DefaultHTTPSimAddr = ":8888"
)

// WithServer implements the builder pattern constructor for Simulation to
// start with an HTTP server
func (s *Simulation) WithServer(addr string) *Simulation {
	// assign default addr if nothing provided
	if addr == "" {
		addr = DefaultHTTPSimAddr
	}
	log.Info(fmt.Sprintf("Initializing simulation server on %s...", addr))
	// initialize the HTTP server
	s.handler = simulations.NewServer(s.Net)
	s.runC = make(chan struct{})
	// add swarm specific routes to the HTTP server
	s.addSimulationRoutes()
	s.httpSrv = &http.Server{
		Addr:    addr,
		Handler: s.handler,
	}
	go func() {
		err := s.httpSrv.ListenAndServe()
		if err != nil {
			log.Error("Error starting the HTTP server", "error", err)
		}
	}()
	return s
}

// register additional HTTP routes
func (s *Simulation) addSimulationRoutes() {
	s.handler.POST("/runsim", s.RunSimulation)
}

// RunSimulation is the actual POST endpoint runner
func (s *Simulation) RunSimulation(w http.ResponseWriter, req *http.Request) {
	log.Debug("RunSimulation endpoint running")
	s.runC <- struct{}{}
	w.WriteHeader(http.StatusOK)
}
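
// Illustrative sketch (not part of the original file): a frontend signals the
// waiting simulation by POSTing to the route registered above; the address
// assumes the DefaultHTTPSimAddr of ":8888".
//
//	resp, err := http.Post("http://localhost:8888/runsim", "application/json", nil)
//	if err == nil {
//		resp.Body.Close() // a 200 status means the run signal was delivered
//	}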
@ -1,110 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
	"context"
	"fmt"
	"net/http"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

func TestSimulationWithHTTPServer(t *testing.T) {
	log.Debug("Init simulation")

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	sim := New(
		map[string]ServiceFunc{
			"noop": func(_ *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
				return newNoopService(), nil, nil
			},
		}).WithServer(DefaultHTTPSimAddr)
	defer sim.Close()
	log.Debug("Done.")

	_, err := sim.AddNode()
	if err != nil {
		t.Fatal(err)
	}

	log.Debug("Starting sim round and let it time out...")
	//first test that running without sending to the channel will actually
	//block the simulation, so let it time out
	result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
		log.Debug("Just start the sim without any action and wait for the timeout")
		//ensure with a Sleep that simulation doesn't terminate before the timeout
		time.Sleep(2 * time.Second)
		return nil
	})

	if result.Error != nil {
		if result.Error.Error() == "context deadline exceeded" {
			log.Debug("Expected timeout error received")
		} else {
			t.Fatal(result.Error)
		}
	}

	//now run it again and send the expected signal on the waiting channel,
	//then close the simulation
	log.Debug("Starting sim round and wait for frontend signal...")
	//this time the timeout should be long enough so that it doesn't kick in too early
	ctx, cancel2 := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel2()
	errC := make(chan error, 1)
	go triggerSimulationRun(t, errC)
	result = sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
		log.Debug("This run waits for the run signal from `frontend`...")
		//ensure with a Sleep that simulation doesn't terminate before the signal is received
		time.Sleep(2 * time.Second)
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	if err := <-errC; err != nil {
		t.Fatal(err)
	}
	log.Debug("Test terminated successfully")
}

func triggerSimulationRun(t *testing.T, errC chan error) {
	//We need to first wait for the sim HTTP server to start running...
	time.Sleep(2 * time.Second)
	//then we can send the signal

	log.Debug("Sending run signal to simulation: POST /runsim...")
	resp, err := http.Post(fmt.Sprintf("http://localhost%s/runsim", DefaultHTTPSimAddr), "application/json", nil)
	if err != nil {
		errC <- fmt.Errorf("Request failed: %v", err)
		return
	}
	log.Debug("Signal sent")
	if resp.StatusCode != http.StatusOK {
		errC <- fmt.Errorf("err %s", resp.Status)
		return
	}
	errC <- resp.Body.Close()
}
|
@ -1,203 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "context"
    "encoding/binary"
    "encoding/hex"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/simulations"
    "github.com/ethereum/go-ethereum/swarm/network"
)

// BucketKeyKademlia is the key to be used for storing the kademlia
// instance for a particular node, usually inside the ServiceFunc function.
var BucketKeyKademlia BucketKey = "kademlia"

// WaitTillHealthy blocks until the health of all kademlias is true.
// If the error is not nil, a map of the kademlias that were found unhealthy is returned.
// TODO: Check correctness since change in kademlia depth calculation logic
func (s *Simulation) WaitTillHealthy(ctx context.Context) (ill map[enode.ID]*network.Kademlia, err error) {
    // Prepare PeerPot map for checking Kademlia health
    var ppmap map[string]*network.PeerPot
    kademlias := s.kademlias()
    addrs := make([][]byte, 0, len(kademlias))
    // TODO verify that all kademlias have same params
    for _, k := range kademlias {
        addrs = append(addrs, k.BaseAddr())
    }
    ppmap = network.NewPeerPotMap(s.neighbourhoodSize, addrs)

    // Wait for healthy Kademlia on every node before checking files
    ticker := time.NewTicker(200 * time.Millisecond)
    defer ticker.Stop()

    ill = make(map[enode.ID]*network.Kademlia)
    for {
        select {
        case <-ctx.Done():
            return ill, ctx.Err()
        case <-ticker.C:
            for k := range ill {
                delete(ill, k)
            }
            log.Debug("kademlia health check", "addr count", len(addrs), "kad len", len(kademlias))
            for id, k := range kademlias {
                // PeerPot for this node
                addr := common.Bytes2Hex(k.BaseAddr())
                pp := ppmap[addr]
                // call Healthy RPC
                h := k.GetHealthInfo(pp)
                // print info
                log.Debug(k.String())
                log.Debug("kademlia", "connectNN", h.ConnectNN, "knowNN", h.KnowNN)
                log.Debug("kademlia", "health", h.ConnectNN && h.KnowNN, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
                log.Debug("kademlia", "ill condition", !h.ConnectNN, "addr", hex.EncodeToString(k.BaseAddr()), "node", id)
                if !h.Healthy() {
                    ill[id] = k
                }
            }
            if len(ill) == 0 {
                return nil, nil
            }
        }
    }
}

// kademlias returns all Kademlia instances that are set
// in the simulation bucket.
func (s *Simulation) kademlias() (ks map[enode.ID]*network.Kademlia) {
    items := s.UpNodesItems(BucketKeyKademlia)
    log.Debug("kademlia len items", "len", len(items))
    ks = make(map[enode.ID]*network.Kademlia, len(items))
    for id, v := range items {
        k, ok := v.(*network.Kademlia)
        if !ok {
            continue
        }
        ks[id] = k
    }
    return ks
}

// WaitTillSnapshotRecreated blocks until all the connections specified
// in the snapshot are registered in the kademlia.
// It differs from WaitTillHealthy, which waits only until all the kademlias are
// healthy (that might happen even before all the connections are established).
func (s *Simulation) WaitTillSnapshotRecreated(ctx context.Context, snap *simulations.Snapshot) error {
    expected := getSnapshotConnections(snap.Conns)
    ticker := time.NewTicker(150 * time.Millisecond)
    defer ticker.Stop()

    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-ticker.C:
            actual := s.getActualConnections()
            if isAllDeployed(expected, actual) {
                return nil
            }
        }
    }
}

func (s *Simulation) getActualConnections() (res []uint64) {
    kademlias := s.kademlias()
    for base, k := range kademlias {
        k.EachConn(base[:], 256, func(p *network.Peer, _ int) bool {
            res = append(res, getConnectionHash(base, p.ID()))
            return true
        })
    }

    // only list those connections that appear twice (both peers should recognize the connection as active)
    res = removeDuplicatesAndSingletons(res)
    return res
}

func getSnapshotConnections(conns []simulations.Conn) (res []uint64) {
    for _, c := range conns {
        res = append(res, getConnectionHash(c.One, c.Other))
    }
    return res
}

// returns an integer connection identifier (similar to an 8-byte hash)
func getConnectionHash(a, b enode.ID) uint64 {
    var h [8]byte
    for i := 0; i < 8; i++ {
        h[i] = a[i] ^ b[i]
    }
    res := binary.LittleEndian.Uint64(h[:])
    return res
}
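
// A minimal sketch of the symmetry property: XOR is commutative, so
// getConnectionHash yields the same identifier regardless of which peer
// reports the connection. That is what lets getActualConnections count each
// live connection exactly twice before filtering. The two IDs below are
// hypothetical. Since only the first 8 bytes of the 32-byte IDs are mixed in,
// distinct connections could in principle collide, which is acceptable for
// test-sized networks.
func exampleConnectionHashSymmetry() {
    a := enode.ID{1, 2, 3, 4, 5, 6, 7, 8} // hypothetical node ID
    b := enode.ID{8, 7, 6, 5, 4, 3, 2, 1} // hypothetical node ID
    if getConnectionHash(a, b) != getConnectionHash(b, a) {
        panic("unreachable: XOR is commutative")
    }
}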

// returns true if all connections in expected are listed in actual
func isAllDeployed(expected []uint64, actual []uint64) bool {
    if len(expected) == 0 {
        return true
    }

    exp := make([]uint64, len(expected))
    copy(exp, expected)
    for _, c := range actual {
        // remove value c from exp
        for i := 0; i < len(exp); i++ {
            if exp[i] == c {
                exp = removeListElement(exp, i)
                if len(exp) == 0 {
                    return true
                }
            }
        }
    }
    return len(exp) == 0
}

func removeListElement(arr []uint64, i int) []uint64 {
    last := len(arr) - 1
    arr[i] = arr[last]
    arr = arr[:last]
    return arr
}

func removeDuplicatesAndSingletons(arr []uint64) []uint64 {
    for i := 0; i < len(arr); {
        found := false
        for j := i + 1; j < len(arr); j++ {
            if arr[i] == arr[j] {
                arr = removeListElement(arr, j) // remove duplicate
                found = true
                break
            }
        }

        if found {
            i++
        } else {
            arr = removeListElement(arr, i) // remove singleton
        }
    }

    return arr
}
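
// A worked example, as a sketch: values reported twice survive once, values
// reported once are dropped, matching the rule in getActualConnections that
// both peers must recognize a connection. The input below is hypothetical;
// the result order is not preserved because removeListElement swaps the last
// element into the removed slot.
func exampleRemoveDuplicatesAndSingletons() {
    in := []uint64{1, 2, 2, 3, 3, 3, 4}
    out := removeDuplicatesAndSingletons(in)
    _ = out // out is [3 2]: 2 and 3 were reported more than once; 1 and 4 were not
}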
@ -1,310 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "context"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/ethereum/go-ethereum/swarm/network"
)

/*
TestWaitTillHealthy tests that we indeed get a healthy network after we wait for it.
For this to be tested, a bit of a snake tail bite needs to happen:
* First we create a first simulation
* Run it as nodes connected in a ring
* Wait until the network is healthy
* Then we create a snapshot
* With this snapshot we create a new simulation
* This simulation is expected to have a healthy configuration, as it uses the snapshot
* Thus we just iterate all nodes and check that their kademlias are healthy
* If all kademlias are healthy, the test succeeded, otherwise it failed
*/
func TestWaitTillHealthy(t *testing.T) {
    t.Skip("this test is flaky; disabling till underlying problem is solved")
    testNodesNum := 10

    // create the first simulation
    sim := New(createSimServiceMap(true))

    // connect and...
    nodeIDs, err := sim.AddNodesAndConnectRing(testNodesNum)
    if err != nil {
        t.Fatal(err)
    }

    // array of all overlay addresses
    var addrs [][]byte
    // iterate once to be able to build the peer map
    for _, node := range nodeIDs {
        // get the kademlia overlay address from this ID
        a := node.Bytes()
        // append it to the array of all overlay addresses
        addrs = append(addrs, a)
    }
    // build a PeerPot only once
    pp := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, addrs)

    ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
    defer cancel()

    // ...wait until healthy
    ill, err := sim.WaitTillHealthy(ctx)
    if err != nil {
        for id, kad := range ill {
            t.Log("Node", id)
            t.Log(kad.String())
        }
        t.Fatal(err)
    }

    // now create a snapshot of this network
    snap, err := sim.Net.Snapshot()
    if err != nil {
        t.Fatal(err)
    }

    // close the initial simulation
    sim.Close()
    // create a control simulation
    controlSim := New(createSimServiceMap(false))
    defer controlSim.Close()

    // load the snapshot into this control simulation
    err = controlSim.Net.Load(snap)
    if err != nil {
        t.Fatal(err)
    }
    _, err = controlSim.WaitTillHealthy(ctx)
    if err != nil {
        t.Fatal(err)
    }

    for _, node := range nodeIDs {
        // ...get its kademlia
        item, ok := controlSim.NodeItem(node, BucketKeyKademlia)
        if !ok {
            t.Fatal("No kademlia bucket item")
        }
        kad := item.(*network.Kademlia)
        // get its base address
        kid := common.Bytes2Hex(kad.BaseAddr())

        // get the health info
        info := kad.GetHealthInfo(pp[kid])
        log.Trace("Health info", "info", info)
        // check that it is healthy
        healthy := info.Healthy()
        if !healthy {
            t.Fatalf("Expected node %v of control simulation to be healthy, but it is not, unhealthy kademlias: %v", node, kad.String())
        }
    }
}

// createSimServiceMap returns the services map;
// this function will create the sim services with or without discovery enabled,
// based on the flag passed
func createSimServiceMap(discovery bool) map[string]ServiceFunc {
    return map[string]ServiceFunc{
        "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
            addr := network.NewAddr(ctx.Config.Node())
            hp := network.NewHiveParams()
            hp.Discovery = discovery
            config := &network.BzzConfig{
                OverlayAddr:  addr.Over(),
                UnderlayAddr: addr.Under(),
                HiveParams:   hp,
            }
            kad := network.NewKademlia(addr.Over(), network.NewKadParams())
            // store kademlia in node's bucket under BucketKeyKademlia
            // so that it can be found by the WaitTillHealthy method.
            b.Store(BucketKeyKademlia, kad)
            return network.NewBzz(config, kad, nil, nil, nil), nil, nil
        },
    }
}

// TestWaitTillSnapshotRecreated tests that we indeed have a network
// configuration specified in the snapshot file, after we wait for it.
//
// First we create a first simulation
// Run it as nodes connected in a ring
// Wait until the network is healthy
// Then we create a snapshot
// With this snapshot we create a new simulation
// Call WaitTillSnapshotRecreated() function and wait until it returns
// Iterate the nodes and check if all the connections are successfully recreated
func TestWaitTillSnapshotRecreated(t *testing.T) {
    t.Skip("test is flaky. disabling until underlying problem is addressed")
    var err error
    sim := New(createSimServiceMap(true))
    _, err = sim.AddNodesAndConnectRing(16)
    if err != nil {
        t.Fatal(err)
    }
    ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
    defer cancel()
    _, err = sim.WaitTillHealthy(ctx)
    if err != nil {
        t.Fatal(err)
    }

    originalConnections := sim.getActualConnections()
    snap, err := sim.Net.Snapshot()
    sim.Close()
    if err != nil {
        t.Fatal(err)
    }

    controlSim := New(createSimServiceMap(false))
    defer controlSim.Close()
    err = controlSim.Net.Load(snap)
    if err != nil {
        t.Fatal(err)
    }
    err = controlSim.WaitTillSnapshotRecreated(ctx, snap)
    if err != nil {
        t.Fatal(err)
    }
    controlConnections := controlSim.getActualConnections()

    for _, c := range originalConnections {
        if !exist(controlConnections, c) {
            t.Fatal("connection was not recreated")
        }
    }
}

// exist returns true if val is found in arr
func exist(arr []uint64, val uint64) bool {
    for _, c := range arr {
        if c == val {
            return true
        }
    }
    return false
}

func TestRemoveDuplicatesAndSingletons(t *testing.T) {
    singletons := []uint64{
        0x3c127c6f6cb026b0,
        0x0f45190d72e71fc5,
        0xb0184c02449e0bb6,
        0xa85c7b84239c54d3,
        0xe3b0c44298fc1c14,
        0x9afbf4c8996fb924,
        0x27ae41e4649b934c,
        0xa495991b7852b855,
    }

    doubles := []uint64{
        0x1b879f878de7fc7a,
        0xc6791470521bdab4,
        0xdd34b0ee39bbccc6,
        0x4d904fbf0f31da10,
        0x6403c2560432c8f8,
        0x18954e33cf3ad847,
        0x90db00e98dc7a8a6,
        0x92886b0dfcc1809b,
    }

    var arr []uint64
    arr = append(arr, doubles...)
    arr = append(arr, singletons...)
    arr = append(arr, doubles...)
    arr = removeDuplicatesAndSingletons(arr)

    for _, i := range singletons {
        if exist(arr, i) {
            t.Fatalf("singleton not removed: %d", i)
        }
    }

    for _, i := range doubles {
        if !exist(arr, i) {
            t.Fatalf("wrong value removed: %d", i)
        }
    }

    for j := 0; j < len(doubles); j++ {
        v := doubles[j] + singletons[j]
        if exist(arr, v) {
            t.Fatalf("non-existing value found, index: %d", j)
        }
    }
}

func TestIsAllDeployed(t *testing.T) {
    a := []uint64{
        0x3c127c6f6cb026b0,
        0x0f45190d72e71fc5,
        0xb0184c02449e0bb6,
        0xa85c7b84239c54d3,
        0xe3b0c44298fc1c14,
        0x9afbf4c8996fb924,
        0x27ae41e4649b934c,
        0xa495991b7852b855,
    }

    b := []uint64{
        0x1b879f878de7fc7a,
        0xc6791470521bdab4,
        0xdd34b0ee39bbccc6,
        0x4d904fbf0f31da10,
        0x6403c2560432c8f8,
        0x18954e33cf3ad847,
        0x90db00e98dc7a8a6,
        0x92886b0dfcc1809b,
    }

    var c []uint64
    c = append(c, a...)
    c = append(c, b...)

    if !isAllDeployed(a, c) {
        t.Fatal("isAllDeployed failed")
    }

    if !isAllDeployed(b, c) {
        t.Fatal("isAllDeployed failed")
    }

    if isAllDeployed(c, a) {
        t.Fatal("isAllDeployed failed: false positive")
    }

    if isAllDeployed(c, b) {
        t.Fatal("isAllDeployed failed: false positive")
    }

    c = c[2:]

    if isAllDeployed(a, c) {
        t.Fatal("isAllDeployed failed: false positive")
    }

    if !isAllDeployed(b, c) {
        t.Fatal("isAllDeployed failed")
    }
}
@ -1,341 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "bytes"
    "context"
    "crypto/ecdsa"
    "encoding/json"
    "errors"
    "io/ioutil"
    "math/rand"
    "os"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/simulations"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/ethereum/go-ethereum/swarm/network"
)

var (
    BucketKeyBzzPrivateKey BucketKey = "bzzprivkey"
)

// NodeIDs returns NodeIDs for all nodes in the network.
func (s *Simulation) NodeIDs() (ids []enode.ID) {
    nodes := s.Net.GetNodes()
    ids = make([]enode.ID, len(nodes))
    for i, node := range nodes {
        ids[i] = node.ID()
    }
    return ids
}

// UpNodeIDs returns NodeIDs for nodes that are up in the network.
func (s *Simulation) UpNodeIDs() (ids []enode.ID) {
    nodes := s.Net.GetNodes()
    for _, node := range nodes {
        if node.Up() {
            ids = append(ids, node.ID())
        }
    }
    return ids
}

// DownNodeIDs returns NodeIDs for nodes that are stopped in the network.
func (s *Simulation) DownNodeIDs() (ids []enode.ID) {
    nodes := s.Net.GetNodes()
    for _, node := range nodes {
        if !node.Up() {
            ids = append(ids, node.ID())
        }
    }
    return ids
}

// AddNodeOption defines the option that can be passed
// to the Simulation.AddNode method.
type AddNodeOption func(*adapters.NodeConfig)

// AddNodeWithMsgEvents sets the EnableMsgEvents option
// on NodeConfig.
func AddNodeWithMsgEvents(enable bool) AddNodeOption {
    return func(o *adapters.NodeConfig) {
        o.EnableMsgEvents = enable
    }
}

// AddNodeWithService specifies a service that should be
// started on a node. This option can be repeated as a variadic
// argument to AddNode and other add node related methods.
// If AddNodeWithService is not specified, all services will be started.
func AddNodeWithService(serviceName string) AddNodeOption {
    return func(o *adapters.NodeConfig) {
        o.Services = append(o.Services, serviceName)
    }
}
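
// A usage sketch: options compose as variadic arguments, so a node can be
// created with message events enabled and only one service started. The
// "bzz" name below is hypothetical and assumes a service registered under
// that name in New.
//
//	id, err := sim.AddNode(
//		AddNodeWithMsgEvents(true),
//		AddNodeWithService("bzz"),
//	)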

// AddNode creates a new node with random configuration,
// applies provided options to the config and adds the node to the network.
// By default all services will be started on a node. If one or more
// AddNodeWithService options are provided, only the specified services will be started.
func (s *Simulation) AddNode(opts ...AddNodeOption) (id enode.ID, err error) {
    conf := adapters.RandomNodeConfig()
    for _, o := range opts {
        o(conf)
    }
    if len(conf.Services) == 0 {
        conf.Services = s.serviceNames
    }

    // add ENR records to the underlying node,
    // most importantly the bzz overlay address
    //
    // for now we have no way of setting bootnodes or lightnodes in sims,
    // so we just let them be set to false;
    // it should perhaps be possible to override them with an AddNodeOption
    bzzPrivateKey, err := BzzPrivateKeyFromConfig(conf)
    if err != nil {
        return enode.ID{}, err
    }

    enodeParams := &network.EnodeParams{
        PrivateKey: bzzPrivateKey,
    }
    record, err := network.NewEnodeRecord(enodeParams)
    if err != nil {
        return enode.ID{}, err
    }
    conf.Record = *record

    // Add the bzz address to the node config
    node, err := s.Net.NewNodeWithConfig(conf)
    if err != nil {
        return id, err
    }
    s.buckets[node.ID()] = new(sync.Map)
    s.SetNodeItem(node.ID(), BucketKeyBzzPrivateKey, bzzPrivateKey)

    return node.ID(), s.Net.Start(node.ID())
}

// AddNodes creates new nodes with random configurations,
// applies provided options to the config and adds the nodes to the network.
func (s *Simulation) AddNodes(count int, opts ...AddNodeOption) (ids []enode.ID, err error) {
    ids = make([]enode.ID, 0, count)
    for i := 0; i < count; i++ {
        id, err := s.AddNode(opts...)
        if err != nil {
            return nil, err
        }
        ids = append(ids, id)
    }
    return ids, nil
}

// AddNodesAndConnectFull is a helper method that combines
// AddNodes and ConnectNodesFull. Only new nodes will be connected.
func (s *Simulation) AddNodesAndConnectFull(count int, opts ...AddNodeOption) (ids []enode.ID, err error) {
    if count < 2 {
        return nil, errors.New("count of nodes must be at least 2")
    }
    ids, err = s.AddNodes(count, opts...)
    if err != nil {
        return nil, err
    }
    err = s.Net.ConnectNodesFull(ids)
    if err != nil {
        return nil, err
    }
    return ids, nil
}

// AddNodesAndConnectChain is a helper method that combines
// AddNodes and ConnectNodesChain. The chain will be continued from the last
// added node, if there is one in the simulation, using the ConnectToLastNode method.
func (s *Simulation) AddNodesAndConnectChain(count int, opts ...AddNodeOption) (ids []enode.ID, err error) {
    if count < 2 {
        return nil, errors.New("count of nodes must be at least 2")
    }
    id, err := s.AddNode(opts...)
    if err != nil {
        return nil, err
    }
    err = s.Net.ConnectToLastNode(id)
    if err != nil {
        return nil, err
    }
    ids, err = s.AddNodes(count-1, opts...)
    if err != nil {
        return nil, err
    }
    ids = append([]enode.ID{id}, ids...)
    err = s.Net.ConnectNodesChain(ids)
    if err != nil {
        return nil, err
    }
    return ids, nil
}

// AddNodesAndConnectRing is a helper method that combines
// AddNodes and ConnectNodesRing.
func (s *Simulation) AddNodesAndConnectRing(count int, opts ...AddNodeOption) (ids []enode.ID, err error) {
    if count < 2 {
        return nil, errors.New("count of nodes must be at least 2")
    }
    ids, err = s.AddNodes(count, opts...)
    if err != nil {
        return nil, err
    }
    err = s.Net.ConnectNodesRing(ids)
    if err != nil {
        return nil, err
    }
    return ids, nil
}

// AddNodesAndConnectStar is a helper method that combines
// AddNodes and ConnectNodesStar.
func (s *Simulation) AddNodesAndConnectStar(count int, opts ...AddNodeOption) (ids []enode.ID, err error) {
    if count < 2 {
        return nil, errors.New("count of nodes must be at least 2")
    }
    ids, err = s.AddNodes(count, opts...)
    if err != nil {
        return nil, err
    }
    err = s.Net.ConnectNodesStar(ids[1:], ids[0])
    if err != nil {
        return nil, err
    }
    return ids, nil
}

// UploadSnapshot uploads a snapshot to the simulation.
// This method tries to open the json file provided, applies the config to all nodes
// and then loads the snapshot into the Simulation network.
func (s *Simulation) UploadSnapshot(ctx context.Context, snapshotFile string, opts ...AddNodeOption) error {
    f, err := os.Open(snapshotFile)
    if err != nil {
        return err
    }

    jsonbyte, err := ioutil.ReadAll(f)
    f.Close()
    if err != nil {
        return err
    }
    var snap simulations.Snapshot
    if err := json.Unmarshal(jsonbyte, &snap); err != nil {
        return err
    }

    // the snapshot probably has the property EnableMsgEvents not set;
    // set it to true (we need this to wait for messages before uploading)
    for i := range snap.Nodes {
        snap.Nodes[i].Node.Config.EnableMsgEvents = true
        snap.Nodes[i].Node.Config.Services = s.serviceNames
        for _, o := range opts {
            o(snap.Nodes[i].Node.Config)
        }
    }

    if err := s.Net.Load(&snap); err != nil {
        return err
    }
    return s.WaitTillSnapshotRecreated(ctx, &snap)
}
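
// A usage sketch: a snapshot produced by a previous run can be replayed
// under a deadline; the file path below is hypothetical.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	if err := sim.UploadSnapshot(ctx, "testdata/snapshot_16.json"); err != nil {
//		// the snapshot could not be loaded, or not all of its
//		// connections were recreated before the deadline
//	}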

// StartNode starts a node by NodeID.
func (s *Simulation) StartNode(id enode.ID) (err error) {
    return s.Net.Start(id)
}

// StartRandomNode starts a random node.
func (s *Simulation) StartRandomNode() (id enode.ID, err error) {
    n := s.Net.GetRandomDownNode()
    if n == nil {
        return id, ErrNodeNotFound
    }
    return n.ID(), s.Net.Start(n.ID())
}

// StartRandomNodes starts random nodes.
func (s *Simulation) StartRandomNodes(count int) (ids []enode.ID, err error) {
    ids = make([]enode.ID, 0, count)
    for i := 0; i < count; i++ {
        n := s.Net.GetRandomDownNode()
        if n == nil {
            return nil, ErrNodeNotFound
        }
        err = s.Net.Start(n.ID())
        if err != nil {
            return nil, err
        }
        ids = append(ids, n.ID())
    }
    return ids, nil
}

// StopNode stops a node by NodeID.
func (s *Simulation) StopNode(id enode.ID) (err error) {
    return s.Net.Stop(id)
}

// StopRandomNode stops a random node.
func (s *Simulation) StopRandomNode() (id enode.ID, err error) {
    n := s.Net.GetRandomUpNode()
    if n == nil {
        return id, ErrNodeNotFound
    }
    return n.ID(), s.Net.Stop(n.ID())
}

// StopRandomNodes stops random nodes.
func (s *Simulation) StopRandomNodes(count int) (ids []enode.ID, err error) {
    ids = make([]enode.ID, 0, count)
    for i := 0; i < count; i++ {
        n := s.Net.GetRandomUpNode()
        if n == nil {
            return nil, ErrNodeNotFound
        }
        err = s.Net.Stop(n.ID())
        if err != nil {
            return nil, err
        }
        ids = append(ids, n.ID())
    }
    return ids, nil
}

// seed the random generator for Simulation.randomNode.
func init() {
    rand.Seed(time.Now().UnixNano())
}

// BzzPrivateKeyFromConfig derives a private key for swarm from the node key;
// it returns the private key used to generate the bzz key.
func BzzPrivateKeyFromConfig(conf *adapters.NodeConfig) (*ecdsa.PrivateKey, error) {
    // pad the seed key with some arbitrary data, as ecdsa.GenerateKey takes 40 bytes of seed data
    privKeyBuf := append(crypto.FromECDSA(conf.PrivateKey), []byte{0x62, 0x7a, 0x7a, 0x62, 0x7a, 0x7a, 0x62, 0x7a}...)
    bzzPrivateKey, err := ecdsa.GenerateKey(crypto.S256(), bytes.NewReader(privKeyBuf))
    if err != nil {
        return nil, err
    }
    return bzzPrivateKey, nil
}
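
// A note on the padding above: crypto.FromECDSA returns the 32-byte
// secp256k1 scalar, and the 8 appended bytes are the ASCII string
// "bzzbzzbz", bringing the seed to the required length:
//
//	32 (node key) + 8 (padding) = 40 bytes of seed data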
@ -1,446 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "context"
    "fmt"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/simulations"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/ethereum/go-ethereum/swarm/network"
)

func TestUpDownNodeIDs(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    ids, err := sim.AddNodes(10)
    if err != nil {
        t.Fatal(err)
    }

    gotIDs := sim.NodeIDs()

    if !equalNodeIDs(ids, gotIDs) {
        t.Error("returned nodes are not equal to added ones")
    }

    stoppedIDs, err := sim.StopRandomNodes(3)
    if err != nil {
        t.Fatal(err)
    }

    gotIDs = sim.UpNodeIDs()

    for _, id := range gotIDs {
        if !sim.Net.GetNode(id).Up() {
            t.Errorf("node %s should not be down", id)
        }
    }

    if !equalNodeIDs(ids, append(gotIDs, stoppedIDs...)) {
        t.Error("returned nodes are not equal to added ones")
    }

    gotIDs = sim.DownNodeIDs()

    for _, id := range gotIDs {
        if sim.Net.GetNode(id).Up() {
            t.Errorf("node %s should not be up", id)
        }
    }

    if !equalNodeIDs(stoppedIDs, gotIDs) {
        t.Error("returned nodes are not equal to the stopped ones")
    }
}

func equalNodeIDs(one, other []enode.ID) bool {
    if len(one) != len(other) {
        return false
    }
    var count int
    for _, a := range one {
        var found bool
        for _, b := range other {
            if a == b {
                found = true
                break
            }
        }
        if found {
            count++
        } else {
            return false
        }
    }
    return count == len(one)
}

func TestAddNode(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    id, err := sim.AddNode()
    if err != nil {
        t.Fatal(err)
    }

    n := sim.Net.GetNode(id)
    if n == nil {
        t.Fatal("node not found")
    }

    if !n.Up() {
        t.Error("node not started")
    }
}

func TestAddNodeWithMsgEvents(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    id, err := sim.AddNode(AddNodeWithMsgEvents(true))
    if err != nil {
        t.Fatal(err)
    }

    if !sim.Net.GetNode(id).Config.EnableMsgEvents {
        t.Error("EnableMsgEvents is false")
    }

    id, err = sim.AddNode(AddNodeWithMsgEvents(false))
    if err != nil {
        t.Fatal(err)
    }

    if sim.Net.GetNode(id).Config.EnableMsgEvents {
        t.Error("EnableMsgEvents is true")
    }
}

func TestAddNodeWithService(t *testing.T) {
    sim := New(map[string]ServiceFunc{
        "noop1": noopServiceFunc,
        "noop2": noopServiceFunc,
    })
    defer sim.Close()

    id, err := sim.AddNode(AddNodeWithService("noop1"))
    if err != nil {
        t.Fatal(err)
    }

    n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
    if n.Service("noop1") == nil {
        t.Error("service noop1 not found on node")
    }
    if n.Service("noop2") != nil {
        t.Error("service noop2 should not be found on node")
    }
}

func TestAddNodeMultipleServices(t *testing.T) {
    sim := New(map[string]ServiceFunc{
        "noop1": noopServiceFunc,
        "noop2": noopService2Func,
    })
    defer sim.Close()

    id, err := sim.AddNode()
    if err != nil {
        t.Fatal(err)
    }

    n := sim.Net.GetNode(id).Node.(*adapters.SimNode)
    if n.Service("noop1") == nil {
        t.Error("service noop1 not found on node")
    }
    if n.Service("noop2") == nil {
        t.Error("service noop2 not found on node")
    }
}

func TestAddNodeDuplicateServiceError(t *testing.T) {
    sim := New(map[string]ServiceFunc{
        "noop1": noopServiceFunc,
        "noop2": noopServiceFunc,
    })
    defer sim.Close()

    wantErr := "duplicate service: *simulation.noopService"
    _, err := sim.AddNode()
    if err.Error() != wantErr {
        t.Errorf("got error %q, want %q", err, wantErr)
    }
}

func TestAddNodes(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    nodesCount := 12

    ids, err := sim.AddNodes(nodesCount)
    if err != nil {
        t.Fatal(err)
    }

    count := len(ids)
    if count != nodesCount {
        t.Errorf("expected %v nodes, got %v", nodesCount, count)
    }

    count = len(sim.Net.GetNodes())
    if count != nodesCount {
        t.Errorf("expected %v nodes, got %v", nodesCount, count)
    }
}

func TestAddNodesAndConnectFull(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    n := 12

    ids, err := sim.AddNodesAndConnectFull(n)
    if err != nil {
        t.Fatal(err)
    }

    simulations.VerifyFull(t, sim.Net, ids)
}

func TestAddNodesAndConnectChain(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    _, err := sim.AddNodesAndConnectChain(12)
    if err != nil {
        t.Fatal(err)
    }

    // add another set of nodes to test
    // if two chains are connected
    _, err = sim.AddNodesAndConnectChain(7)
    if err != nil {
        t.Fatal(err)
    }

    simulations.VerifyChain(t, sim.Net, sim.UpNodeIDs())
}

func TestAddNodesAndConnectRing(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    ids, err := sim.AddNodesAndConnectRing(12)
    if err != nil {
        t.Fatal(err)
    }

    simulations.VerifyRing(t, sim.Net, ids)
}

func TestAddNodesAndConnectStar(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    ids, err := sim.AddNodesAndConnectStar(12)
    if err != nil {
        t.Fatal(err)
    }

    simulations.VerifyStar(t, sim.Net, ids, 0)
}

// TestUploadSnapshot tests that uploading a snapshot works
func TestUploadSnapshot(t *testing.T) {
    log.Debug("Creating simulation")
    s := New(map[string]ServiceFunc{
        "bzz": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
            addr := network.NewAddr(ctx.Config.Node())
            hp := network.NewHiveParams()
            hp.Discovery = false
            config := &network.BzzConfig{
                OverlayAddr:  addr.Over(),
                UnderlayAddr: addr.Under(),
                HiveParams:   hp,
            }
            kad := network.NewKademlia(addr.Over(), network.NewKadParams())
            b.Store(BucketKeyKademlia, kad)
            return network.NewBzz(config, kad, nil, nil, nil), nil, nil
        },
    })
    defer s.Close()

    nodeCount := 16
    log.Debug("Uploading snapshot")
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    err := s.UploadSnapshot(ctx, fmt.Sprintf("../stream/testing/snapshot_%d.json", nodeCount))
    if err != nil {
        t.Fatalf("Error uploading snapshot to simulation network: %v", err)
    }

    log.Debug("Starting simulation...")
    s.Run(ctx, func(ctx context.Context, sim *Simulation) error {
        log.Debug("Checking")
        nodes := sim.UpNodeIDs()
        if len(nodes) != nodeCount {
            t.Fatal("Simulation network node number doesn't match snapshot node number")
        }
        return nil
    })
    log.Debug("Done.")
}

func TestStartStopNode(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    id, err := sim.AddNode()
    if err != nil {
        t.Fatal(err)
    }

    n := sim.Net.GetNode(id)
    if n == nil {
        t.Fatal("node not found")
    }
    if !n.Up() {
        t.Error("node not started")
    }

    err = sim.StopNode(id)
    if err != nil {
        t.Fatal(err)
    }
    if n.Up() {
        t.Error("node not stopped")
    }

    waitForPeerEventPropagation()

    err = sim.StartNode(id)
    if err != nil {
        t.Fatal(err)
    }
    if !n.Up() {
        t.Error("node not started")
    }
}

func TestStartStopRandomNode(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    _, err := sim.AddNodes(3)
    if err != nil {
        t.Fatal(err)
    }

    id, err := sim.StopRandomNode()
    if err != nil {
        t.Fatal(err)
    }

    n := sim.Net.GetNode(id)
    if n == nil {
        t.Fatal("node not found")
    }
    if n.Up() {
        t.Error("node not stopped")
    }

    id2, err := sim.StopRandomNode()
    if err != nil {
        t.Fatal(err)
    }

    waitForPeerEventPropagation()

    idStarted, err := sim.StartRandomNode()
    if err != nil {
        t.Fatal(err)
    }

    if idStarted != id && idStarted != id2 {
        t.Error("unexpected started node ID")
    }
}

func TestStartStopRandomNodes(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    _, err := sim.AddNodes(10)
    if err != nil {
        t.Fatal(err)
    }

    ids, err := sim.StopRandomNodes(3)
    if err != nil {
        t.Fatal(err)
    }

    for _, id := range ids {
        n := sim.Net.GetNode(id)
        if n == nil {
            t.Fatal("node not found")
        }
        if n.Up() {
            t.Error("node not stopped")
        }
    }

    waitForPeerEventPropagation()

    ids, err = sim.StartRandomNodes(2)
    if err != nil {
        t.Fatal(err)
    }

    for _, id := range ids {
        n := sim.Net.GetNode(id)
        if n == nil {
            t.Fatal("node not found")
        }
        if !n.Up() {
            t.Error("node not started")
        }
    }
}

func waitForPeerEventPropagation() {
    // Sleep here to ensure that the Network.watchPeerEvents defer function
    // has set `node.Up() = false` before we start the node again.
    //
    // The same node is stopped and started again, and upon start
    // watchPeerEvents is started in a goroutine. If the node is stopped
    // and then very quickly started, that goroutine may be scheduled later
    // than start and force `node.Up() = false` in its defer function.
    // This would make this test unreliable.
    time.Sleep(1 * time.Second)
}
@ -1,65 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
)

// Service returns a single Service by name on a particular node
// with the provided id.
func (s *Simulation) Service(name string, id enode.ID) node.Service {
    simNode, ok := s.Net.GetNode(id).Node.(*adapters.SimNode)
    if !ok {
        return nil
    }
    services := simNode.ServiceMap()
    if len(services) == 0 {
        return nil
    }
    return services[name]
}

// RandomService returns a single Service by name on a
// randomly chosen node that is up.
func (s *Simulation) RandomService(name string) node.Service {
    n := s.Net.GetRandomUpNode().Node.(*adapters.SimNode)
    if n == nil {
        return nil
    }
    return n.Service(name)
}

// Services returns all services with a provided name
// from nodes that are up.
func (s *Simulation) Services(name string) (services map[enode.ID]node.Service) {
    nodes := s.Net.GetNodes()
    services = make(map[enode.ID]node.Service)
    for _, node := range nodes {
        if !node.Up() {
            continue
        }
        simNode, ok := node.Node.(*adapters.SimNode)
        if !ok {
            continue
        }
        services[node.ID()] = simNode.Service(name)
    }
    return services
}
@ -1,46 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "testing"
)

func TestService(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    id, err := sim.AddNode()
    if err != nil {
        t.Fatal(err)
    }

    _, ok := sim.Service("noop", id).(*noopService)
    if !ok {
        t.Fatalf("service is not of %T type", &noopService{})
    }

    _, ok = sim.RandomService("noop").(*noopService)
    if !ok {
        t.Fatalf("service is not of %T type", &noopService{})
    }

    _, ok = sim.Services("noop")[id].(*noopService)
    if !ok {
        t.Fatalf("service is not of %T type", &noopService{})
    }
}
@ -1,218 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "context"
    "errors"
    "net/http"
    "sync"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p/enode"
    "github.com/ethereum/go-ethereum/p2p/simulations"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/ethereum/go-ethereum/swarm/network"
)

// Common errors that are returned by functions in this package.
var (
    ErrNodeNotFound = errors.New("node not found")
)

// Simulation provides methods on network, nodes and services
// to manage them.
type Simulation struct {
    // Net is exposed as a way to access lower level functionalities
    // of p2p/simulations.Network.
    Net *simulations.Network

    serviceNames      []string
    cleanupFuncs      []func()
    buckets           map[enode.ID]*sync.Map
    shutdownWG        sync.WaitGroup
    done              chan struct{}
    mu                sync.RWMutex
    neighbourhoodSize int

    httpSrv *http.Server        // attach a HTTP server via SimulationOptions
    handler *simulations.Server // HTTP handler for the server
    runC    chan struct{}       // channel where the frontend signals it is ready
}

// ServiceFunc is used in New to declare a new service constructor.
// The first argument provides a ServiceContext from the adapters package,
// giving for example access to the NodeID. The second argument is the sync.Map
// where all "global" state related to the service should be kept.
// All cleanups needed for the constructed service and any other constructed
// objects should be provided in a single returned cleanup function.
// The returned cleanup function will be called by the Close function
// after network shutdown.
type ServiceFunc func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error)
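
// A minimal sketch of a ServiceFunc, with hypothetical names: it stores a
// value in the per-node bucket so tests can retrieve it later through the
// bucket accessors, and returns a cleanup function for Close to call.
//
//	func exampleServiceFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (node.Service, func(), error) {
//		bucket.Store("example-key", ctx.Config.ID.String())
//		cleanup := func() {
//			// release anything the service allocated
//		}
//		return &noopService{}, cleanup, nil
//	}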

// New creates a new simulation instance.
// The services map must have unique keys as service names, and
// every ServiceFunc must return a node.Service of a unique type.
// This restriction is required by the node.Node.Start() function,
// which is used to start the node.Service returned by a ServiceFunc.
func New(services map[string]ServiceFunc) (s *Simulation) {
    s = &Simulation{
        buckets:           make(map[enode.ID]*sync.Map),
        done:              make(chan struct{}),
        neighbourhoodSize: network.NewKadParams().NeighbourhoodSize,
    }

    adapterServices := make(map[string]adapters.ServiceFunc, len(services))
    for name, serviceFunc := range services {
        // Scope these variables correctly,
        // as they will be accessed later in the adapterServices[name] function.
        name, serviceFunc := name, serviceFunc
        s.serviceNames = append(s.serviceNames, name)
        adapterServices[name] = func(ctx *adapters.ServiceContext) (node.Service, error) {
            s.mu.Lock()
            defer s.mu.Unlock()
            b, ok := s.buckets[ctx.Config.ID]
            if !ok {
                b = new(sync.Map)
            }
            service, cleanup, err := serviceFunc(ctx, b)
            if err != nil {
                return nil, err
            }
            if cleanup != nil {
                s.cleanupFuncs = append(s.cleanupFuncs, cleanup)
            }
            s.buckets[ctx.Config.ID] = b
            return service, nil
        }
    }

    s.Net = simulations.NewNetwork(
        adapters.NewTCPAdapter(adapterServices),
        &simulations.NetworkConfig{ID: "0"},
    )

    return s
}

// RunFunc is the function that will be called
// on a Simulation.Run method call.
type RunFunc func(context.Context, *Simulation) error

// Result is the returned value of the Simulation.Run method.
type Result struct {
    Duration time.Duration
    Error    error
}

// Run calls the RunFunc function while taking care of
// cancellation provided through the Context.
func (s *Simulation) Run(ctx context.Context, f RunFunc) (r Result) {
    // if the option is set to run a HTTP server with the simulation,
    // init the server and start it
    start := time.Now()
    if s.httpSrv != nil {
        log.Info("Waiting for frontend to be ready...(send POST /runsim to HTTP server)")
        // wait for the frontend to connect
        select {
        case <-s.runC:
        case <-ctx.Done():
            return Result{
                Duration: time.Since(start),
                Error:    ctx.Err(),
            }
        }
        log.Info("Received signal from frontend - starting simulation run.")
    }
    errc := make(chan error)
    quit := make(chan struct{})
    defer close(quit)
    go func() {
        select {
        case errc <- f(ctx, s):
        case <-quit:
        }
    }()
    var err error
    select {
    case <-ctx.Done():
        err = ctx.Err()
    case err = <-errc:
    }
    return Result{
        Duration: time.Since(start),
        Error:    err,
    }
}
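
// A usage sketch: Run wraps the test body and converts context expiry into
// Result.Error, so callers only need to inspect the returned Result.
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	result := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
//		_, err := sim.WaitTillHealthy(ctx)
//		return err
//	})
//	if result.Error != nil {
//		// the run failed or the timeout fired first
//	}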

// Maximal number of parallel calls to cleanup functions on
// Simulation.Close.
var maxParallelCleanups = 10

// Close calls all cleanup functions that are returned by
// ServiceFunc, waits for all of them to finish and for other
// functions that explicitly block shutdownWG
// (like Simulation.PeerEvents), and shuts down the network
// at the end. It is used to clean all resources from the
// simulation.
func (s *Simulation) Close() {
    close(s.done)

    sem := make(chan struct{}, maxParallelCleanups)
    s.mu.RLock()
    cleanupFuncs := make([]func(), len(s.cleanupFuncs))
    for i, f := range s.cleanupFuncs {
        if f != nil {
            cleanupFuncs[i] = f
        }
    }
    s.mu.RUnlock()
    var cleanupWG sync.WaitGroup
    for _, cleanup := range cleanupFuncs {
        cleanupWG.Add(1)
        sem <- struct{}{}
        go func(cleanup func()) {
            defer cleanupWG.Done()
            defer func() { <-sem }()

            cleanup()
        }(cleanup)
    }
    cleanupWG.Wait()

    if s.httpSrv != nil {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()
        err := s.httpSrv.Shutdown(ctx)
        if err != nil {
            log.Error("Error shutting down HTTP server!", "err", err)
        }
        close(s.runC)
    }

    s.shutdownWG.Wait()
    s.Net.Shutdown()
}

// Done returns a channel that is closed when the simulation
// is closed by the Close method. It is useful for signaling termination
// of all possible goroutines that are created within the test.
func (s *Simulation) Done() <-chan struct{} {
    return s.done
}
@ -1,203 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package simulation

import (
    "context"
    "errors"
    "flag"
    "sync"
    "testing"
    "time"

    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/node"
    "github.com/ethereum/go-ethereum/p2p/simulations"
    "github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    "github.com/mattn/go-colorable"
)

var (
    loglevel = flag.Int("loglevel", 2, "verbosity of logs")
)

func init() {
    flag.Parse()
    log.PrintOrigins(true)
    log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

// TestRun tests if the Run method calls RunFunc and if it handles the context properly.
func TestRun(t *testing.T) {
    sim := New(noopServiceFuncMap)
    defer sim.Close()

    t.Run("call", func(t *testing.T) {
        expect := "something"
        var got string
        r := sim.Run(context.Background(), func(ctx context.Context, sim *Simulation) error {
            got = expect
            return nil
        })

        if r.Error != nil {
            t.Errorf("unexpected error: %v", r.Error)
        }
        if got != expect {
            t.Errorf("expected %q, got %q", expect, got)
        }
    })

    t.Run("cancellation", func(t *testing.T) {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()

        r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
            time.Sleep(time.Second)
            return nil
        })

        if r.Error != context.DeadlineExceeded {
            t.Errorf("unexpected error: %v", r.Error)
        }
    })

    t.Run("context value and duration", func(t *testing.T) {
        ctx := context.WithValue(context.Background(), "hey", "there")
        sleep := 50 * time.Millisecond

        r := sim.Run(ctx, func(ctx context.Context, sim *Simulation) error {
            if ctx.Value("hey") != "there" {
                return errors.New("expected context value not passed")
            }
            time.Sleep(sleep)
            return nil
        })

        if r.Error != nil {
            t.Errorf("unexpected error: %v", r.Error)
        }
        if r.Duration < sleep {
            t.Errorf("reported run duration less than expected: %s", r.Duration)
        }
    })
}

// TestClose tests that the Close method triggers all cleanup functions and that no nodes are up afterwards.
func TestClose(t *testing.T) {
    var mu sync.Mutex
    var cleanupCount int

    sleep := 50 * time.Millisecond

    sim := New(map[string]ServiceFunc{
        "noop": func(ctx *adapters.ServiceContext, b *sync.Map) (node.Service, func(), error) {
            return newNoopService(), func() {
                time.Sleep(sleep)
                mu.Lock()
                defer mu.Unlock()
                cleanupCount++
            }, nil
        },
    })

    nodeCount := 30

    _, err := sim.AddNodes(nodeCount)
    if err != nil {
        t.Fatal(err)
    }

    var upNodeCount int
    for _, n := range sim.Net.GetNodes() {
        if n.Up() {
            upNodeCount++
        }
    }
    if upNodeCount != nodeCount {
        t.Errorf("all nodes should be up, instead only %v are up", upNodeCount)
    }

    sim.Close()

    if cleanupCount != nodeCount {
        t.Errorf("number of cleanups expected %v, got %v", nodeCount, cleanupCount)
    }

    upNodeCount = 0
    for _, n := range sim.Net.GetNodes() {
        if n.Up() {
            upNodeCount++
        }
    }
    if upNodeCount != 0 {
        t.Errorf("all nodes should be down, instead %v are up", upNodeCount)
    }
}

// TestDone checks if the Close method triggers the closing of the done channel.
func TestDone(t *testing.T) {
    sim := New(noopServiceFuncMap)
    sleep := 50 * time.Millisecond
    timeout := 2 * time.Second

    start := time.Now()
    go func() {
        time.Sleep(sleep)
        sim.Close()
    }()

    select {
    case <-time.After(timeout):
        t.Error("done channel closing timed out")
    case <-sim.Done():
        if d := time.Since(start); d < sleep {
            t.Errorf("done channel closed sooner than expected: %s", d)
        }
    }
}

// a helper map for usual services that do not do anything
var noopServiceFuncMap = map[string]ServiceFunc{
    "noop": noopServiceFunc,
}

// a helper function for the most basic noop service
func noopServiceFunc(_ *adapters.ServiceContext, _ *sync.Map) (node.Service, func(), error) {
    return newNoopService(), nil, nil
}

func newNoopService() node.Service {
    return &noopService{}
}

// a helper function for the most basic noop service
// of a different type than noopService, to test
// multiple services on one node.
func noopService2Func(_ *adapters.ServiceContext, _ *sync.Map) (node.Service, func(), error) {
    return new(noopService2), nil, nil
}

// noopService2 is a service that does not do anything
// but implements the node.Service interface.
type noopService2 struct {
    simulations.NoopService
}

type noopService struct {
    simulations.NoopService
}