Compare commits

...

61 Commits

Author SHA1 Message Date
66e016c6dc swarm: release v0.4.3 (#1602)
* swarm: 0.4.3 release notes

* swarm: 0.4.3 stable version

* swarm: 0.4.3 release notes - fix
2019-07-25 13:08:29 +02:00
74b12e35bf network/retrieve: add bzz-retrieve protocol (#1589)
* network/retrieve: initial commit

* network: import dependencies from stream package branch

* network/retrieve: address pr comments

* network/retrieve: fix pr comments

* network/retrieve: create logger for peer

* network: address pr comments

* network/retrieve: lint

* network/retrieve: prevent forever loop

* network/retrieve: address pr comments

* network/retrieve: fix linter

* network/retrieve: pr comments

* network/retrieval: pr comments
2019-07-25 10:53:46 +02:00
388d8ccd9f PoC: Network simulation framework (#1555)
* simv2: wip

* simulation: exec adapter start/stop

* simulation: add node status to exec adapter

* simulation: initial simulation code

* simulation: exec adapter, configure path to executable

* simulation: initial docker adapter

* simulation: wip kubernetes adapter

* simulation: kubernetes adapter proxy

* simulation: implement GetAll/StartAll/StopAll

* simulation: kuberentes adapter - set env vars and resource limits

* simulation: discovery test

* simulation: remove port definitions within docker adapter

* simulation: simplify wait for healthy loop

* simulation: get nat ip addr from interface

* simulation: pull docker images automatically

* simulation: NodeStatus -> NodeInfo

* simulation: move discovery test to example dir

* simulation: example snapshot usage

* simulation: add goclient specific simulation

* simulation: add peer connections to snapshot

* simulation: close rpc client

* simulation: don't export kubernetes proxy server

* simulation: merge simulation code

* simulation: don't export nodemap

* simulation: rename SimulationSnapshot -> Snapshot

* simulation: linting fixes

* simulation: add k8s available helper func

* simulation: vendor

* simulation: fix 'no non-test Go files' when building

* simulation: remove errors from interface methods where non were returned

* simulation: run getHealthInfo check in parallel
2019-07-24 17:00:13 +02:00
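The commit list above describes an adapter-based design: exec, Docker and Kubernetes back ends drive simulated nodes behind a common interface, with GetAll/StartAll/StopAll operating on them. As rough orientation only, here is a hypothetical Go sketch of what such an adapter contract could look like; the type and method names are illustrative assumptions, not the actual simulation package API.

```go
package simulation

// NodeInfo describes a simulated node (cf. the NodeStatus -> NodeInfo rename
// in the commit list). The fields are illustrative assumptions.
type NodeInfo struct {
	ID      string
	Running bool
}

// NodeConfig carries adapter-specific settings: a path to the executable for
// the exec adapter, an image name for the Docker/Kubernetes adapters,
// environment variables and resource limits.
type NodeConfig struct {
	ID    string
	Env   []string
	Image string
}

// Adapter is a hypothetical common interface that the exec, Docker and
// Kubernetes back ends could each implement.
type Adapter interface {
	NewNode(config NodeConfig) (Node, error)
}

// Node is the per-node lifecycle handle; StartAll/StopAll/GetAll would
// iterate over a collection of these.
type Node interface {
	Start() error
	Stop() error
	Info() NodeInfo
}
```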
9720da34db network: structured output for kademlia table (#1586) 2019-07-19 13:35:11 +02:00
090197227f client: add bzz client, update smoke tests (#1582) 2019-07-18 18:49:20 +02:00
48857ae88e swarm-smoke: fix check max prox hosts for pull/push sync modes (#1578) 2019-07-17 16:21:50 +02:00
ad77f43b9d cmd/swarm: allow using a network interface by name for nat purposes (#1557) 2019-07-16 09:35:04 +02:00
9d96d9bef5 pss: disable TestForwardBasic (#1544) 2019-07-11 18:17:26 +02:00
7101f65a8b api, network: count chunk deliveries per peer (#1534) 2019-07-09 22:50:06 +02:00
af3b5e9ce1 network/newstream: new stream! protocol base implementation (#1500)
network/newstream: merge new stream protocol wire protocol changes, changes to docs, add basic protocol handlers and placeholders
2019-07-08 13:25:56 +03:00
7deac5693c swarm: fix bzz_info.port when using dynamic port allocation (#1537) 2019-07-05 12:15:17 +02:00
d5f6ee4620 cmd/swarm: make bzzaccount flag optional and add bzzkeyhex flag (#1531)
* cmd/swarm: don't require bzzaccount flag

* docker: remove setup script because bzzaccount is not required anymore

* cmd/swarm: manage account creation/selection when bzzflag is not defined

* cmd/swarm: no bzzaccount flag tests

* cmd/swarm: use random network ports on tests

* cmd/swarm: fix typo

* cmd/swarm: add --bzzkeyhex flag

* cmd/swarm: use different ipc paths for tests

* readme: update example on how to run swarm

* cmd/swarm: rename getAccount -> getOrCreateAccount

* cmd/swarm: remove unneeded comment

* cmd/swarm: use shorter ipc path for test
2019-07-04 13:14:10 +02:00
fb73e6cb9b cmd/swarm: remove separate function to parse env vars (#1536) 2019-07-04 13:13:39 +02:00
a6e64aea67 network/bitvector: Multibit set/unset + string rep (#1530)
* network/bitvector: Multibit set/unset + string rep

* network/bitvector: Add code comments

* network/bitvector: Make Unset -> Set with bool false

* network/bitvector: Revert to Set/Unset

* network/stream: Update to new bitvector signature
2019-07-02 18:08:00 +02:00
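For readers who have not seen the package, here is a minimal, self-contained Go sketch of a byte-backed bit vector with multi-bit Set/Unset and a string representation, in the spirit of the PR titles above; it illustrates the idea only and is not the network/bitvector implementation.

```go
package bitvector

import "strings"

// BitVector is a simple byte-backed bit set of a fixed length.
type BitVector struct {
	len int
	b   []byte
}

// New creates a bit vector holding l bits, all unset.
func New(l int) *BitVector {
	return &BitVector{len: l, b: make([]byte, (l+7)/8)}
}

// Set marks bits in the range [start, end) as present.
func (bv *BitVector) Set(start, end int) {
	for i := start; i < end && i < bv.len; i++ {
		bv.b[i/8] |= 1 << uint(i%8)
	}
}

// Unset clears bits in the range [start, end).
func (bv *BitVector) Unset(start, end int) {
	for i := start; i < end && i < bv.len; i++ {
		bv.b[i/8] &^= 1 << uint(i%8)
	}
}

// String renders the vector as a run of '0' and '1' characters.
func (bv *BitVector) String() string {
	var sb strings.Builder
	for i := 0; i < bv.len; i++ {
		if bv.b[i/8]&(1<<uint(i%8)) != 0 {
			sb.WriteByte('1')
		} else {
			sb.WriteByte('0')
		}
	}
	return sb.String()
}
```

With this sketch, New(16) followed by Set(2, 5) would print as 0011100000000000.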
b3f21ddbba swarm: 0.4.3 unstable (#1526)
* swarm: 0.4.3 unstable

* CHANGELOG: consistent date format
2019-06-28 12:22:07 +02:00
3cdc45ee0f travis: also build on release tags (#1527) 2019-06-28 12:02:47 +02:00
bef511363e swarm: release v0.4.2 (#1496)
* CHANGELOG: reflect latest merged PRs

* changelog: updated changelog for swarm 0.4.2

* version: mark 0.4.2 stable
2019-06-28 11:19:28 +02:00
fb75cdce03 network: bump bzz stream hive (#1522) 2019-06-28 10:21:14 +02:00
13774a8aee docker: update ca-certificates file (#1525) 2019-06-27 18:06:09 +02:00
4d66995fee Add swarm guide to /docs (#1513)
* docs: add swarm-guide

* readthedocs build configuration file
2019-06-26 10:18:32 +02:00
a0a14cc11c network/simulation: Add ExecAdapter capability to swarm simulations (#1503) 2019-06-24 09:48:40 +02:00
8afb316399 docs: add initial stream! protocol specification (#1454)
* docs: add initial pull sync spec
2019-06-20 12:21:07 +02:00
f2a12b38c4 docs: release process setup (#1487) 2019-06-19 11:12:56 +02:00
f828da674d build: enable ubuntu ppa disco (19.04) builds (#1495) 2019-06-18 15:34:30 +02:00
9fa6d29c88 network/stream: remove send.offered.hashes trace (#1497) 2019-06-18 14:53:18 +02:00
d589af14a8 simple fetchers (#1492)
* network: disable shouldNOTRequestAgain
* vendor singleflight
* network: disable shouldNOTRequestAgain
* network, storage: fix tests
* network/storage: remove HopCount and SkipCheck
* network/stream: use localstore when providing chunks a node has offered
* storage: refactor lnetstore
* storage: rename FetcherItem to Fetcher
* storage/feed: no distinction between catastrophic err or chunk not found
* network/stream: remove TestDeliveryFromNodes, as FindPeer is changed, and Swarm connectivity is not a chain
* network/stream: fixed intervals tests
* swarm: fixes for linter
* storage: use LRU cache for fetchers
* network, storage: better godoc
* storage/netstore: explicit errors
* storage/feed/lookup: Clarify ReadFunc expected return error values
* storage: address comments by elad
2019-06-18 08:47:27 +02:00
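The bullet list mentions vendoring singleflight and keeping fetchers in an LRU cache. As an illustration of the request-deduplication pattern those bullets point at (the chunk key and fetch function below are made up for the example), concurrent requests for the same key can be collapsed with golang.org/x/sync/singleflight:

```go
package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

// fetchChunk stands in for a network retrieval; in a real node this would ask
// peers for the chunk data. Purely illustrative.
func fetchChunk(key string) ([]byte, error) {
	return []byte("data for " + key), nil
}

func main() {
	var group singleflight.Group

	// Concurrent callers asking for the same key share one fetch: only a
	// single fetchChunk call is in flight per key at any time.
	v, err, shared := group.Do("chunk-key", func() (interface{}, error) {
		return fetchChunk("chunk-key")
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %d bytes (shared=%v)\n", len(v.([]byte)), shared)
}
```

A bounded LRU of such per-chunk fetchers (for example hashicorp/golang-lru) is presumably what "use LRU cache for fetchers" refers to: it caps how many recently requested chunks keep fetch state alive.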
f57d4f0802 swarm/storage: Support for uploading 100gb files (#1395)
* swarm/storage: Fix gouroutine leak while uploading large files
* swarm/storage: Fix pyramid chunker to wrap properly level3 and above
2019-06-18 08:46:20 +02:00
604960938b Revert "simplification of Fetchers (#1344)" (#1491)
This reverts commit 0b724bd4d5.
2019-06-17 10:30:55 +02:00
0b724bd4d5 simplification of Fetchers (#1344) 2019-06-17 10:01:12 +02:00
21adbe2fac docker: include git commit hash in swarm version (#1488)
* dockerignore: remove .git - needed for git hash on builds

* changelog: include #1488
2019-06-14 15:49:18 +02:00
8b37def961 cmd/swarm-smoke: fix what we wanted to avoid (#1486) 2019-06-14 14:49:21 +02:00
fce3e45f2b build: don't upload buildinfo (#1484) 2019-06-14 14:24:12 +02:00
99db027993 docker: fix run-smoke.sh path to binary (#1485) 2019-06-14 14:22:55 +02:00
8a1f6a6c6f README: add docker section and fix license section (#1483)
* readme: add docker section

* readme: fix license info

* readme: omit sections in toc
2019-06-14 12:18:56 +02:00
9704ced894 cmd/swarm-smoke: add paging to trackChunks (#1464)
* cmd/swarm-smoke: add paging to trackChunks
2019-06-14 10:46:41 +02:00
99a431cf50 version: update to v0.4.2 unstable (#1470) 2019-06-13 17:45:36 +02:00
0b9a3ea407 swarm: release v0.4.1 (#1469) 2019-06-13 16:32:55 +02:00
2f0b94fa1a network: bump proto versions due to change in OfferedHashesMsg (#1465) 2019-06-13 16:11:46 +02:00
815a6d120f changelog: initial setup (#1466) 2019-06-13 16:04:16 +02:00
4744b7c9d7 p2p/protocols: do not vendor swarm related packages (#1462) 2019-06-13 15:11:00 +02:00
3243c7ba07 docker: create new dockerfiles that are context aware (#1463) 2019-06-13 14:30:13 +02:00
76e71cca3b network/stream: disable flaky TestStartNetworkSync test (#1458) 2019-06-12 15:50:44 +02:00
b3f601427c storage: fix alignement panics on 32 bit arch (#1460) 2019-06-12 12:55:36 +02:00
f709083e47 api: temporary fix test failing on windows due to access denied (#1450) 2019-06-12 10:00:46 +02:00
e865445f17 network/stream: remove cast for msg type (#1449) 2019-06-12 08:55:15 +02:00
de220b8323 docs: arch diagram - use source of truth images (#1434) 2019-06-11 10:17:45 +02:00
c1126aaa1b swarm/network/stream: add syncing test (#1399)
* swarm/network/stream: remove old syncing test and replace it with two more granular tests, with the first one testing a full sync between two nodes (makes sure that when all subscriptions are established on all bins - all content is replicated from one node to the other), and a second test that adds a bigger amount of nodes to a star network topology, effectively making the pivot node have a depth > 0 to sync just certain content to the connected nodes (due to a star topology this test (always 1 hop) does not check that, for example, specific content is routed across multiple nodes, but assumes content should be only on the most proximate node)

* api, cmd/swarm-smoke, fuse, network, storage: fix data dir leak when using ephemeral filestore, pull test params to be easily adjustable
2019-06-10 10:52:19 +02:00
ee7ccbdb4d docs: initial swarm arch diagram (#1432)
Important high-level diagrams added
2019-06-07 13:27:20 +02:00
bedd06762d swarm-smoke: add debug flag (#1428) 2019-06-06 15:06:18 +02:00
5663cb2c76 docs: push syncing tags doc (#1429) 2019-06-06 14:51:58 +02:00
a454aa6c8d swarm,cmd: fix migration link, change loglevel severity (#1420) 2019-06-06 14:46:28 +02:00
bd1887189c swarm/network/stream: remove dead code (#1422)
* swarm/network/stream: remove dead code

* swarm/network/stream: remove HandoverProof
2019-06-05 12:59:46 +02:00
6d0902da3c p2p/protocols: import to codebase, remove from vendor (#1416) 2019-06-04 14:32:44 +02:00
70320ceeae Makefile: added support for "make swarm" command (#1412) 2019-06-03 20:59:48 +02:00
3e522f2e7d appveyor: remove unused nsis task (#1410) 2019-06-03 15:33:09 +02:00
1c8c8bac45 build/deb: fix deb.control for ethereum-swarm (#1414)
* build/deb: fix deb.control for ethereum-swarm

* build: env.sh fix
2019-06-03 15:26:37 +02:00
809165fc5e ci/cd: fix azure blobstorage name (#1413) 2019-06-03 15:16:56 +02:00
2c80a4198f Appveyor branches: master only (#1409) 2019-06-03 14:18:41 +02:00
a83a84460e .github: fix contributing.md (#1408) 2019-06-03 14:08:40 +02:00
c5430df218 README.md: modify installation instructions after repo split (#1407) 2019-06-03 13:10:20 +02:00
b046760db1 swarm: codebase split from go-ethereum (#1405) 2019-06-03 12:28:18 +02:00
3273 changed files with 506138 additions and 158343 deletions


@@ -1,5 +1 @@
**/*_test.go
build/_workspace
build/_bin
tests/testdata
.github

.github/CODEOWNERS

@@ -1,23 +1 @@
# Lines starting with '#' are comments.
# Each line is a file pattern followed by one or more owners.
accounts/usbwallet @karalabe
accounts/scwallet @gballet
accounts/abi @gballet
cmd/clef @holiman
cmd/puppeth @karalabe
consensus @karalabe
core/ @karalabe @holiman @rjl493456442
dashboard/ @kurkomisi
eth/ @karalabe @holiman @rjl493456442
graphql/ @gballet
les/ @zsfelfoldi @rjl493456442
light/ @zsfelfoldi @rjl493456442
mobile/ @karalabe @ligi
p2p/ @fjl @zsfelfoldi
rpc/ @fjl @holiman
p2p/simulations @zelig @nonsense @janos @justelad
p2p/protocols @zelig @nonsense @janos @justelad
p2p/testing @zelig @nonsense @janos @justelad
signer/ @holiman
whisper/ @gballet @gluk256
# To be defined


@@ -1,40 +1,20 @@
# Contributing
## Contributing
Thank you for considering to help out with the source code! We welcome
contributions from anyone on the internet, and are grateful for even the
smallest of fixes!
Thank you for considering to help out with the source code! We welcome contributions from
anyone on the internet, and are grateful for even the smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a
pull request for the maintainers to review and merge into the main code base. If
you wish to submit more complex changes though, please check up with the core
devs first on [our gitter channel](https://gitter.im/ethereum/go-ethereum) to
ensure those changes are in line with the general philosophy of the project
and/or get some early feedback which can make both your efforts much lighter as
well as our review and merge procedures quick and simple.
## Coding guidelines
If you'd like to contribute to Swarm, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit more
complex changes though, please check up with the core devs first on [our Swarm gitter channel](https://gitter.im/ethersphere/orange-lounge)
to ensure those changes are in line with the general philosophy of the project and/or get some
early feedback which can make both your efforts much lighter as well as our review and merge
procedures quick and simple.
Please make sure your contributions adhere to our coding guidelines:
* Code must adhere to the official Go
[formatting](https://golang.org/doc/effective_go.html#formatting) guidelines
(i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go
[commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* [Code review guidelines](https://github.com/ethersphere/swarm/blob/master/docs/Code-Review-Guidelines.md).
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"
## Can I have feature X
Before you submit a feature request, please check and make sure that it isn't
possible through some other means. The JavaScript-enabled console is a powerful
feature in the right hands. Please check our
[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info
and help.
## Configuration, dependencies, and tests
Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, managing project dependencies
and testing procedures.
* E.g. "fuse: ignore default manifest entry"


@@ -1,12 +1,8 @@
Hi there,
please note that this is an issue tracker reserved for bug reports and feature requests.
For general questions please use the gitter channel or the Ethereum stack exchange at https://ethereum.stackexchange.com.
<!-- Thanks for filing an issue! Before hitting the button, please answer these questions. It's helpful to search the existing GitHub issues first. It's likely that another user has already reported the issue you're facing, or it's a known issue that we're already aware of. Please note that this is an issue tracker reserved for bug reports and feature requests. For general questions please use the gitter channel https://gitter.im/ethereum/swarm or the ethereum stack exchange at https://ethereum.stackexchange.com. -->
#### System information
Geth version: `geth version`
Swarm version: `swarm version`
OS & Version: Windows/Linux/OSX
Commit hash : (if `develop`)

.gitmodules

@@ -1,3 +0,0 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests

.mailmap

@@ -1,123 +0,0 @@
Jeffrey Wilcke <jeffrey@ethereum.org>
Jeffrey Wilcke <jeffrey@ethereum.org> <geffobscura@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@obscura.com>
Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@users.noreply.github.com>
Viktor Trón <viktor.tron@gmail.com>
Joseph Goulden <joegoulden@gmail.com>
Nick Savers <nicksavers@gmail.com>
Maran Hidskes <maran.hidskes@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
Bas van Kervel <bas@ethdev.com>
Bas van Kervel <bas@ethdev.com> <basvankervel@ziggo.nl>
Bas van Kervel <bas@ethdev.com> <basvankervel@gmail.com>
Bas van Kervel <bas@ethdev.com> <bas-vk@users.noreply.github.com>
Sven Ehlert <sven@ethdev.com>
Vitalik Buterin <v@buterin.com>
Marian Oancea <contact@siteshop.ro>
Christoph Jentzsch <jentzsch.software@gmail.com>
Heiko Hees <heiko@heiko.org>
Alex Leverington <alex@ethdev.com>
Alex Leverington <alex@ethdev.com> <subtly@users.noreply.github.com>
Zsolt Felföldi <zsfelfoldi@gmail.com>
Gavin Wood <i@gavwood.com>
Martin Becze <mjbecze@gmail.com>
Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com>
Dimitry Khokhlov <winsvega@mail.ru>
Roman Mandeleil <roman.mandeleil@gmail.com>
Alec Perseghin <aperseghin@gmail.com>
Alon Muroch <alonmuroch@gmail.com>
Arkadiy Paronyan <arkadiy@ethdev.com>
Jae Kwon <jkwon.work@gmail.com>
Aaron Kumavis <kumavis@users.noreply.github.com>
Nick Dodson <silentcicero@outlook.com>
Jason Carver <jacarver@linkedin.com>
Jason Carver <jacarver@linkedin.com> <ut96caarrs@snkmail.com>
Joseph Chow <ethereum@outlook.com>
Joseph Chow <ethereum@outlook.com> ethers <TODO>
Enrique Fynn <enriquefynn@gmail.com>
Vincent G <caktux@gmail.com>
RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
Nchinda Nchinda <nchinda2@gmail.com>
Aron Fischer <github@aron.guru> <homotopycolimit@users.noreply.github.com>
Vlad Gluhovsky <gluk256@users.noreply.github.com>
Ville Sundell <github@solarius.fi>
Elliot Shepherd <elliot@identitii.com>
Yohann Léon <sybiload@gmail.com>
Gregg Dourgarian <greggd@tempworks.com>
Casey Detrio <cdetrio@gmail.com>
Jens Agerberg <github@agerberg.me>
Nick Johnson <arachnid@notdot.net>
Henning Diedrich <hd@eonblast.com>
Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com>
Felix Lange <fjl@twurst.com>
Felix Lange <fjl@twurst.com> <fjl@users.noreply.github.com>
Максим Чусовлянов <mchusovlianov@gmail.com>
Louis Holbrook <dev@holbrook.no>
Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com>
Thomas Bocek <tom@tomp2p.net>
Victor Tran <vu.tran54@gmail.com>
Justin Drake <drakefjustin@gmail.com>
Frank Wang <eternnoir@gmail.com>
Gary Rong <garyrong0905@gmail.com>
Guillaume Nicolas <guin56@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
Armin Braun <me@obrown.io>
Ernesto del Toro <ernesto.deltoro@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> <ernestodeltoro@users.noreply.github.com>

.readthedocs.yml

@@ -0,0 +1,14 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
sphinx:
  configuration: docs/swarm-guide/contents/conf.py
  builder: html
python:
  version: 3.7
  install:
    - requirements: docs/swarm-guide/requirements.txt


@@ -1,21 +1,14 @@
language: go
go_import_path: github.com/ethereum/go-ethereum
go_import_path: github.com/ethersphere/swarm
sudo: false
branches:
only:
- master
- /v(\d+\.)(\d+\.)(\d)/
matrix:
include:
- os: linux
dist: xenial
sudo: required
go: 1.10.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage $TEST_PACKAGES
- os: linux
dist: xenial
dist: trusty
sudo: required
go: 1.11.x
script:
@@ -27,7 +20,7 @@ matrix:
# These are the latest Go versions.
- os: linux
dist: xenial
dist: trusty
sudo: required
go: 1.12.x
script:
@@ -54,24 +47,20 @@ matrix:
# This builder only tests code linters on latest version of Go
- os: linux
dist: xenial
dist: trusty
go: 1.12.x
env:
- lint
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go lint
# This builder does the Ubuntu PPA upload
- if: repo = ethereum/go-ethereum AND type = push
- if: type = push
os: linux
dist: xenial
dist: trusty
go: 1.12.x
env:
- ubuntu-ppa
git:
submodules: false # avoid cloning ethereum/tests
addons:
apt:
packages:
@@ -83,18 +72,16 @@ matrix:
- python-paramiko
script:
- echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
- go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
- go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user ethswarm -signer "Ethereum Swarm Linux Builder <swarm@ethereum.org>"
# This builder does the Linux Azure uploads
- if: repo = ethereum/go-ethereum AND type = push
- if: type = push
os: linux
dist: xenial
dist: trusty
sudo: required
go: 1.12.x
env:
- azure-linux
git:
submodules: false # avoid cloning ethereum/tests
addons:
apt:
packages:
@@ -102,134 +89,66 @@ matrix:
script:
# Build for the primary platforms that Trusty can manage
- go run build/ci.go install
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- go run build/ci.go install -arch 386
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
# Switch over GCC to cross compilation (breaks 386, hence why do it here only)
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
- sudo ln -s /usr/include/asm-generic /usr/include/asm
- GOARM=5 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- GOARM=6 go run build/ci.go install -arch arm -cc arm-linux-gnueabi-gcc
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- GOARM=7 go run build/ci.go install -arch arm -cc arm-linux-gnueabihf-gcc
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- go run build/ci.go install -arch arm64 -cc aarch64-linux-gnu-gcc
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
# This builder does the Linux Azure MIPS xgo uploads
- if: repo = ethereum/go-ethereum AND type = push
- if: type = push
os: linux
dist: xenial
dist: trusty
services:
- docker
go: 1.12.x
env:
- azure-linux-mips
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
- go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
- for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
# This builder does the Android Maven and Azure uploads
- if: repo = ethereum/go-ethereum AND type = push
os: linux
dist: xenial
addons:
apt:
packages:
- oracle-java8-installer
- oracle-java8-set-default
language: android
android:
components:
- platform-tools
- tools
- android-15
- android-19
- android-24
env:
- azure-android
- maven-android
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- curl https://dl.google.com/go/go1.12.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r19b-linux-x86_64.zip -o android-ndk-r19b.zip
- unzip -q android-ndk-r19b.zip && rm android-ndk-r19b.zip
- mv android-ndk-r19b $ANDROID_HOME/ndk-bundle
- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum
- go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
- go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload ethswarm/builds
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- if: repo = ethereum/go-ethereum AND type = push
- if: type = push
os: osx
go: 1.12.x
env:
- azure-osx
- azure-ios
- cocoapods-ios
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go install
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload ethswarm/builds
# Build the iOS framework and upload it to CocoaPods and Azure
- gem uninstall cocoapods -a -x
- gem install cocoapods
- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
- sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb
- if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi
- xctool -version
- xcrun simctl list
# Workaround for https://github.com/golang/go/issues/23749
- export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
- go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds
# This builder does the Azure archive purges to avoid accumulating junk
- if: repo = ethereum/go-ethereum AND type = cron
- if: type = cron
os: linux
dist: xenial
dist: trusty
go: 1.12.x
env:
- azure-purge
git:
submodules: false # avoid cloning ethereum/tests
script:
- go run build/ci.go purge -store gethstore/builds -days 14
- name: Race Detector for Swarm
if: repo = ethersphere/go-ethereum
os: linux
dist: xenial
go: 1.12.x
git:
submodules: false # avoid cloning ethereum/tests
script: ./build/travis_keepalive.sh go test -timeout 20m -race ./swarm... ./p2p/{protocols,simulations,testing}/...
- go run build/ci.go purge -store ethswarm/builds -days 14

AUTHORS

@@ -1,174 +1,35 @@
# This is the official list of go-ethereum authors for copyright purposes.
# Core team members
Afri Schoedon <5chdn@users.noreply.github.com>
Agustin Armellini Fischer <armellini13@gmail.com>
Airead <fgh1987168@gmail.com>
Alan Chen <alanchchen@users.noreply.github.com>
Alejandro Isaza <alejandro.isaza@gmail.com>
Ales Katona <ales@coinbase.com>
Alex Leverington <alex@ethdev.com>
Alex Wu <wuyiding@gmail.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com>
Ali Hajimirza <Ali92hm@users.noreply.github.com>
Anton Evangelatov <anton.evangelatov@gmail.com>
Arba Sasmoyo <arba.sasmoyo@gmail.com>
Armani Ferrante <armaniferrante@berkeley.edu>
Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru>
Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com>
Benoit Verkindt <benoit.verkindt@gmail.com>
Bo <bohende@gmail.com>
Bo Ye <boy.e.computer.1982@outlook.com>
Bob Glickstein <bobg@users.noreply.github.com>
Brian Schroeder <bts@gmail.com>
Casey Detrio <cdetrio@gmail.com>
Chase Wright <mysticryuujin@gmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com>
Daniel Sloof <goapsychadelic@gmail.com>
Darrel Herbst <dherbst@gmail.com>
Dave Appleton <calistralabs@gmail.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com>
Dmitry Shulyak <yashulyak@gmail.com>
Egon Elbre <egonelbre@gmail.com>
Elias Naur <elias.naur@gmail.com>
Elliot Shepherd <elliot@identitii.com>
Enrique Fynn <enriquefynn@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com>
Ethan Buchman <ethan@coinculture.info>
Eugene Valeyev <evgen.povt@gmail.com>
Evangelos Pappas <epappas@evalonlabs.com>
Evgeny Danilenko <6655321@bk.ru>
Fabian Vogelsteller <fabian@frozeman.de>
Fabio Barone <fabio.barone.co@gmail.com>
Fabio Berger <fabioberger1991@gmail.com>
FaceHo <facehoshi@gmail.com>
Felix Lange <fjl@twurst.com>
Fiisio <liangcszzu@163.com>
Frank Wang <eternnoir@gmail.com>
Furkan KAMACI <furkankamaci@gmail.com>
Gary Rong <garyrong0905@gmail.com>
George Ornbo <george@shapeshed.com>
Gregg Dourgarian <greggd@tempworks.com>
Guillaume Ballet <gballet@gmail.com>
Guillaume Nicolas <guin56@gmail.com>
Gustav Simonsson <gustav.simonsson@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com>
Henning Diedrich <hd@eonblast.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Ivan Daniluk <ivan.daniluk@gmail.com>
Jae Kwon <jkwon.work@gmail.com>
Jamie Pitts <james.pitts@gmail.com>
Janoš Guljaš <janos@users.noreply.github.com>
Jason Carver <jacarver@linkedin.com>
Jay Guo <guojiannan1101@gmail.com>
Jeff R. Allen <jra@nella.org>
Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me>
Jia Chenhui <jiachenhui1989@gmail.com>
Jim McDonald <Jim@mcdee.net>
Joel Burget <joelburget@gmail.com>
Jonathan Brown <jbrown@bluedroplet.com>
Joseph Chow <ethereum@outlook.com>
Justin Clark-Casey <justincc@justincc.org>
Justin Drake <drakefjustin@gmail.com>
Kenji Siu <kenji@isuntv.com>
Kobi Gurkan <kobigurk@gmail.com>
Konrad Feldmeier <konrad@brainbot.com>
Kurkó Mihály <kurkomisi@users.noreply.github.com>
Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com>
Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com>
Leo Shklovskii <leo@thermopylae.net>
Lewis Marshall <lewis@lmars.net>
Lio李欧 <lionello@users.noreply.github.com>
Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com>
Magicking <s@6120.eu>
Maran Hidskes <maran.hidskes@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com>
Mark <markya0616@gmail.com>
Martin Holst Swende <martin@swende.se>
Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Maximilian Meister <mmeister@suse.de>
Micah Zoltu <micah@zoltu.net>
Michael Ruminer <michael.ruminer+github@gmail.com>
Miguel Mota <miguelmota2@gmail.com>
Miya Chen <miyatlchen@gmail.com>
Nchinda Nchinda <nchinda2@gmail.com>
Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net>
Nicolas Guillaume <gunicolas@sqli.com>
Noman <noman@noman.land>
Oli Bye <olibye@users.noreply.github.com>
Paul Litvak <litvakpol@012.net.il>
Paulo L F Casaretto <pcasaretto@gmail.com>
Paweł Bylica <chfast@gmail.com>
Peter Pratscher <pratscher@gmail.com>
Petr Mikusek <petr@mikusek.info>
Péter Szilágyi <peterke@gmail.com>
RJ Catalano <catalanor0220@gmail.com>
Ramesh Nair <ram@hiddentao.com>
Ricardo Catalinas Jiménez <r@untroubled.be>
Ricardo Domingos <ricardohsd@gmail.com>
Richard Hart <richardhart92@gmail.com>
Rob <robert@rojotek.com>
Robert Zaremba <robert.zaremba@scale-it.pl>
Russ Cox <rsc@golang.org>
Rémy Roy <remyroy@remyroy.com>
S. Matthew English <s-matthew-english@users.noreply.github.com>
Shintaro Kaneko <kaneshin0120@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com>
Stein Dekker <dekker.stein@gmail.com>
Steve Waldman <swaldman@mchange.com>
Steven Roose <stevenroose@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com>
Thomas Bocek <tom@tomp2p.net>
Ti Zhou <tizhou1986@gmail.com>
Tosh Camille <tochecamille@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com>
Victor Farazdagi <simple.square@gmail.com>
Victor Tran <vu.tran54@gmail.com>
Viktor Trón <viktor.tron@gmail.com>
Ville Sundell <github@solarius.fi>
Vincent G <caktux@gmail.com>
Vitalik Buterin <v@buterin.com>
Vitaly V <vvelikodny@gmail.com>
Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad Gluhovsky <gluk256@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com>
Yondon Fu <yondon.fu@gmail.com>
Zach <zach.ramsay@gmail.com>
Zahoor Mohamed <zahoor@zahoor.in>
Zoe Nolan <github@zoenolan.org>
Zsolt Felföldi <zsfelfoldi@gmail.com>
am2rican5 <am2rican5@gmail.com>
ayeowch <ayeowch@gmail.com>
b00ris <b00ris@mail.ru>
bailantaotao <Edwin@maicoin.com>
baizhenxuan <nkbai@163.com>
bloonfield <bloonfield@163.com>
changhong <changhong.yu@shanbay.com>
evgk <evgeniy.kamyshev@gmail.com>
ferhat elmas <elmas.ferhat@gmail.com>
holisticode <holistic.computing@gmail.com>
jtakalai <juuso.takalainen@streamr.com>
ken10100147 <sunhongping@kanjian.com>
ligi <ligi@ligi.de>
mark.lin <mark@maicoin.com>
necaremus <necaremus@gmail.com>
njupt-moon <1015041018@njupt.edu.cn>
nkbai <nkbai@163.com>
rhaps107 <dod-source@yandex.ru>
slumber1122 <slumber1122@gmail.com>
sunxiaojun2014 <sunxiaojun-xy@360.cn>
terasum <terasum@163.com>
tsarpaul <Litvakpol@012.net.il>
xiekeyang <xiekeyang@users.noreply.github.com>
yoza <yoza.is12s@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com>
Ralph Caraveo <deckarep@gmail.com>
Viktor Trón - @zelig
Louis Holbrook - @nolash
Lewis Marshall - @lmars
Anton Evangelatov - @nonsense
Janoš Guljaš - @janos
Balint Gabor - @gbalint
Elad Nachmias - @justelad
Daniel A. Nagy - @nagydani
Aron Fischer - @homotopycolimit
Fabio Barone - @holisticode
Zahoor Mohamed - @jmozah
Zsolt Felföldi - @zsfelfoldi
# External contributors
Kiel Barry
Gary Rong
Jared Wasinger
Leon Stanko
Javier Peletier [epiclabs.io]
Bartek Borkowski [tungsten-labs.com]
Shane Howley [mainframe.com]
Doug Leonard [mainframe.com]
Ivan Daniluk [status.im]
Felix Lange [EF]
Martin Holst Swende [EF]
Guillaume Ballet [EF]
ligi [EF]
Christopher Dro [blick-labs.com]
Sergii Bomko [ledgerleopard.com]
Domino Valdano
Rafael Matias
Coogan Brennan

CHANGELOG.md

@@ -0,0 +1,83 @@
## v0.4.3 (July 25, 2019)
### Notes
- **Docker users:** The `$PASSWORD` and `$DATADIR` environment variables are not supported anymore since this release. From now on you should mount the password or data dirctories as a volume. For example:
```bash
$ docker run -it -v $PWD/hostdata:/data \
-v $PWD/password:/password \
ethersphere/swarm:0.4.3 \
--datadir /data \
--password /password
```
### Bug fixes and improvements
* [#1586](https://github.com/ethersphere/swarm/pull/1586): network: structured output for kademlia table
* [#1582](https://github.com/ethersphere/swarm/pull/1582): client: add bzz client, update smoke tests
* [#1578](https://github.com/ethersphere/swarm/pull/1578): swarm-smoke: fix check max prox hosts for pull/push sync modes
* [#1557](https://github.com/ethersphere/swarm/pull/1557): cmd/swarm: allow using a network interface by name for nat purposes
* [#1534](https://github.com/ethersphere/swarm/pull/1534): api, network: count chunk deliveries per peer
* [#1537](https://github.com/ethersphere/swarm/pull/1537): swarm: fix bzz_info.port when using dynamic port allocation
* [#1531](https://github.com/ethersphere/swarm/pull/1531): cmd/swarm: make bzzaccount flag optional and add bzzkeyhex flag
* [#1536](https://github.com/ethersphere/swarm/pull/1536): cmd/swarm: use only one function to parse flags
* [#1530](https://github.com/ethersphere/swarm/pull/1530): network/bitvector: Multibit set/unset + string rep
* [#1555](https://github.com/ethersphere/swarm/pull/1555): PoC: Network simulation framework
## v0.4.2 (June 28, 2019)
### Notes
This release is not backward compatible with the previous versions of Swarm due to changes to the wire protocol of the Retrieve Request messages. Please update your nodes.
### Bug fixes and improvements
* [#1503](https://github.com/ethersphere/swarm/pull/1503): network/simulation: add ExecAdapter capability to swarm simulations
* [#1495](https://github.com/ethersphere/swarm/pull/1495): build: enable ubuntu ppa disco (19.04) builds
* [#1395](https://github.com/ethersphere/swarm/pull/1395): swarm/storage: support for uploading 100gb files
* [#1344](https://github.com/ethersphere/swarm/pull/1344): swarm/network, swarm/storage: simplification of fetchers
* [#1488](https://github.com/ethersphere/swarm/pull/1488): docker: include git commit hash in swarm version
## v0.4.1 (June 13, 2019)
### Improvements
* [#1465](https://github.com/ethersphere/swarm/pull/1465): network: bump proto versions due to change in OfferedHashesMsg
* [#1428](https://github.com/ethersphere/swarm/pull/1428): swarm-smoke: add debug flag
* [#1422](https://github.com/ethersphere/swarm/pull/1422): swarm/network/stream: remove dead code
* [#1463](https://github.com/ethersphere/swarm/pull/1463): docker: create new dockerfiles that are context aware
* [#1466](https://github.com/ethersphere/swarm/pull/1466): changelog for releases
### Bug fixes
* [#1460](https://github.com/ethersphere/swarm/pull/1460): storage: fix alignement panics on 32 bit arch
* [#1422](https://github.com/ethersphere/swarm/pull/1422), [#19650](https://github.com/ethereum/go-ethereum/pull/19650): swarm/network/stream: remove dead code
* [#1420](https://github.com/ethersphere/swarm/pull/1420): swarm, cmd: fix migration link, change loglevel severity
* [#19594](https://github.com/ethereum/go-ethereum/pull/19594): swarm/api/http: fix bzz-hash to return ens resolved hash directly
* [#19599](https://github.com/ethereum/go-ethereum/pull/19599): swarm/storage: fix SubscribePull to not skip chunks
### Notes
* Swarm has split the codebase ([go-ethereum#19661](https://github.com/ethereum/go-ethereum/pull/19661), [#1405](https://github.com/ethersphere/swarm/pull/1405)) from [ethereum/go-ethereum](https://github.com/ethereum/go-ethereum). The code is now under [ethersphere/swarm](https://github.com/ethersphere/swarm)
* New docker images (>=0.4.0) can now be found under https://hub.docker.com/r/ethersphere/swarm
## v0.4.0 (May 17, 2019)
### Changes
* Implemented parallel feed lookups within Swarm Feeds
* Updated syncing protocol subscription algorithm
* Implemented EIP-1577 - Multiaddr support for ENS
* Improved LocalStore implementation
* Added support for syncing tags which provide the ability to measure how long it will take for an uploaded file to sync to the network
* Fixed data race bugs within PSS
* Improved end-to-end integration tests
* Various performance improvements and bug fixes
* Improved instrumentation - metrics and OpenTracing traces
### Notes
This release is not backward compatible with the previous versions of Swarm due to the new LocalStore implementation. If you wish to keep your data, you should run a data migration prior to running this version.
BZZ network ID has been updated to 4.
Swarm v0.4.0 introduces major changes to the existing codebase. Among other things, the storage layer has been rewritten to be more modular and flexible in a manner that will accommodate for our future needs. Since Swarm at this point does not provide any storage guarantees, we have made the decision to not impose any migrations on the nodes that we maintain as part of the public test network, nor on our users. We have provided a [manual](https://github.com/ethersphere/swarm/blob/master/docs/Migration-v0.3-to-v0.4.md) for those of you who are running private deployments and would like to migrate your data to the new local storage schema.


@@ -1,16 +1,13 @@
# Build Geth in a stock Go builder container
FROM golang:1.12-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git
ADD . /swarm
WORKDIR /swarm
RUN make swarm
ADD . /go-ethereum
RUN cd /go-ethereum && make geth
FROM ethereum/client-go:v1.8.27 as geth
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]
FROM alpine:3.9
RUN apk --no-cache add ca-certificates && update-ca-certificates
COPY --from=builder /swarm/build/bin/swarm /usr/local/bin/
COPY --from=geth /usr/local/bin/geth /usr/local/bin/
ENTRYPOINT ["/usr/local/bin/swarm"]


@@ -1,15 +1,14 @@
# Build Geth in a stock Go builder container
FROM golang:1.12-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers git
ADD . /swarm
WORKDIR /swarm
RUN make alltools
ADD . /go-ethereum
RUN cd /go-ethereum && make all
FROM ethereum/client-go:v1.8.27 as geth
# Pull all binaries into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp
FROM alpine:3.9
RUN apk --no-cache add ca-certificates
COPY --from=builder /swarm/build/bin/* /usr/local/bin/
COPY --from=geth /usr/local/bin/geth /usr/local/bin/
COPY docker/run-smoke.sh /run-smoke.sh
ENTRYPOINT ["/usr/local/bin/swarm"]

Makefile

@@ -2,152 +2,12 @@
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.
.PHONY: geth android ios geth-cross swarm evm all test clean
.PHONY: geth-linux geth-linux-386 geth-linux-amd64 geth-linux-mips64 geth-linux-mips64le
.PHONY: geth-linux-arm geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
.PHONY: geth-darwin geth-darwin-386 geth-darwin-amd64
.PHONY: geth-windows geth-windows-386 geth-windows-amd64
GOBIN = $(shell pwd)/build/bin
GO ?= latest
geth:
build/env.sh go run build/ci.go install ./cmd/geth
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
swarm:
build/env.sh go run build/ci.go install ./cmd/swarm
@echo "Done building."
@echo "Run \"$(GOBIN)/swarm\" to launch swarm."
all:
build/env.sh go run build/ci.go install
android:
build/env.sh go run build/ci.go aar --local
@echo "Done building."
@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
ios:
build/env.sh go run build/ci.go xcode --local
@echo "Done building."
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test: all
build/env.sh go run build/ci.go test
lint: ## Run linters.
build/env.sh go run build/ci.go lint
clean:
./build/clean_go_build_cache.sh
rm -fr build/_workspace/pkg/ $(GOBIN)/*
# The devtools target installs tools required for 'go generate'.
# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
devtools:
env GOBIN= go get -u golang.org/x/tools/cmd/stringer
env GOBIN= go get -u github.com/kevinburke/go-bindata/go-bindata
env GOBIN= go get -u github.com/fjl/gencodec
env GOBIN= go get -u github.com/golang/protobuf/protoc-gen-go
env GOBIN= go install ./cmd/abigen
@type "npm" 2> /dev/null || echo 'Please install node.js and npm'
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'
swarm-devtools:
env GOBIN= go install ./cmd/swarm/mimegen
# Cross Compilation Targets (xgo)
geth-cross: geth-linux geth-darwin geth-windows geth-android geth-ios
@echo "Full cross compilation done:"
@ls -ld $(GOBIN)/geth-*
geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 geth-linux-mips64le
@echo "Linux cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-*
geth-linux-386:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
@echo "Linux 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep 386
geth-linux-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
@echo "Linux amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep amd64
geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-arm64
@echo "Linux ARM cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm
geth-linux-arm-5:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
@echo "Linux ARMv5 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
geth-linux-arm-6:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
@echo "Linux ARMv6 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
geth-linux-arm-7:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
@echo "Linux ARMv7 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
geth-linux-arm64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
@echo "Linux ARM64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm64
geth-linux-mips:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips
geth-linux-mipsle:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPSle cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
geth-linux-mips64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64
geth-linux-mips64le:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64le cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
geth-darwin: geth-darwin-386 geth-darwin-amd64
@echo "Darwin cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-*
geth-darwin-386:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
@echo "Darwin 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep 386
geth-darwin-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
@echo "Darwin amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
geth-windows: geth-windows-386 geth-windows-amd64
@echo "Windows cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-*
geth-windows-386:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
@echo "Windows 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep 386
geth-windows-amd64:
build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep amd64
alltools:
build/env.sh go run build/ci.go install ./cmd/...

README.md

@@ -1,290 +1,314 @@
## Go Ethereum
## Swarm <!-- omit in toc -->
Official Golang implementation of the Ethereum protocol.
[https://swarm.ethereum.org](https://swarm.ethereum.org)
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://godoc.org/github.com/ethereum/go-ethereum)
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum)
[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)
Swarm is a distributed storage platform and content distribution service, a native base layer service of the ethereum web3 stack. The primary objective of Swarm is to provide a decentralized and redundant store for dapp code and data as well as block chain and state data. Swarm is also set out to provide various base layer services for web3, including node-to-node messaging, media streaming, decentralised database services and scalable state-channel infrastructure for decentralised service economies.
Automated builds are available for stable releases and the unstable master branch.
Binary archives are published at https://geth.ethereum.org/downloads/.
[![Travis](https://travis-ci.org/ethersphere/swarm.svg?branch=master)](https://travis-ci.org/ethersphere/swarm)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/ethersphere/orange-lounge?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
## Table of Contents <!-- omit in toc -->
- [Building the source](#Building-the-source)
- [Running Swarm](#Running-Swarm)
- [Verifying that your local Swarm node is running](#Verifying-that-your-local-Swarm-node-is-running)
- [Ethereum Name Service resolution](#Ethereum-Name-Service-resolution)
- [Documentation](#Documentation)
- [Docker](#Docker)
- [Docker tags](#Docker-tags)
- [Swarm command line arguments](#Swarm-command-line-arguments)
- [Developers Guide](#Developers-Guide)
- [Go Environment](#Go-Environment)
- [Vendored Dependencies](#Vendored-Dependencies)
- [Testing](#Testing)
- [Profiling Swarm](#Profiling-Swarm)
- [Metrics and Instrumentation in Swarm](#Metrics-and-Instrumentation-in-Swarm)
- [Visualizing metrics](#Visualizing-metrics)
- [Public Gateways](#Public-Gateways)
- [Swarm Dapps](#Swarm-Dapps)
- [Contributing](#Contributing)
- [License](#License)
## Building the source
For prerequisites and detailed build instructions please read the
[Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum)
on the wiki.
Building Swarm requires Go (version 1.11 or later).
Building geth requires both a Go (version 1.10 or later) and a C compiler.
You can install them using your favourite package manager.
Once the dependencies are installed, run
To simply compile the `swarm` binary without a `GOPATH`:
make geth
or, to build the full suite of utilities:
make all
## Executables
The go-ethereum project comes with several wrappers/executables found in the `cmd` directory.
| Command | Description |
|:----------:|-------------|
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug`). |
| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `swarm` | Swarm daemon and tools. This is the entry point for the Swarm network. `swarm --help` for command line options and subcommands. See [Swarm README](https://github.com/ethereum/go-ethereum/tree/master/swarm) for more information. |
| `puppeth` | a CLI wizard that aids in creating a new Ethereum network. |
## Running geth
Going through all the possible command line flags is out of scope here (please consult our
[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), but we've
enumerated a few common parameter combos to get you up to speed quickly on how you can run your
own Geth instance.
### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum network:
create accounts; transfer funds; deploy and interact with contracts. For this particular use-case
the user doesn't care about years-old historical data, so we can fast-sync quickly to the current
state of the network. To do so:
```
$ geth console
```bash
$ git clone https://github.com/ethersphere/swarm
$ cd swarm
$ make swarm
```
This command will:
You will find the binary under `./build/bin/swarm`.
* Start geth in fast sync mode (default, can be changed with the `--syncmode` flag), causing it to
download more data in exchange for avoiding processing the entire history of the Ethereum network,
which is very CPU intensive.
* Start up Geth's built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console),
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This tool is optional and if you leave it out you can always attach to an already running Geth instance
with `geth attach`.
To build a vendored `swarm` using `go get` you must have `GOPATH` set. Then run:
### A Full node on the Ethereum test network
Transitioning towards developers, if you'd like to play around with creating Ethereum contracts, you
almost certainly would like to do that without any real money involved until you get the hang of the
entire system. In other words, instead of attaching to the main network, you want to join the **test**
network with your node, which is fully equivalent to the main network, but with play-Ether only.
```
$ geth --testnet console
```bash
$ go get -d github.com/ethersphere/swarm
$ go install github.com/ethersphere/swarm/cmd/swarm
```
The `console` subcommand has the exact same meaning as above and they are equally useful on the
testnet too. Please see above for their explanations if you've skipped here.
## Running Swarm
To start a Swarm node, simply run the `swarm` binary:
```bash
$ swarm
```
If you don't have an account yet, then you will be prompted to create one and secure it with a password:
```
Your new account is locked with a password. Please give a password. Do not forget this password.
Passphrase:
Repeat passphrase:
```
If you have multiple accounts created, then you'll have to choose one of the accounts by using the `--bzzaccount` flag.
```bash
$ swarm --bzzaccount <your-account-here>
# example
$ swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1
```
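If you would rather not type the passphrase interactively (for example in scripts), the `--password` flag shown in the Docker examples further below also works when running the binary directly. A minimal sketch, reusing the example account above and an illustrative password file name:

```bash
$ echo 'my-secret-passphrase' > swarm-password.txt
$ swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1 \
        --password swarm-password.txt
```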
### Verifying that your local Swarm node is running
When running, Swarm is accessible through an HTTP API on port 8500.
Confirm that it is up and running by pointing your browser to http://localhost:8500
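From a terminal, you can perform the same check with `curl`; this prints only the HTTP status code returned by the local API (a successful response indicates the node is serving requests):

```bash
$ curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8500/
```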
### Ethereum Name Service resolution
The Ethereum Name Service is the Ethereum equivalent of DNS in the classic web. In order to use ENS to resolve names to Swarm content hashes (e.g. `bzz://theswarm.eth`), `swarm` has to connect to a `geth` instance, which is synced with the Ethereum mainnet. This is done using the `--ens-api` flag.
```bash
$ swarm --bzzaccount <your-account-here> \
--ens-api '$HOME/.ethereum/geth.ipc'
# in our example
$ swarm --bzzaccount 2f1cd699b0bf461dcfbf0098ad8f5587b038f0f1 \
--ens-api '$HOME/.ethereum/geth.ipc'
```
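Once ENS resolution is configured, and assuming the connected `geth` node is synced and the name actually resolves, you can fetch ENS-addressed content through the local HTTP API, for example:

```bash
$ curl http://localhost:8500/bzz:/theswarm.eth/
```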
For more information on usage, features or command line flags, please consult the Documentation.
## Documentation
Swarm documentation can be found at [https://swarm-guide.readthedocs.io](https://swarm-guide.readthedocs.io).
## Docker
Swarm container images are available at Docker Hub: [ethersphere/swarm](https://hub.docker.com/r/ethersphere/swarm)
### Docker tags
* `latest` - latest stable release
* `edge` - latest build from `master`
* `v0.x.y` - specific stable release
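Any of the tags above can be pulled directly, for example:

```bash
$ docker pull ethersphere/swarm:latest
$ docker pull ethersphere/swarm:edge
```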
### Swarm command line arguments
All Swarm command line arguments are supported and can be sent as part of the CMD field to the Docker container.
**Examples:**
Running a Swarm container from the command line
```bash
$ docker run -it ethersphere/swarm \
--debug \
--verbosity 4
```
Running a Swarm container with custom ENS endpoint
```bash
$ docker run -it ethersphere/swarm \
--ens-api http://1.2.3.4:8545 \
--debug \
--verbosity 4
```
Running a Swarm container with metrics enabled
```bash
$ docker run -it ethersphere/swarm \
--debug \
--metrics \
--metrics.influxdb.export \
--metrics.influxdb.endpoint "http://localhost:8086" \
--metrics.influxdb.username "user" \
--metrics.influxdb.password "pass" \
--metrics.influxdb.database "metrics" \
--metrics.influxdb.host.tag "localhost" \
--verbosity 4
```
Running a Swarm container with tracing and pprof server enabled
```bash
$ docker run -it ethersphere/swarm \
--debug \
--tracing \
--tracing.endpoint 127.0.0.1:6831 \
--tracing.svc myswarm \
--pprof \
--pprofaddr 0.0.0.0 \
--pprofport 6060
```
Running a Swarm container with a custom data directory mounted from a volume and a password file to unlock the swarm account
```bash
$ docker run -it -v $PWD/hostdata:/data \
-v $PWD/password:/password \
ethersphere/swarm \
--datadir /data \
--password /password \
--debug \
--verbosity 4
```
## Developers Guide
### Go Environment
We assume that you have Go v1.11 installed, and `GOPATH` is set.
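A quick way to double-check both assumptions:

```bash
$ go version
$ echo $GOPATH
```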
You must have your working copy under `$GOPATH/src/github.com/ethersphere/swarm`.
Most likely you will be working from your fork of `swarm`, let's say from `github.com/nirname/swarm`. Clone or move your fork into the right place:
```bash
$ git clone git@github.com:nirname/swarm.git $GOPATH/src/github.com/ethersphere/swarm
```
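A common follow-up (optional, not required by the project) is to keep the original repository configured as an extra remote so you can pull in upstream changes:

```bash
$ cd $GOPATH/src/github.com/ethersphere/swarm
$ git remote add upstream https://github.com/ethersphere/swarm
$ git fetch upstream
```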
### Vendored Dependencies
All dependencies are tracked in the `vendor` directory. We use `govendor` to manage them.
If you want to add a new dependency, run `govendor fetch <import-path>`, then commit the result.
If you want to update all dependencies to their latest upstream version, run `govendor fetch +v`.
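For example, adding a new dependency (the import path below is purely illustrative) would look roughly like this, assuming `govendor` itself is already installed via `go get -u github.com/kardianos/govendor`:

```bash
$ govendor fetch github.com/example/somepkg
$ git add vendor/
$ git commit -m "vendor: add github.com/example/somepkg"
```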
### Testing
This section explains how to run unit, integration, and end-to-end tests in your development sandbox.
Testing one library:
```bash
$ go test -v -cpu 4 ./api
```
Note: Using the options `-cpu` (number of cores allowed) and `-v` (verbose output even when there is no error) is recommended.
Testing only some methods:
```bash
$ go test -v -cpu 4 ./api -run TestMethod
```
Note: all tests whose names begin with the prefix `TestMethod` will be run, so if you have both `TestMethod` and `TestMethod1`, both will be executed.
Running benchmarks:
```bash
$ go test -v -cpu 4 -bench . -run BenchmarkJoin
```
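The same tooling covers broader runs; for instance, to execute the whole suite, or to hunt for data races in a single package:

```bash
$ go test ./...
$ go test -race -v -cpu 4 ./api
```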
### Profiling Swarm
This section explains how to profile Swarm with the Go `pprof` profiler.
If `swarm` is started with the `--pprof` option, a debugging HTTP server is made available on port 6060.
You can open http://localhost:6060/debug/pprof to see the heap profile, running goroutines and more.
Clicking "full goroutine stack dump" (i.e. opening http://localhost:6060/debug/pprof/goroutine?debug=2) generates a trace that is useful for debugging.
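Beyond the web view, the interactive `go tool pprof` client can be pointed directly at the running server; a typical session (assuming the default port 6060 mentioned above) looks like:

```bash
$ go tool pprof http://localhost:6060/debug/pprof/heap
$ go tool pprof 'http://localhost:6060/debug/pprof/profile?seconds=30'
```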
### Metrics and Instrumentation in Swarm
This section explains how to visualize and use existing Swarm metrics and how to instrument Swarm with a new metric.
The Swarm metrics system is based on the `go-metrics` library.
The most common types of measurements we use in Swarm are `counters` and `resetting timers`. Consult the `go-metrics` documentation for a full reference of the available types.
```go
// incrementing a counter
metrics.GetOrRegisterCounter("network.stream.received_chunks", nil).Inc(1)
// measuring latency with a resetting timer
start := time.Now()
t := metrics.GetOrRegisterResettingTimer("http.request.GET.time", nil)
// ... (code being measured) ...
t.UpdateSince(start)
```
#### Visualizing metrics
Swarm supports an InfluxDB exporter. Consult the help section to learn about the command line arguments used to configure it:
```bash
$ swarm --help | grep metrics
```
We use Grafana and InfluxDB to visualise metrics reported by Swarm. We keep our Grafana dashboards under version control at https://github.com/ethersphere/grafana-dashboards. You could use them or design your own.
We have built [stateth](https://github.com/nonsense/stateth), a tool that automates starting Grafana and InfluxDB and provisioning the dashboards; it requires Docker to be installed.
Once you have `stateth` installed, and you have Docker running locally, you have to:
1. Run `stateth` and keep it running in the background
```bash
$ stateth --rm --grafana-dashboards-folder $GOPATH/src/github.com/ethersphere/grafana-dashboards --influxdb-database metrics
```
2. Run `swarm` with at least the following params:
```bash
--metrics \
--metrics.influxdb.export \
--metrics.influxdb.endpoint "http://localhost:8086" \
--metrics.influxdb.username "admin" \
--metrics.influxdb.password "admin" \
--metrics.influxdb.database "metrics"
```
3. Open Grafana at http://localhost:3000 and view the dashboards to gain insight into Swarm.
## Public Gateways
Swarm offers a local HTTP proxy API that Dapps can use to interact with Swarm. The Ethereum Foundation is hosting a public gateway, which allows free access so that people can try Swarm without running their own node.
The Swarm public gateways are temporary and users should not rely on their existence for production services.
The Swarm public gateway can be found at https://swarm-gateways.net and is always running the latest `stable` Swarm release.
## Swarm Dapps
You can find a few reference Swarm decentralised applications at: https://swarm-gateways.net/bzz:/swarmapps.eth
Their source code can be found at: https://github.com/ethersphere/swarm-dapps
## Contributing
Thank you for considering to help out with the source code! We welcome contributions from
anyone on the internet, and are grateful for even the smallest of fixes!
If you'd like to contribute to Swarm, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit more
complex changes though, please check up with the core devs first on [our Swarm gitter channel](https://gitter.im/ethersphere/orange-lounge)
to ensure those changes are in line with the general philosophy of the project and/or get some
early feedback which can make both your efforts much lighter as well as our review and merge
procedures quick and simple.
Please make sure your contributions adhere to our coding guidelines:
* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* [Code review guidelines](https://github.com/ethersphere/swarm/blob/master/docs/Code-Review-Guidelines.md).
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"
* E.g. "fuse: ignore default manifest entry"
## License
The swarm library (i.e. all code outside of the `cmd` directory) is licensed under the
[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), also
included in our repository in the `COPYING.LESSER` file.
The swarm binaries (i.e. all code inside of the `cmd` directory) are licensed under the
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also included
in our repository in the `COPYING` file.

View File

@ -1,933 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"bytes"
"encoding/hex"
"fmt"
"log"
"math/big"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
const jsondata = `
[
{ "type" : "function", "name" : "balance", "constant" : true },
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] }
]`
const jsondata2 = `
[
{ "type" : "function", "name" : "balance", "constant" : true },
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "type" : "function", "name" : "test", "constant" : false, "inputs" : [ { "name" : "number", "type" : "uint32" } ] },
{ "type" : "function", "name" : "string", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "string" } ] },
{ "type" : "function", "name" : "bool", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "bool" } ] },
{ "type" : "function", "name" : "address", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address" } ] },
{ "type" : "function", "name" : "uint64[2]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] },
{ "type" : "function", "name" : "uint64[]", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] },
{ "type" : "function", "name" : "foo", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] },
{ "type" : "function", "name" : "bar", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] },
{ "type" : "function", "name" : "slice", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] },
{ "type" : "function", "name" : "slice256", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "sliceAddress", "constant" : false, "inputs" : [ { "name" : "inputs", "type" : "address[]" } ] },
{ "type" : "function", "name" : "sliceMultiAddress", "constant" : false, "inputs" : [ { "name" : "a", "type" : "address[]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint256[2][2]" }, { "name" : "b", "type" : "address[]" } ] },
{ "type" : "function", "name" : "nestedArray2", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][2]" } ] },
{ "type" : "function", "name" : "nestedSlice", "constant" : false, "inputs" : [ { "name" : "a", "type" : "uint8[][]" } ] }
]`
func TestReader(t *testing.T) {
Uint256, _ := NewType("uint256", nil)
exp := ABI{
Methods: map[string]Method{
"balance": {
"balance", true, nil, nil,
},
"send": {
"send", false, []Argument{
{"amount", Uint256, false},
}, nil,
},
},
}
abi, err := JSON(strings.NewReader(jsondata))
if err != nil {
t.Error(err)
}
// deep equal fails for some reason
for name, expM := range exp.Methods {
gotM, exist := abi.Methods[name]
if !exist {
t.Errorf("Missing expected method %v", name)
}
if !reflect.DeepEqual(gotM, expM) {
t.Errorf("\nGot abi method: \n%v\ndoes not match expected method\n%v", gotM, expM)
}
}
for name, gotM := range abi.Methods {
expM, exist := exp.Methods[name]
if !exist {
t.Errorf("Found extra method %v", name)
}
if !reflect.DeepEqual(gotM, expM) {
t.Errorf("\nGot abi method: \n%v\ndoes not match expected method\n%v", gotM, expM)
}
}
}
func TestTestNumbers(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata2))
if err != nil {
t.Error(err)
t.FailNow()
}
if _, err := abi.Pack("balance"); err != nil {
t.Error(err)
}
if _, err := abi.Pack("balance", 1); err == nil {
t.Error("expected error for balance(1)")
}
if _, err := abi.Pack("doesntexist", nil); err == nil {
t.Errorf("doesntexist shouldn't exist")
}
if _, err := abi.Pack("doesntexist", 1); err == nil {
t.Errorf("doesntexist(1) shouldn't exist")
}
if _, err := abi.Pack("send", big.NewInt(1000)); err != nil {
t.Error(err)
}
i := new(int)
*i = 1000
if _, err := abi.Pack("send", i); err == nil {
t.Errorf("expected send( ptr ) to throw, requires *big.Int instead of *int")
}
if _, err := abi.Pack("test", uint32(1000)); err != nil {
t.Error(err)
}
}
func TestTestString(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata2))
if err != nil {
t.Error(err)
t.FailNow()
}
if _, err := abi.Pack("string", "hello world"); err != nil {
t.Error(err)
}
}
func TestTestBool(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata2))
if err != nil {
t.Error(err)
t.FailNow()
}
if _, err := abi.Pack("bool", true); err != nil {
t.Error(err)
}
}
func TestTestSlice(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata2))
if err != nil {
t.Error(err)
t.FailNow()
}
slice := make([]uint64, 2)
if _, err := abi.Pack("uint64[2]", slice); err != nil {
t.Error(err)
}
if _, err := abi.Pack("uint64[]", slice); err != nil {
t.Error(err)
}
}
func TestMethodSignature(t *testing.T) {
String, _ := NewType("string", nil)
m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil}
exp := "foo(string,string)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
idexp := crypto.Keccak256([]byte(exp))[:4]
if !bytes.Equal(m.Id(), idexp) {
t.Errorf("expected ids to match %x != %x", m.Id(), idexp)
}
uintt, _ := NewType("uint256", nil)
m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil}
exp = "foo(uint256)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
// Method with tuple arguments
s, _ := NewType("tuple", []ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256[]"},
{Name: "c", Type: "tuple[]", Components: []ArgumentMarshaling{
{Name: "x", Type: "int256"},
{Name: "y", Type: "int256"},
}},
{Name: "d", Type: "tuple[2]", Components: []ArgumentMarshaling{
{Name: "x", Type: "int256"},
{Name: "y", Type: "int256"},
}},
})
m = Method{"foo", false, []Argument{{"s", s, false}, {"bar", String, false}}, nil}
exp = "foo((int256,int256[],(int256,int256)[],(int256,int256)[2]),string)"
if m.Sig() != exp {
t.Error("signature mismatch", exp, "!=", m.Sig())
}
}
func TestMultiPack(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata2))
if err != nil {
t.Error(err)
t.FailNow()
}
sig := crypto.Keccak256([]byte("bar(uint32,uint16)"))[:4]
sig = append(sig, make([]byte, 64)...)
sig[35] = 10
sig[67] = 11
packed, err := abi.Pack("bar", uint32(10), uint16(11))
if err != nil {
t.Error(err)
t.FailNow()
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
}
func ExampleJSON() {
const definition = `[{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"isBar","outputs":[{"name":"","type":"bool"}],"type":"function"}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
log.Fatalln(err)
}
out, err := abi.Pack("isBar", common.HexToAddress("01"))
if err != nil {
log.Fatalln(err)
}
fmt.Printf("%x\n", out)
// Output:
// 1f2c40920000000000000000000000000000000000000000000000000000000000000001
}
func TestInputVariableInputLength(t *testing.T) {
const definition = `[
{ "type" : "function", "name" : "strOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" } ] },
{ "type" : "function", "name" : "bytesOne", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" } ] },
{ "type" : "function", "name" : "strTwo", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "str1", "type" : "string" } ] }
]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
// test one string
strin := "hello world"
strpack, err := abi.Pack("strOne", strin)
if err != nil {
t.Error(err)
}
offset := make([]byte, 32)
offset[31] = 32
length := make([]byte, 32)
length[31] = byte(len(strin))
value := common.RightPadBytes([]byte(strin), 32)
exp := append(offset, append(length, value...)...)
// ignore first 4 bytes of the output. This is the function identifier
strpack = strpack[4:]
if !bytes.Equal(strpack, exp) {
t.Errorf("expected %x, got %x\n", exp, strpack)
}
// test one bytes
btspack, err := abi.Pack("bytesOne", []byte(strin))
if err != nil {
t.Error(err)
}
// ignore first 4 bytes of the output. This is the function identifier
btspack = btspack[4:]
if !bytes.Equal(btspack, exp) {
t.Errorf("expected %x, got %x\n", exp, btspack)
}
// test two strings
str1 := "hello"
str2 := "world"
str2pack, err := abi.Pack("strTwo", str1, str2)
if err != nil {
t.Error(err)
}
offset1 := make([]byte, 32)
offset1[31] = 64
length1 := make([]byte, 32)
length1[31] = byte(len(str1))
value1 := common.RightPadBytes([]byte(str1), 32)
offset2 := make([]byte, 32)
offset2[31] = 128
length2 := make([]byte, 32)
length2[31] = byte(len(str2))
value2 := common.RightPadBytes([]byte(str2), 32)
exp2 := append(offset1, offset2...)
exp2 = append(exp2, append(length1, value1...)...)
exp2 = append(exp2, append(length2, value2...)...)
// ignore first 4 bytes of the output. This is the function identifier
str2pack = str2pack[4:]
if !bytes.Equal(str2pack, exp2) {
t.Errorf("expected %x, got %x\n", exp, str2pack)
}
// test two strings, first > 32, second < 32
str1 = strings.Repeat("a", 33)
str2pack, err = abi.Pack("strTwo", str1, str2)
if err != nil {
t.Error(err)
}
offset1 = make([]byte, 32)
offset1[31] = 64
length1 = make([]byte, 32)
length1[31] = byte(len(str1))
value1 = common.RightPadBytes([]byte(str1), 64)
offset2[31] = 160
exp2 = append(offset1, offset2...)
exp2 = append(exp2, append(length1, value1...)...)
exp2 = append(exp2, append(length2, value2...)...)
// ignore first 4 bytes of the output. This is the function identifier
str2pack = str2pack[4:]
if !bytes.Equal(str2pack, exp2) {
t.Errorf("expected %x, got %x\n", exp, str2pack)
}
// test two strings, first > 32, second >32
str1 = strings.Repeat("a", 33)
str2 = strings.Repeat("a", 33)
str2pack, err = abi.Pack("strTwo", str1, str2)
if err != nil {
t.Error(err)
}
offset1 = make([]byte, 32)
offset1[31] = 64
length1 = make([]byte, 32)
length1[31] = byte(len(str1))
value1 = common.RightPadBytes([]byte(str1), 64)
offset2 = make([]byte, 32)
offset2[31] = 160
length2 = make([]byte, 32)
length2[31] = byte(len(str2))
value2 = common.RightPadBytes([]byte(str2), 64)
exp2 = append(offset1, offset2...)
exp2 = append(exp2, append(length1, value1...)...)
exp2 = append(exp2, append(length2, value2...)...)
// ignore first 4 bytes of the output. This is the function identifier
str2pack = str2pack[4:]
if !bytes.Equal(str2pack, exp2) {
t.Errorf("expected %x, got %x\n", exp, str2pack)
}
}
func TestInputFixedArrayAndVariableInputLength(t *testing.T) {
const definition = `[
{ "type" : "function", "name" : "fixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "fixedArrBytes", "constant" : true, "inputs" : [ { "name" : "str", "type" : "bytes" }, { "name" : "fixedArr", "type" : "uint256[2]" } ] },
{ "type" : "function", "name" : "mixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr", "type": "uint256[2]" }, { "name" : "dynArr", "type": "uint256[]" } ] },
{ "type" : "function", "name" : "doubleFixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "fixedArr2", "type": "uint256[3]" } ] },
{ "type" : "function", "name" : "multipleMixedArrStr", "constant" : true, "inputs" : [ { "name" : "str", "type" : "string" }, { "name" : "fixedArr1", "type": "uint256[2]" }, { "name" : "dynArr", "type" : "uint256[]" }, { "name" : "fixedArr2", "type" : "uint256[3]" } ] }
]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Error(err)
}
// test string, fixed array uint256[2]
strin := "hello world"
arrin := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
fixedArrStrPack, err := abi.Pack("fixedArrStr", strin, arrin)
if err != nil {
t.Error(err)
}
// generate expected output
offset := make([]byte, 32)
offset[31] = 96
length := make([]byte, 32)
length[31] = byte(len(strin))
strvalue := common.RightPadBytes([]byte(strin), 32)
arrinvalue1 := common.LeftPadBytes(arrin[0].Bytes(), 32)
arrinvalue2 := common.LeftPadBytes(arrin[1].Bytes(), 32)
exp := append(offset, arrinvalue1...)
exp = append(exp, arrinvalue2...)
exp = append(exp, append(length, strvalue...)...)
// ignore first 4 bytes of the output. This is the function identifier
fixedArrStrPack = fixedArrStrPack[4:]
if !bytes.Equal(fixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, fixedArrStrPack)
}
// test byte array, fixed array uint256[2]
bytesin := []byte(strin)
arrin = [2]*big.Int{big.NewInt(1), big.NewInt(2)}
fixedArrBytesPack, err := abi.Pack("fixedArrBytes", bytesin, arrin)
if err != nil {
t.Error(err)
}
// generate expected output
offset = make([]byte, 32)
offset[31] = 96
length = make([]byte, 32)
length[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
arrinvalue1 = common.LeftPadBytes(arrin[0].Bytes(), 32)
arrinvalue2 = common.LeftPadBytes(arrin[1].Bytes(), 32)
exp = append(offset, arrinvalue1...)
exp = append(exp, arrinvalue2...)
exp = append(exp, append(length, strvalue...)...)
// ignore first 4 bytes of the output. This is the function identifier
fixedArrBytesPack = fixedArrBytesPack[4:]
if !bytes.Equal(fixedArrBytesPack, exp) {
t.Errorf("expected %x, got %x\n", exp, fixedArrBytesPack)
}
// test string, fixed array uint256[2], dynamic array uint256[]
strin = "hello world"
fixedarrin := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
dynarrin := []*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
mixedArrStrPack, err := abi.Pack("mixedArrStr", strin, fixedarrin, dynarrin)
if err != nil {
t.Error(err)
}
// generate expected output
stroffset := make([]byte, 32)
stroffset[31] = 128
strlength := make([]byte, 32)
strlength[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrinvalue1 := common.LeftPadBytes(fixedarrin[0].Bytes(), 32)
fixedarrinvalue2 := common.LeftPadBytes(fixedarrin[1].Bytes(), 32)
dynarroffset := make([]byte, 32)
dynarroffset[31] = byte(160 + ((len(strin)/32)+1)*32)
dynarrlength := make([]byte, 32)
dynarrlength[31] = byte(len(dynarrin))
dynarrinvalue1 := common.LeftPadBytes(dynarrin[0].Bytes(), 32)
dynarrinvalue2 := common.LeftPadBytes(dynarrin[1].Bytes(), 32)
dynarrinvalue3 := common.LeftPadBytes(dynarrin[2].Bytes(), 32)
exp = append(stroffset, fixedarrinvalue1...)
exp = append(exp, fixedarrinvalue2...)
exp = append(exp, dynarroffset...)
exp = append(exp, append(strlength, strvalue...)...)
dynarrarg := append(dynarrlength, dynarrinvalue1...)
dynarrarg = append(dynarrarg, dynarrinvalue2...)
dynarrarg = append(dynarrarg, dynarrinvalue3...)
exp = append(exp, dynarrarg...)
// ignore first 4 bytes of the output. This is the function identifier
mixedArrStrPack = mixedArrStrPack[4:]
if !bytes.Equal(mixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, mixedArrStrPack)
}
// test string, fixed array uint256[2], fixed array uint256[3]
strin = "hello world"
fixedarrin1 := [2]*big.Int{big.NewInt(1), big.NewInt(2)}
fixedarrin2 := [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
doubleFixedArrStrPack, err := abi.Pack("doubleFixedArrStr", strin, fixedarrin1, fixedarrin2)
if err != nil {
t.Error(err)
}
// generate expected output
stroffset = make([]byte, 32)
stroffset[31] = 192
strlength = make([]byte, 32)
strlength[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrin1value1 := common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
fixedarrin1value2 := common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
fixedarrin2value1 := common.LeftPadBytes(fixedarrin2[0].Bytes(), 32)
fixedarrin2value2 := common.LeftPadBytes(fixedarrin2[1].Bytes(), 32)
fixedarrin2value3 := common.LeftPadBytes(fixedarrin2[2].Bytes(), 32)
exp = append(stroffset, fixedarrin1value1...)
exp = append(exp, fixedarrin1value2...)
exp = append(exp, fixedarrin2value1...)
exp = append(exp, fixedarrin2value2...)
exp = append(exp, fixedarrin2value3...)
exp = append(exp, append(strlength, strvalue...)...)
// ignore first 4 bytes of the output. This is the function identifier
doubleFixedArrStrPack = doubleFixedArrStrPack[4:]
if !bytes.Equal(doubleFixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, doubleFixedArrStrPack)
}
// test string, fixed array uint256[2], dynamic array uint256[], fixed array uint256[3]
strin = "hello world"
fixedarrin1 = [2]*big.Int{big.NewInt(1), big.NewInt(2)}
dynarrin = []*big.Int{big.NewInt(1), big.NewInt(2)}
fixedarrin2 = [3]*big.Int{big.NewInt(1), big.NewInt(2), big.NewInt(3)}
multipleMixedArrStrPack, err := abi.Pack("multipleMixedArrStr", strin, fixedarrin1, dynarrin, fixedarrin2)
if err != nil {
t.Error(err)
}
// generate expected output
stroffset = make([]byte, 32)
stroffset[31] = 224
strlength = make([]byte, 32)
strlength[31] = byte(len(strin))
strvalue = common.RightPadBytes([]byte(strin), 32)
fixedarrin1value1 = common.LeftPadBytes(fixedarrin1[0].Bytes(), 32)
fixedarrin1value2 = common.LeftPadBytes(fixedarrin1[1].Bytes(), 32)
dynarroffset = U256(big.NewInt(int64(256 + ((len(strin)/32)+1)*32)))
dynarrlength = make([]byte, 32)
dynarrlength[31] = byte(len(dynarrin))
dynarrinvalue1 = common.LeftPadBytes(dynarrin[0].Bytes(), 32)
dynarrinvalue2 = common.LeftPadBytes(dynarrin[1].Bytes(), 32)
fixedarrin2value1 = common.LeftPadBytes(fixedarrin2[0].Bytes(), 32)
fixedarrin2value2 = common.LeftPadBytes(fixedarrin2[1].Bytes(), 32)
fixedarrin2value3 = common.LeftPadBytes(fixedarrin2[2].Bytes(), 32)
exp = append(stroffset, fixedarrin1value1...)
exp = append(exp, fixedarrin1value2...)
exp = append(exp, dynarroffset...)
exp = append(exp, fixedarrin2value1...)
exp = append(exp, fixedarrin2value2...)
exp = append(exp, fixedarrin2value3...)
exp = append(exp, append(strlength, strvalue...)...)
dynarrarg = append(dynarrlength, dynarrinvalue1...)
dynarrarg = append(dynarrarg, dynarrinvalue2...)
exp = append(exp, dynarrarg...)
// ignore first 4 bytes of the output. This is the function identifier
multipleMixedArrStrPack = multipleMixedArrStrPack[4:]
if !bytes.Equal(multipleMixedArrStrPack, exp) {
t.Errorf("expected %x, got %x\n", exp, multipleMixedArrStrPack)
}
}
func TestDefaultFunctionParsing(t *testing.T) {
const definition = `[{ "name" : "balance" }]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
if _, ok := abi.Methods["balance"]; !ok {
t.Error("expected 'balance' to be present")
}
}
func TestBareEvents(t *testing.T) {
const definition = `[
{ "type" : "event", "name" : "balance" },
{ "type" : "event", "name" : "anon", "anonymous" : true},
{ "type" : "event", "name" : "args", "inputs" : [{ "indexed":false, "name":"arg0", "type":"uint256" }, { "indexed":true, "name":"arg1", "type":"address" }] },
{ "type" : "event", "name" : "tuple", "inputs" : [{ "indexed":false, "name":"t", "type":"tuple", "components":[{"name":"a", "type":"uint256"}] }, { "indexed":true, "name":"arg1", "type":"address" }] }
]`
arg0, _ := NewType("uint256", nil)
arg1, _ := NewType("address", nil)
tuple, _ := NewType("tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}})
expectedEvents := map[string]struct {
Anonymous bool
Args []Argument
}{
"balance": {false, nil},
"anon": {true, nil},
"args": {false, []Argument{
{Name: "arg0", Type: arg0, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
"tuple": {false, []Argument{
{Name: "t", Type: tuple, Indexed: false},
{Name: "arg1", Type: arg1, Indexed: true},
}},
}
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
if len(abi.Events) != len(expectedEvents) {
t.Fatalf("invalid number of events after parsing, want %d, got %d", len(expectedEvents), len(abi.Events))
}
for name, exp := range expectedEvents {
got, ok := abi.Events[name]
if !ok {
t.Errorf("could not found event %s", name)
continue
}
if got.Anonymous != exp.Anonymous {
t.Errorf("invalid anonymous indication for event %s, want %v, got %v", name, exp.Anonymous, got.Anonymous)
}
if len(got.Inputs) != len(exp.Args) {
t.Errorf("invalid number of args, want %d, got %d", len(exp.Args), len(got.Inputs))
continue
}
for i, arg := range exp.Args {
if arg.Name != got.Inputs[i].Name {
t.Errorf("events[%s].Input[%d] has an invalid name, want %s, got %s", name, i, arg.Name, got.Inputs[i].Name)
}
if arg.Indexed != got.Inputs[i].Indexed {
t.Errorf("events[%s].Input[%d] has an invalid indexed indication, want %v, got %v", name, i, arg.Indexed, got.Inputs[i].Indexed)
}
if arg.Type.T != got.Inputs[i].Type.T {
t.Errorf("events[%s].Input[%d] has an invalid type, want %x, got %x", name, i, arg.Type.T, got.Inputs[i].Type.T)
}
}
}
}
// TestUnpackEvent is based on this contract:
// contract T {
// event received(address sender, uint amount, bytes memo);
// event receivedAddr(address sender);
// function receive(bytes memo) external payable {
// received(msg.sender, msg.value, memo);
// receivedAddr(msg.sender);
// }
// }
// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
func TestUnpackEvent(t *testing.T) {
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
const hexdata = `000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`
data, err := hex.DecodeString(hexdata)
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}
type ReceivedEvent struct {
Sender common.Address
Amount *big.Int
Memo []byte
}
var ev ReceivedEvent
err = abi.Unpack(&ev, "received", data)
if err != nil {
t.Error(err)
}
type ReceivedAddrEvent struct {
Sender common.Address
}
var receivedAddrEv ReceivedAddrEvent
err = abi.Unpack(&receivedAddrEv, "receivedAddr", data)
if err != nil {
t.Error(err)
}
}
func TestUnpackEventIntoMap(t *testing.T) {
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
const hexdata = `000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`
data, err := hex.DecodeString(hexdata)
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}
receivedMap := map[string]interface{}{}
expectedReceivedMap := map[string]interface{}{
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err := abi.UnpackIntoMap(receivedMap, "received", data); err != nil {
t.Error(err)
}
if len(receivedMap) != 3 {
t.Error("unpacked `received` map expected to have length 3")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked `received` map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked `received` map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked `received` map does not match expected map")
}
receivedAddrMap := map[string]interface{}{}
if err = abi.UnpackIntoMap(receivedAddrMap, "receivedAddr", data); err != nil {
t.Error(err)
}
if len(receivedAddrMap) != 1 {
t.Error("unpacked `receivedAddr` map expected to have length 1")
}
if receivedAddrMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked `receivedAddr` map does not match expected map")
}
}
func TestUnpackMethodIntoMap(t *testing.T) {
const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
const hexdata = `00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000015800000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000158000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000001580000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000015800000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000158`
data, err := hex.DecodeString(hexdata)
if err != nil {
t.Fatal(err)
}
if len(data)%32 != 0 {
t.Errorf("len(data) is %d, want a multiple of 32", len(data))
}
// Tests a method with no outputs
receiveMap := map[string]interface{}{}
if err = abi.UnpackIntoMap(receiveMap, "receive", data); err != nil {
t.Error(err)
}
if len(receiveMap) > 0 {
t.Error("unpacked `receive` map expected to have length 0")
}
// Tests a method with only outputs
sendMap := map[string]interface{}{}
if err = abi.UnpackIntoMap(sendMap, "send", data); err != nil {
t.Error(err)
}
if len(sendMap) != 1 {
t.Error("unpacked `send` map expected to have length 1")
}
if sendMap["amount"].(*big.Int).Cmp(big.NewInt(1)) != 0 {
t.Error("unpacked `send` map expected `amount` value of 1")
}
// Tests a method with outputs and inputs
getMap := map[string]interface{}{}
if err = abi.UnpackIntoMap(getMap, "get", data); err != nil {
t.Error(err)
}
if len(getMap) != 1 {
t.Error("unpacked `get` map expected to have length 1")
}
expectedBytes := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 88, 0}
if !bytes.Equal(getMap["hash"].([]byte), expectedBytes) {
t.Errorf("unpacked `get` map expected `hash` value of %v", expectedBytes)
}
}
func TestUnpackIntoMapNamingConflict(t *testing.T) {
// Two methods have the same name
var abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"get","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[],"name":"send","outputs":[{"name":"amount","type":"uint256"}],"payable":true,"stateMutability":"payable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"get","outputs":[{"name":"hash","type":"bytes"}],"payable":true,"stateMutability":"payable","type":"function"}]`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
var hexdata = `00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`
data, err := hex.DecodeString(hexdata)
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}
getMap := map[string]interface{}{}
if err = abi.UnpackIntoMap(getMap, "get", data); err == nil {
t.Error("naming conflict between two methods; error expected")
}
// Two events have the same name
abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"received","type":"event"}]`
abi, err = JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
hexdata = `000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158`
data, err = hex.DecodeString(hexdata)
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}
receivedMap := map[string]interface{}{}
if err = abi.UnpackIntoMap(receivedMap, "received", data); err != nil {
t.Error("naming conflict between two events; no error expected")
}
if len(receivedMap) != 1 {
t.Error("naming conflict between two events; event defined latest in the abi expected to be used")
}
// Method and event have the same name
abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"received","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
abi, err = JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}
if err = abi.UnpackIntoMap(receivedMap, "received", data); err == nil {
t.Error("naming conflict between an event and a method; error expected")
}
// Conflict is case sensitive
abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"received","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"Received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]`
abi, err = JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
if len(data)%32 == 0 {
t.Errorf("len(data) is %d, want a non-multiple of 32", len(data))
}
expectedReceivedMap := map[string]interface{}{
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err = abi.UnpackIntoMap(receivedMap, "Received", data); err != nil {
t.Error(err)
}
if len(receivedMap) != 3 {
t.Error("unpacked `received` map expected to have length 3")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked `received` map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked `received` map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked `received` map does not match expected map")
}
}
func TestABI_MethodById(t *testing.T) {
const abiJSON = `[
{"type":"function","name":"receive","constant":false,"inputs":[{"name":"memo","type":"bytes"}],"outputs":[],"payable":true,"stateMutability":"payable"},
{"type":"event","name":"received","anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}]},
{"type":"function","name":"fixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"}]},
{"type":"function","name":"fixedArrBytes","constant":true,"inputs":[{"name":"str","type":"bytes"},{"name":"fixedArr","type":"uint256[2]"}]},
{"type":"function","name":"mixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"}]},
{"type":"function","name":"doubleFixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"fixedArr2","type":"uint256[3]"}]},
{"type":"function","name":"multipleMixedArrStr","constant":true,"inputs":[{"name":"str","type":"string"},{"name":"fixedArr1","type":"uint256[2]"},{"name":"dynArr","type":"uint256[]"},{"name":"fixedArr2","type":"uint256[3]"}]},
{"type":"function","name":"balance","constant":true},
{"type":"function","name":"send","constant":false,"inputs":[{"name":"amount","type":"uint256"}]},
{"type":"function","name":"test","constant":false,"inputs":[{"name":"number","type":"uint32"}]},
{"type":"function","name":"string","constant":false,"inputs":[{"name":"inputs","type":"string"}]},
{"type":"function","name":"bool","constant":false,"inputs":[{"name":"inputs","type":"bool"}]},
{"type":"function","name":"address","constant":false,"inputs":[{"name":"inputs","type":"address"}]},
{"type":"function","name":"uint64[2]","constant":false,"inputs":[{"name":"inputs","type":"uint64[2]"}]},
{"type":"function","name":"uint64[]","constant":false,"inputs":[{"name":"inputs","type":"uint64[]"}]},
{"type":"function","name":"foo","constant":false,"inputs":[{"name":"inputs","type":"uint32"}]},
{"type":"function","name":"bar","constant":false,"inputs":[{"name":"inputs","type":"uint32"},{"name":"string","type":"uint16"}]},
{"type":"function","name":"_slice","constant":false,"inputs":[{"name":"inputs","type":"uint32[2]"}]},
{"type":"function","name":"__slice256","constant":false,"inputs":[{"name":"inputs","type":"uint256[2]"}]},
{"type":"function","name":"sliceAddress","constant":false,"inputs":[{"name":"inputs","type":"address[]"}]},
{"type":"function","name":"sliceMultiAddress","constant":false,"inputs":[{"name":"a","type":"address[]"},{"name":"b","type":"address[]"}]}
]
`
abi, err := JSON(strings.NewReader(abiJSON))
if err != nil {
t.Fatal(err)
}
for name, m := range abi.Methods {
a := fmt.Sprintf("%v", m)
m2, err := abi.MethodById(m.Id())
if err != nil {
t.Fatalf("Failed to look up ABI method: %v", err)
}
b := fmt.Sprintf("%v", m2)
if a != b {
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
}
}
// Also test empty
if _, err := abi.MethodById([]byte{0x00}); err == nil {
t.Errorf("Expected error, too short to decode data")
}
if _, err := abi.MethodById([]byte{}); err == nil {
t.Errorf("Expected error, too short to decode data")
}
if _, err := abi.MethodById(nil); err == nil {
t.Errorf("Expected error, nil is short to decode data")
}
}
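
For reference, a minimal standalone sketch (not part of this diff) of where the 4-byte identifiers used by MethodById come from: an ID is the first four bytes of the Keccak256 hash of the method's canonical signature. The transfer(address,uint256) ABI below is purely illustrative.

package main

import (
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	const def = `[{"type":"function","name":"transfer","inputs":[{"name":"to","type":"address"},{"name":"value","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// A method ID is the first 4 bytes of Keccak256 over the canonical signature.
	selector := crypto.Keccak256([]byte("transfer(address,uint256)"))[:4]
	m, err := parsed.MethodById(selector)
	fmt.Println(m, err) // should resolve to the transfer method
}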


@ -1,82 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package backends_test
import (
"context"
"math/big"
"testing"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
func TestSimulatedBackend(t *testing.T) {
var gasLimit uint64 = 8000029
key, _ := crypto.GenerateKey() // nolint: gosec
auth := bind.NewKeyedTransactor(key)
genAlloc := make(core.GenesisAlloc)
genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)}
sim := backends.NewSimulatedBackend(genAlloc, gasLimit)
// should return an error if the tx is not found
txHash := common.HexToHash("2")
_, isPending, err := sim.TransactionByHash(context.Background(), txHash)
if isPending {
t.Fatal("transaction should not be pending")
}
if err != ethereum.NotFound {
t.Fatalf("err should be `ethereum.NotFound` but received %v", err)
}
// generate a transaction and confirm you can retrieve it
code := `6060604052600a8060106000396000f360606040526008565b00`
var gas uint64 = 3000000
tx := types.NewContractCreation(0, big.NewInt(0), gas, big.NewInt(1), common.FromHex(code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, key)
err = sim.SendTransaction(context.Background(), tx)
if err != nil {
t.Fatal("error sending transaction")
}
txHash = tx.Hash()
_, isPending, err = sim.TransactionByHash(context.Background(), txHash)
if err != nil {
t.Fatalf("error getting transaction with hash: %v", txHash.String())
}
if !isPending {
t.Fatal("transaction should have pending status")
}
sim.Commit()
tx, isPending, err = sim.TransactionByHash(context.Background(), txHash)
if err != nil {
t.Fatalf("error getting transaction with hash: %v", txHash.String())
}
if isPending {
t.Fatal("transaction should not have pending status")
}
}
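
A minimal usage sketch (assumptions: a plain value transfer with gas price 1 wei and a made-up recipient address) of the pending-until-Commit behaviour the test above checks: committed state such as BalanceAt only reflects a transaction once Commit() has been called.

package main

import (
	"context"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	auth := bind.NewKeyedTransactor(key)
	sim := backends.NewSimulatedBackend(core.GenesisAlloc{
		auth.From: {Balance: big.NewInt(1000000000)},
	}, 8000000)

	to := common.HexToAddress("0x0000000000000000000000000000000000000001")
	tx := types.NewTransaction(0, to, big.NewInt(1000), 21000, big.NewInt(1), nil)
	signed, _ := types.SignTx(tx, types.HomesteadSigner{}, key)

	ctx := context.Background()
	if err := sim.SendTransaction(ctx, signed); err != nil {
		panic(err)
	}
	before, _ := sim.BalanceAt(ctx, to, nil) // still 0: the tx is only pending
	sim.Commit()                             // "mines" the pending block
	after, _ := sim.BalanceAt(ctx, to, nil)  // now 1000
	fmt.Println(before, after)
}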


@ -1,373 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind_test
import (
"bytes"
"context"
"math/big"
"strings"
"testing"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
)
type mockCaller struct {
codeAtBlockNumber *big.Int
callContractBlockNumber *big.Int
}
func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
mc.codeAtBlockNumber = blockNumber
return []byte{1, 2, 3}, nil
}
func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
mc.callContractBlockNumber = blockNumber
return nil, nil
}
func TestPassingBlockNumber(t *testing.T) {
mc := &mockCaller{}
bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
Methods: map[string]abi.Method{
"something": {
Name: "something",
Outputs: abi.Arguments{},
},
},
}, mc, nil, nil)
var ret string
blockNumber := big.NewInt(42)
bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, &ret, "something")
if mc.callContractBlockNumber != blockNumber {
t.Fatalf("CallContract() was not passed the block number")
}
if mc.codeAtBlockNumber != blockNumber {
t.Fatalf("CodeAt() was not passed the block number")
}
bc.Call(&bind.CallOpts{}, &ret, "something")
if mc.callContractBlockNumber != nil {
t.Fatalf("CallContract() was passed a block number when it should not have been")
}
if mc.codeAtBlockNumber != nil {
t.Fatalf("CodeAt() was passed a block number when it should not have been")
}
}
const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158"
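// hexData above is the shared non-indexed data blob for the UnpackLogIntoMap tests
// below. Read as 32-byte ABI words it decodes to:
//   word 0: sender   = 0x376c47978271565f56DEB45495afa69E59c16Ab2
//   word 1: amount   = 1
//   word 2: offset   = 0x60 (96 bytes) to the dynamic memo field
//   word 3: memo len = 1
//   tail:   memo     = 0x58 (decimal 88)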
func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("testName"))
mockLog := types.Log{
Address: common.HexToAddress("0x0"),
Topics: []common.Hash{
common.HexToHash("0x0"),
hash,
},
Data: hexutil.MustDecode(hexData),
BlockNumber: uint64(26),
TxHash: common.HexToHash("0x0"),
TxIndex: 111,
BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
Index: 7,
Removed: false,
}
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"name": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
t.Error(err)
}
if len(receivedMap) != 4 {
t.Fatal("unpacked map expected to have length 4")
}
if receivedMap["name"] != expectedReceivedMap["name"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked map does not match expected map")
}
}
func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) {
sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"})
if err != nil {
t.Fatal(err)
}
hash := crypto.Keccak256Hash(sliceBytes)
mockLog := types.Log{
Address: common.HexToAddress("0x0"),
Topics: []common.Hash{
common.HexToHash("0x0"),
hash,
},
Data: hexutil.MustDecode(hexData),
BlockNumber: uint64(26),
TxHash: common.HexToHash("0x0"),
TxIndex: 111,
BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
Index: 7,
Removed: false,
}
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"names","type":"string[]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"names": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
t.Error(err)
}
if len(receivedMap) != 4 {
t.Fatal("unpacked map expected to have length 4")
}
if receivedMap["names"] != expectedReceivedMap["names"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked map does not match expected map")
}
}
func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) {
arrBytes, err := rlp.EncodeToBytes([2]common.Address{common.HexToAddress("0x0"), common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")})
if err != nil {
t.Fatal(err)
}
hash := crypto.Keccak256Hash(arrBytes)
mockLog := types.Log{
Address: common.HexToAddress("0x0"),
Topics: []common.Hash{
common.HexToHash("0x0"),
hash,
},
Data: hexutil.MustDecode(hexData),
BlockNumber: uint64(26),
TxHash: common.HexToHash("0x0"),
TxIndex: 111,
BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
Index: 7,
Removed: false,
}
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"addresses","type":"address[2]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"addresses": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
t.Error(err)
}
if len(receivedMap) != 4 {
t.Fatal("unpacked map expected to have length 4")
}
if receivedMap["addresses"] != expectedReceivedMap["addresses"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked map does not match expected map")
}
}
func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) {
mockAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")
addrBytes := mockAddress.Bytes()
hash := crypto.Keccak256Hash([]byte("mockFunction(address,uint)"))
functionSelector := hash[:4]
functionTyBytes := append(addrBytes, functionSelector...)
var functionTy [24]byte
copy(functionTy[:], functionTyBytes[0:24])
mockLog := types.Log{
Address: common.HexToAddress("0x0"),
Topics: []common.Hash{
common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
common.BytesToHash(functionTyBytes),
},
Data: hexutil.MustDecode(hexData),
BlockNumber: uint64(26),
TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"),
TxIndex: 111,
BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
Index: 7,
Removed: false,
}
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"function","type":"function"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"function": functionTy,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
t.Error(err)
}
if len(receivedMap) != 4 {
t.Fatal("unpacked map expected to have length 4")
}
if receivedMap["function"] != expectedReceivedMap["function"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked map does not match expected map")
}
}
func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) {
byts := []byte{1, 2, 3, 4, 5}
hash := crypto.Keccak256Hash(byts)
mockLog := types.Log{
Address: common.HexToAddress("0x0"),
Topics: []common.Hash{
common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
hash,
},
Data: hexutil.MustDecode(hexData),
BlockNumber: uint64(26),
TxHash: common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"),
TxIndex: 111,
BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
Index: 7,
Removed: false,
}
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"content","type":"bytes"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
receivedMap := make(map[string]interface{})
expectedReceivedMap := map[string]interface{}{
"content": hash,
"sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"),
"amount": big.NewInt(1),
"memo": []byte{88},
}
if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err != nil {
t.Error(err)
}
if len(receivedMap) != 4 {
t.Fatal("unpacked map expected to have length 4")
}
if receivedMap["content"] != expectedReceivedMap["content"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["sender"] != expectedReceivedMap["sender"] {
t.Error("unpacked map does not match expected map")
}
if receivedMap["amount"].(*big.Int).Cmp(expectedReceivedMap["amount"].(*big.Int)) != 0 {
t.Error("unpacked map does not match expected map")
}
if !bytes.Equal(receivedMap["memo"].([]byte), expectedReceivedMap["memo"].([]byte)) {
t.Error("unpacked map does not match expected map")
}
}
func TestUnpackIntoMapNamingConflict(t *testing.T) {
hash := crypto.Keccak256Hash([]byte("testName"))
mockLog := types.Log{
Address: common.HexToAddress("0x0"),
Topics: []common.Hash{
common.HexToHash("0x0"),
hash,
},
Data: hexutil.MustDecode(hexData),
BlockNumber: uint64(26),
TxHash: common.HexToHash("0x0"),
TxIndex: 111,
BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}),
Index: 7,
Removed: false,
}
abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"received","type":"event"}]`
parsedAbi, _ := abi.JSON(strings.NewReader(abiString))
bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil)
receivedMap := make(map[string]interface{})
if err := bc.UnpackLogIntoMap(receivedMap, "received", mockLog); err == nil {
t.Error("naming conflict between two events; error expected")
}
}
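
A small aside (a sketch, not part of the diff): indexed event arguments of non-value types (string, bytes, slices, arrays) are carried in the log topics only as a 32-byte Keccak256 hash, which is why the tests above expect the hash, not the original value, in the unpacked map.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// The topic for an indexed "testName" string argument is just its hash.
	fmt.Println(crypto.Keccak256Hash([]byte("testName")).Hex())
}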

File diff suppressed because one or more lines are too long


@ -1,103 +0,0 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind
import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
)
func TestMakeTopics(t *testing.T) {
type args struct {
query [][]interface{}
}
tests := []struct {
name string
args args
want [][]common.Hash
wantErr bool
}{
{
"support fixed byte types, right padded to 32 bytes",
args{[][]interface{}{{[5]byte{1, 2, 3, 4, 5}}}},
[][]common.Hash{{common.Hash{1, 2, 3, 4, 5}}},
false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := makeTopics(tt.args.query...)
if (err != nil) != tt.wantErr {
t.Errorf("makeTopics() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("makeTopics() = %v, want %v", got, tt.want)
}
})
}
}
func TestParseTopics(t *testing.T) {
type bytesStruct struct {
StaticBytes [5]byte
}
bytesType, _ := abi.NewType("bytes5", nil)
type args struct {
createObj func() interface{}
resultObj func() interface{}
fields abi.Arguments
topics []common.Hash
}
tests := []struct {
name string
args args
wantErr bool
}{
{
name: "support fixed byte types, right padded to 32 bytes",
args: args{
createObj: func() interface{} { return &bytesStruct{} },
resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} },
fields: abi.Arguments{abi.Argument{
Name: "staticBytes",
Type: bytesType,
Indexed: true,
}},
topics: []common.Hash{
{1, 2, 3, 4, 5},
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
createObj := tt.args.createObj()
if err := parseTopics(createObj, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
t.Errorf("parseTopics() error = %v, wantErr %v", err, tt.wantErr)
}
resultObj := tt.args.resultObj()
if !reflect.DeepEqual(createObj, resultObj) {
t.Errorf("parseTopics() = %v, want %v", createObj, resultObj)
}
})
}
}


@ -1,94 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind_test
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
var waitDeployedTests = map[string]struct {
code string
gas uint64
wantAddress common.Address
wantErr error
}{
"successful deploy": {
code: `6060604052600a8060106000396000f360606040526008565b00`,
gas: 3000000,
wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"),
},
"empty code": {
code: ``,
gas: 300000,
wantErr: bind.ErrNoCodeAfterDeploy,
wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"),
},
}
func TestWaitDeployed(t *testing.T) {
for name, test := range waitDeployedTests {
backend := backends.NewSimulatedBackend(
core.GenesisAlloc{
crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000)},
}, 10000000,
)
// Create the transaction.
tx := types.NewContractCreation(0, big.NewInt(0), test.gas, big.NewInt(1), common.FromHex(test.code))
tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)
// Wait for it to get mined in the background.
var (
err error
address common.Address
mined = make(chan struct{})
ctx = context.Background()
)
go func() {
address, err = bind.WaitDeployed(ctx, backend, tx)
close(mined)
}()
// Send and mine the transaction.
backend.SendTransaction(ctx, tx)
backend.Commit()
select {
case <-mined:
if err != test.wantErr {
t.Errorf("test %q: error mismatch: got %q, want %q", name, err, test.wantErr)
}
if address != test.wantAddress {
t.Errorf("test %q: unexpected contract address %s", name, address.Hex())
}
case <-time.After(2 * time.Second):
t.Errorf("test %q: timeout", name)
}
}
}
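
As an aside, a sketch (not part of the diff) showing that the hard-coded wantAddress above is simply the deterministic contract-creation address for testKey at nonce 0, recomputable with crypto.CreateAddress:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	sender := crypto.PubkeyToAddress(key.PublicKey)
	// Contract addresses are derived from the creator address and creation nonce.
	addr := crypto.CreateAddress(sender, 0)
	fmt.Printf("%x\n", addr) // should print 3a220f351252089d385b29beca14e27f204c296a
}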


@ -1,428 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"bytes"
"encoding/hex"
"encoding/json"
"math/big"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var jsonEventTransfer = []byte(`{
"anonymous": false,
"inputs": [
{
"indexed": true, "name": "from", "type": "address"
}, {
"indexed": true, "name": "to", "type": "address"
}, {
"indexed": false, "name": "value", "type": "uint256"
}],
"name": "Transfer",
"type": "event"
}`)
var jsonEventPledge = []byte(`{
"anonymous": false,
"inputs": [{
"indexed": false, "name": "who", "type": "address"
}, {
"indexed": false, "name": "wad", "type": "uint128"
}, {
"indexed": false, "name": "currency", "type": "bytes3"
}],
"name": "Pledge",
"type": "event"
}`)
var jsonEventMixedCase = []byte(`{
"anonymous": false,
"inputs": [{
"indexed": false, "name": "value", "type": "uint256"
}, {
"indexed": false, "name": "_value", "type": "uint256"
}, {
"indexed": false, "name": "Value", "type": "uint256"
}],
"name": "MixedCase",
"type": "event"
}`)
// 1000000
var transferData1 = "00000000000000000000000000000000000000000000000000000000000f4240"
// "0x00Ce0d46d924CC8437c806721496599FC3FFA268", 2218516807680, "usd"
var pledgeData1 = "00000000000000000000000000ce0d46d924cc8437c806721496599fc3ffa2680000000000000000000000000000000000000000000000000000020489e800007573640000000000000000000000000000000000000000000000000000000000"
// 1000000,2218516807680,1000001
var mixedCaseData1 = "00000000000000000000000000000000000000000000000000000000000f42400000000000000000000000000000000000000000000000000000020489e8000000000000000000000000000000000000000000000000000000000000000f4241"
func TestEventId(t *testing.T) {
var table = []struct {
definition string
expectations map[string]common.Hash
}{
{
definition: `[
{ "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] }
]`,
expectations: map[string]common.Hash{
"Balance": crypto.Keccak256Hash([]byte("Balance(uint256)")),
"Check": crypto.Keccak256Hash([]byte("Check(address,uint256)")),
},
},
}
for _, test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
for name, event := range abi.Events {
if event.Id() != test.expectations[name] {
t.Errorf("expected id to be %x, got %x", test.expectations[name], event.Id())
}
}
}
}
func TestEventString(t *testing.T) {
var table = []struct {
definition string
expectations map[string]string
}{
{
definition: `[
{ "type" : "event", "name" : "Balance", "inputs": [{ "name" : "in", "type": "uint256" }] },
{ "type" : "event", "name" : "Check", "inputs": [{ "name" : "t", "type": "address" }, { "name": "b", "type": "uint256" }] },
{ "type" : "event", "name" : "Transfer", "inputs": [{ "name": "from", "type": "address", "indexed": true }, { "name": "to", "type": "address", "indexed": true }, { "name": "value", "type": "uint256" }] }
]`,
expectations: map[string]string{
"Balance": "event Balance(uint256 in)",
"Check": "event Check(address t, uint256 b)",
"Transfer": "event Transfer(address indexed from, address indexed to, uint256 value)",
},
},
}
for _, test := range table {
abi, err := JSON(strings.NewReader(test.definition))
if err != nil {
t.Fatal(err)
}
for name, event := range abi.Events {
if event.String() != test.expectations[name] {
t.Errorf("expected string to be %s, got %s", test.expectations[name], event.String())
}
}
}
}
// TestEventMultiValueWithArrayUnpack verifies that fields following an array are unpacked from the correct offset.
func TestEventMultiValueWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": false, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
var i uint8 = 1
for ; i <= 3; i++ {
b.Write(packNum(reflect.ValueOf(i)))
}
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{1, 2}, rst.Value1)
require.Equal(t, uint8(3), rst.Value2)
}
func TestEventTupleUnpack(t *testing.T) {
type EventTransfer struct {
Value *big.Int
}
type EventTransferWithTag struct {
// this is valid because `value` is not exported,
// so the event value is only unmarshalled into `Value1`.
value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithSameFieldAndTag struct {
Value *big.Int
Value1 *big.Int `abi:"value"`
}
type BadEventTransferWithDuplicatedTag struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"value"`
}
type BadEventTransferWithEmptyTag struct {
Value *big.Int `abi:""`
}
type EventPledge struct {
Who common.Address
Wad *big.Int
Currency [3]byte
}
type BadEventPledge struct {
Who string
Wad int
Currency [3]byte
}
type EventMixedCase struct {
Value1 *big.Int `abi:"value"`
Value2 *big.Int `abi:"_value"`
Value3 *big.Int `abi:"Value"`
}
bigint := new(big.Int)
bigintExpected := big.NewInt(1000000)
bigintExpected2 := big.NewInt(2218516807680)
bigintExpected3 := big.NewInt(1000001)
addr := common.HexToAddress("0x00Ce0d46d924CC8437c806721496599FC3FFA268")
var testCases = []struct {
data string
dest interface{}
expected interface{}
jsonLog []byte
error string
name string
}{{
transferData1,
&EventTransfer{},
&EventTransfer{Value: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure",
}, {
transferData1,
&[]interface{}{&bigint},
&[]interface{}{&bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into slice",
}, {
transferData1,
&EventTransferWithTag{},
&EventTransferWithTag{Value1: bigintExpected},
jsonEventTransfer,
"",
"Can unpack ERC20 Transfer event into structure with abi: tag",
}, {
transferData1,
&BadEventTransferWithDuplicatedTag{},
&BadEventTransferWithDuplicatedTag{},
jsonEventTransfer,
"struct: abi tag in 'Value2' already mapped",
"Can not unpack ERC20 Transfer event with duplicated abi tag",
}, {
transferData1,
&BadEventTransferWithSameFieldAndTag{},
&BadEventTransferWithSameFieldAndTag{},
jsonEventTransfer,
"abi: multiple variables maps to the same abi field 'value'",
"Can not unpack ERC20 Transfer event with a field and a tag mapping to the same abi variable",
}, {
transferData1,
&BadEventTransferWithEmptyTag{},
&BadEventTransferWithEmptyTag{},
jsonEventTransfer,
"struct: abi tag in 'Value' is empty",
"Can not unpack ERC20 Transfer event with an empty tag",
}, {
pledgeData1,
&EventPledge{},
&EventPledge{
addr,
bigintExpected2,
[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into structure",
}, {
pledgeData1,
&[]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into slice",
}, {
pledgeData1,
&[3]interface{}{&common.Address{}, &bigint, &[3]byte{}},
&[3]interface{}{
&addr,
&bigintExpected2,
&[3]byte{'u', 's', 'd'}},
jsonEventPledge,
"",
"Can unpack Pledge event into an array",
}, {
pledgeData1,
&[]interface{}{new(int), 0, 0},
&[]interface{}{},
jsonEventPledge,
"abi: cannot unmarshal common.Address in to int",
"Can not unpack Pledge event into slice with wrong types",
}, {
pledgeData1,
&BadEventPledge{},
&BadEventPledge{},
jsonEventPledge,
"abi: cannot unmarshal common.Address in to string",
"Can not unpack Pledge event into struct with wrong filed types",
}, {
pledgeData1,
&[]interface{}{common.Address{}, new(big.Int)},
&[]interface{}{},
jsonEventPledge,
"abi: insufficient number of elements in the list/array for unpack, want 3, got 2",
"Can not unpack Pledge event into too short slice",
}, {
pledgeData1,
new(map[string]interface{}),
&[]interface{}{},
jsonEventPledge,
"abi: cannot unmarshal tuple into map[string]interface {}",
"Can not unpack Pledge event into map",
}, {
mixedCaseData1,
&EventMixedCase{},
&EventMixedCase{Value1: bigintExpected, Value2: bigintExpected2, Value3: bigintExpected3},
jsonEventMixedCase,
"",
"Can unpack abi variables with mixed case",
}}
for _, tc := range testCases {
assert := assert.New(t)
tc := tc
t.Run(tc.name, func(t *testing.T) {
err := unpackTestEventData(tc.dest, tc.data, tc.jsonLog, assert)
if tc.error == "" {
assert.Nil(err, "Should be able to unpack event data.")
assert.Equal(tc.expected, tc.dest, tc.name)
} else {
assert.EqualError(err, tc.error, tc.name)
}
})
}
}
func unpackTestEventData(dest interface{}, hexData string, jsonEvent []byte, assert *assert.Assertions) error {
data, err := hex.DecodeString(hexData)
assert.NoError(err, "Hex data should be a correct hex-string")
var e Event
assert.NoError(json.Unmarshal(jsonEvent, &e), "Should be able to unmarshal event ABI")
a := ABI{Events: map[string]Event{"e": e}}
return a.Unpack(dest, "e", data)
}
/*
Taken from
https://github.com/ethereum/go-ethereum/pull/15568
*/
type testResult struct {
Values [2]*big.Int
Value1 *big.Int
Value2 *big.Int
}
type testCase struct {
definition string
want testResult
}
func (tc testCase) encoded(intType, arrayType Type) []byte {
var b bytes.Buffer
if tc.want.Value1 != nil {
val, _ := intType.pack(reflect.ValueOf(tc.want.Value1))
b.Write(val)
}
if !reflect.DeepEqual(tc.want.Values, [2]*big.Int{nil, nil}) {
val, _ := arrayType.pack(reflect.ValueOf(tc.want.Values))
b.Write(val)
}
if tc.want.Value2 != nil {
val, _ := intType.pack(reflect.ValueOf(tc.want.Value2))
b.Write(val)
}
return b.Bytes()
}
// TestEventUnpackIndexed verifies that indexed field will be skipped by event decoder.
func TestEventUnpackIndexed(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8"},{"indexed": false, "name":"value2", "type":"uint8"}]}]`
type testStruct struct {
Value1 uint8
Value2 uint8
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
b.Write(packNum(reflect.ValueOf(uint8(8))))
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, uint8(0), rst.Value1)
require.Equal(t, uint8(8), rst.Value2)
}
// TestEventIndexedWithArrayUnpack verifies that the decoder does not overflow when a static array is the indexed input.
func TestEventIndexedWithArrayUnpack(t *testing.T) {
definition := `[{"name": "test", "type": "event", "inputs": [{"indexed": true, "name":"value1", "type":"uint8[2]"},{"indexed": false, "name":"value2", "type":"string"}]}]`
type testStruct struct {
Value1 [2]uint8
Value2 string
}
abi, err := JSON(strings.NewReader(definition))
require.NoError(t, err)
var b bytes.Buffer
stringOut := "abc"
// offset to the dynamic string data: number of encoded head fields * 32
b.Write(packNum(reflect.ValueOf(32)))
b.Write(packNum(reflect.ValueOf(len(stringOut))))
b.Write(common.RightPadBytes([]byte(stringOut), 32))
var rst testStruct
require.NoError(t, abi.Unpack(&rst, "test", b.Bytes()))
require.Equal(t, [2]uint8{0, 0}, rst.Value1)
require.Equal(t, stringOut, rst.Value2)
}


@ -1,61 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"strings"
"testing"
)
const methoddata = `
[
{ "type" : "function", "name" : "balance", "constant" : true },
{ "type" : "function", "name" : "send", "constant" : false, "inputs" : [ { "name" : "amount", "type" : "uint256" } ] },
{ "type" : "function", "name" : "transfer", "constant" : false, "inputs" : [ { "name" : "from", "type" : "address" }, { "name" : "to", "type" : "address" }, { "name" : "value", "type" : "uint256" } ], "outputs" : [ { "name" : "success", "type" : "bool" } ] }
]`
func TestMethodString(t *testing.T) {
var table = []struct {
method string
expectation string
}{
{
method: "balance",
expectation: "function balance() constant returns()",
},
{
method: "send",
expectation: "function send(uint256 amount) returns()",
},
{
method: "transfer",
expectation: "function transfer(address from, address to, uint256 value) returns(bool success)",
},
}
abi, err := JSON(strings.NewReader(methoddata))
if err != nil {
t.Fatal(err)
}
for _, test := range table {
got := abi.Methods[test.method].String()
if got != test.expectation {
t.Errorf("expected string to be %s, got %s", test.expectation, got)
}
}
}


@ -1,33 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"bytes"
"math/big"
"testing"
)
func TestNumberTypes(t *testing.T) {
ubytes := make([]byte, 32)
ubytes[31] = 1
unsigned := U256(big.NewInt(1))
if !bytes.Equal(unsigned, ubytes) {
t.Errorf("expected %x got %x", ubytes, unsigned)
}
}
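
A small illustrative sketch (same package, exercised a bit further than the single case above): U256 encodes a big.Int as a 32-byte big-endian word, with negative values wrapping modulo 2^256.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// 1 is left-padded to a 32-byte word ...
	fmt.Printf("%x\n", abi.U256(big.NewInt(1)))
	// ... while -1 wraps to 2^256 - 1, i.e. 32 bytes of 0xff.
	fmt.Printf("%x\n", abi.U256(big.NewInt(-1)))
}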


@ -1,783 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"bytes"
"math"
"math/big"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
)
func TestPack(t *testing.T) {
for i, test := range []struct {
typ string
components []ArgumentMarshaling
input interface{}
output []byte
}{
{
"uint8",
nil,
uint8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint8[]",
nil,
[]uint8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16",
nil,
uint16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint16[]",
nil,
[]uint16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32",
nil,
uint32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint32[]",
nil,
[]uint32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64",
nil,
uint64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint64[]",
nil,
[]uint64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256",
nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"uint256[]",
nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8",
nil,
int8(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int8[]",
nil,
[]int8{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16",
nil,
int16(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int16[]",
nil,
[]int16{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32",
nil,
int32(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int32[]",
nil,
[]int32{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64",
nil,
int64(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int64[]",
nil,
[]int64{1, 2},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256",
nil,
big.NewInt(2),
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002"),
},
{
"int256[]",
nil,
[]*big.Int{big.NewInt(1), big.NewInt(2)},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002"),
},
{
"bytes1",
nil,
[1]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes2",
nil,
[2]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes3",
nil,
[3]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes4",
nil,
[4]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes5",
nil,
[5]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes6",
nil,
[6]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes7",
nil,
[7]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes8",
nil,
[8]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes9",
nil,
[9]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes10",
nil,
[10]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes11",
nil,
[11]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes12",
nil,
[12]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes13",
nil,
[13]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes14",
nil,
[14]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes15",
nil,
[15]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes16",
nil,
[16]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes17",
nil,
[17]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes18",
nil,
[18]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes19",
nil,
[19]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes20",
nil,
[20]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes21",
nil,
[21]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes22",
nil,
[22]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes23",
nil,
[23]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes24",
nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes25",
nil,
[25]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes26",
nil,
[26]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes27",
nil,
[27]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes28",
nil,
[28]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes29",
nil,
[29]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes30",
nil,
[30]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes31",
nil,
[31]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"bytes32",
nil,
[32]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"uint32[2][3][4]",
nil,
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
},
{
"address[]",
nil,
[]common.Address{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000200000000000000000000000001000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000"),
},
{
"bytes32[]",
nil,
[]common.Hash{{1}, {2}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000201000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000"),
},
{
"function",
nil,
[24]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"string",
nil,
"foobar",
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000006666f6f6261720000000000000000000000000000000000000000000000000000"),
},
{
"string[]",
nil,
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset 128 to i = 1
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
},
{
"string[2]",
nil,
[]string{"hello", "foobar"},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset to i = 0
"0000000000000000000000000000000000000000000000000000000000000080" + // offset to i = 1
"0000000000000000000000000000000000000000000000000000000000000005" + // len(str[0]) = 5
"68656c6c6f000000000000000000000000000000000000000000000000000000" + // str[0]
"0000000000000000000000000000000000000000000000000000000000000006" + // len(str[1]) = 6
"666f6f6261720000000000000000000000000000000000000000000000000000"), // str[1]
},
{
"bytes32[][]",
nil,
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // len(array) = 2
"0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
{
"bytes32[][2]",
nil,
[][]common.Hash{{{1}, {2}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // offset 64 to i = 0
"00000000000000000000000000000000000000000000000000000000000000a0" + // offset 160 to i = 1
"0000000000000000000000000000000000000000000000000000000000000002" + // len(array[0]) = 2
"0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0000000000000000000000000000000000000000000000000000000000000003" + // len(array[1]) = 3
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
{
"bytes32[3][2]",
nil,
[][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000" + // array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // array[0][1]
"0300000000000000000000000000000000000000000000000000000000000000" + // array[0][2]
"0300000000000000000000000000000000000000000000000000000000000000" + // array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // array[1][2]
},
{
// static tuple
"tuple",
[]ArgumentMarshaling{
{Name: "a", Type: "int64"},
{Name: "b", Type: "int256"},
{Name: "c", Type: "int256"},
{Name: "d", Type: "bool"},
{Name: "e", Type: "bytes32[3][2]"},
},
struct {
A int64
B *big.Int
C *big.Int
D bool
E [][]common.Hash
}{1, big.NewInt(1), big.NewInt(-1), true, [][]common.Hash{{{1}, {2}, {3}}, {{3}, {4}, {5}}}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001" + // struct[a]
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // struct[c]
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[d]
"0100000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][0]
"0200000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][1]
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[0][2]
"0300000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][0]
"0400000000000000000000000000000000000000000000000000000000000000" + // struct[e] array[1][1]
"0500000000000000000000000000000000000000000000000000000000000000"), // struct[e] array[1][2]
},
{
// dynamic tuple
"tuple",
[]ArgumentMarshaling{
{Name: "a", Type: "string"},
{Name: "b", Type: "int64"},
{Name: "c", Type: "bytes"},
{Name: "d", Type: "string[]"},
{Name: "e", Type: "int256[]"},
{Name: "f", Type: "address[]"},
},
struct {
FieldA string `abi:"a"` // Test whether abi tag works
FieldB int64 `abi:"b"`
C []byte
D []string
E []*big.Int
F []common.Address
}{"foobar", 1, []byte{1}, []string{"foo", "bar"}, []*big.Int{big.NewInt(1), big.NewInt(-1)}, []common.Address{{1}, {2}}},
common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000c0" + // struct[a] offset
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[b]
"0000000000000000000000000000000000000000000000000000000000000100" + // struct[c] offset
"0000000000000000000000000000000000000000000000000000000000000140" + // struct[d] offset
"0000000000000000000000000000000000000000000000000000000000000220" + // struct[e] offset
"0000000000000000000000000000000000000000000000000000000000000280" + // struct[f] offset
"0000000000000000000000000000000000000000000000000000000000000006" + // struct[a] length
"666f6f6261720000000000000000000000000000000000000000000000000000" + // struct[a] "foobar"
"0000000000000000000000000000000000000000000000000000000000000001" + // struct[c] length
"0100000000000000000000000000000000000000000000000000000000000000" + // []byte{1}
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[d] length
"0000000000000000000000000000000000000000000000000000000000000040" + // foo offset
"0000000000000000000000000000000000000000000000000000000000000080" + // bar offset
"0000000000000000000000000000000000000000000000000000000000000003" + // foo length
"666f6f0000000000000000000000000000000000000000000000000000000000" + // foo
"0000000000000000000000000000000000000000000000000000000000000003" + // bar offset
"6261720000000000000000000000000000000000000000000000000000000000" + // bar
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[e] length
"0000000000000000000000000000000000000000000000000000000000000001" + // 1
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // -1
"0000000000000000000000000000000000000000000000000000000000000002" + // struct[f] length
"0000000000000000000000000100000000000000000000000000000000000000" + // common.Address{1}
"0000000000000000000000000200000000000000000000000000000000000000"), // common.Address{2}
},
{
// nested tuple
"tuple",
[]ArgumentMarshaling{
{Name: "a", Type: "tuple", Components: []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256[]"}}},
{Name: "b", Type: "int256[]"},
},
struct {
A struct {
FieldA *big.Int `abi:"a"`
B []*big.Int
}
B []*big.Int
}{
A: struct {
FieldA *big.Int `abi:"a"` // Test whether abi tag works for nested tuple
B []*big.Int
}{big.NewInt(1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
B: []*big.Int{big.NewInt(1), big.NewInt(0)}},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // a offset
"00000000000000000000000000000000000000000000000000000000000000e0" + // b offset
"0000000000000000000000000000000000000000000000000000000000000001" + // a.a value
"0000000000000000000000000000000000000000000000000000000000000040" + // a.b offset
"0000000000000000000000000000000000000000000000000000000000000002" + // a.b length
"0000000000000000000000000000000000000000000000000000000000000001" + // a.b[0] value
"0000000000000000000000000000000000000000000000000000000000000000" + // a.b[1] value
"0000000000000000000000000000000000000000000000000000000000000002" + // b length
"0000000000000000000000000000000000000000000000000000000000000001" + // b[0] value
"0000000000000000000000000000000000000000000000000000000000000000"), // b[1] value
},
{
// tuple slice
"tuple[]",
[]ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256[]"},
},
[]struct {
A *big.Int
B []*big.Int
}{
{big.NewInt(-1), []*big.Int{big.NewInt(1), big.NewInt(0)}},
{big.NewInt(1), []*big.Int{big.NewInt(2), big.NewInt(-1)}},
},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000002" + // tuple length
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
"00000000000000000000000000000000000000000000000000000000000000e0" + // tuple[1] offset
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0].B offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].B length
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].B[0] value
"0000000000000000000000000000000000000000000000000000000000000000" + // tuple[0].B[1] value
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A
"0000000000000000000000000000000000000000000000000000000000000040" + // tuple[1].B offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B length
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].B[0] value
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].B[1] value
},
{
// static tuple array
"tuple[2]",
[]ArgumentMarshaling{
{Name: "a", Type: "int256"},
{Name: "b", Type: "int256"},
},
[2]struct {
A *big.Int
B *big.Int
}{
{big.NewInt(-1), big.NewInt(1)},
{big.NewInt(1), big.NewInt(-1)},
},
common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].a
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].b
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].a
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].b
},
{
// dynamic tuple array
"tuple[2]",
[]ArgumentMarshaling{
{Name: "a", Type: "int256[]"},
},
[2]struct {
A []*big.Int
}{
{[]*big.Int{big.NewInt(-1), big.NewInt(1)}},
{[]*big.Int{big.NewInt(1), big.NewInt(-1)}},
},
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000040" + // tuple[0] offset
"00000000000000000000000000000000000000000000000000000000000000c0" + // tuple[1] offset
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[0].A offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[0].A length
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + // tuple[0].A[0]
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[0].A[1]
"0000000000000000000000000000000000000000000000000000000000000020" + // tuple[1].A offset
"0000000000000000000000000000000000000000000000000000000000000002" + // tuple[1].A length
"0000000000000000000000000000000000000000000000000000000000000001" + // tuple[1].A[0]
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), // tuple[1].A[1]
},
} {
typ, err := NewType(test.typ, test.components)
if err != nil {
t.Fatalf("%v failed. Unexpected parse error: %v", i, err)
}
output, err := typ.pack(reflect.ValueOf(test.input))
if err != nil {
t.Fatalf("%v failed. Unexpected pack error: %v", i, err)
}
if !bytes.Equal(output, test.output) {
t.Errorf("input %d for typ: %v failed. Expected bytes: '%x' Got: '%x'", i, typ.String(), test.output, output)
}
}
}
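The cases above all follow the same head/tail layout: each dynamic member contributes a single 32-byte offset word to the head, static members are packed inline, and the offsets point into the tail relative to the start of the enclosing encoding. A small stdlib-only sketch of that head-size bookkeeping (an illustration, not the packer itself):

package main

import "fmt"

// headSize returns the size in bytes of the head section for a list of
// members: dynamic members occupy a single 32-byte offset word, static
// members occupy their full 32-byte-aligned size.
func headSize(staticSizes []int, dynamic []bool) int {
	n := 0
	for i, d := range dynamic {
		if d {
			n += 32
		} else {
			n += staticSizes[i]
		}
	}
	return n
}

func main() {
	// The first tuple above: string, uint, []byte, []string, []*big.Int,
	// []common.Address -> five dynamic members plus one static word.
	fmt.Println(headSize([]int{0, 32, 0, 0, 0, 0}, []bool{true, false, true, true, true, true}))
	// 192 == 0xc0, the first offset in the expected encoding
}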
func TestMethodPack(t *testing.T) {
abi, err := JSON(strings.NewReader(jsondata2))
if err != nil {
t.Fatal(err)
}
sig := abi.Methods["slice"].Id()
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
packed, err := abi.Pack("slice", []uint32{1, 2})
if err != nil {
t.Error(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
var addrA, addrB = common.Address{1}, common.Address{2}
sig = abi.Methods["sliceAddress"].Id()
sig = append(sig, common.LeftPadBytes([]byte{32}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
packed, err = abi.Pack("sliceAddress", []common.Address{addrA, addrB})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
var addrC, addrD = common.Address{3}, common.Address{4}
sig = abi.Methods["sliceMultiAddress"].Id()
sig = append(sig, common.LeftPadBytes([]byte{64}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{160}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrA[:], 32)...)
sig = append(sig, common.LeftPadBytes(addrB[:], 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
packed, err = abi.Pack("sliceMultiAddress", []common.Address{addrA, addrB}, []common.Address{addrC, addrD})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
sig = abi.Methods["slice256"].Id()
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
packed, err = abi.Pack("slice256", []*big.Int{big.NewInt(1), big.NewInt(2)})
if err != nil {
t.Error(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
a := [2][2]*big.Int{{big.NewInt(1), big.NewInt(1)}, {big.NewInt(2), big.NewInt(0)}}
sig = abi.Methods["nestedArray"].Id()
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes(addrC[:], 32)...)
sig = append(sig, common.LeftPadBytes(addrD[:], 32)...)
packed, err = abi.Pack("nestedArray", a, []common.Address{addrC, addrD})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
sig = abi.Methods["nestedArray2"].Id()
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x80}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
packed, err = abi.Pack("nestedArray2", [2][]uint8{{1}, {1}})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
sig = abi.Methods["nestedSlice"].Id()
sig = append(sig, common.LeftPadBytes([]byte{0x20}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x02}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0x40}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{0xa0}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{1}, 32)...)
sig = append(sig, common.LeftPadBytes([]byte{2}, 32)...)
packed, err = abi.Pack("nestedSlice", [][]uint8{{1, 2}, {1, 2}})
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(packed, sig) {
t.Errorf("expected %x got %x", sig, packed)
}
}
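For context, the selector-plus-arguments layout being rebuilt by hand above is what ABI.Pack produces end to end. A minimal usage sketch, assuming the accounts/abi API at this revision and using a stand-in method definition rather than the jsondata2 fixture:

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Calldata is the 4-byte method selector followed by the ABI-encoded args.
	def := `[{"type":"function","name":"slice","inputs":[{"name":"a","type":"uint32[2]"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		log.Fatal(err)
	}
	calldata, err := parsed.Pack("slice", []uint32{1, 2})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("selector %x args %x\n", calldata[:4], calldata[4:])
}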
func TestPackNumber(t *testing.T) {
tests := []struct {
value reflect.Value
packed []byte
}{
// Protocol limits
{reflect.ValueOf(0), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000")},
{reflect.ValueOf(1), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001")},
{reflect.ValueOf(-1), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")},
// Type corner cases
{reflect.ValueOf(uint8(math.MaxUint8)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000000000ff")},
{reflect.ValueOf(uint16(math.MaxUint16)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000ffff")},
{reflect.ValueOf(uint32(math.MaxUint32)), common.Hex2Bytes("00000000000000000000000000000000000000000000000000000000ffffffff")},
{reflect.ValueOf(uint64(math.MaxUint64)), common.Hex2Bytes("000000000000000000000000000000000000000000000000ffffffffffffffff")},
{reflect.ValueOf(int8(math.MaxInt8)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000007f")},
{reflect.ValueOf(int16(math.MaxInt16)), common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000007fff")},
{reflect.ValueOf(int32(math.MaxInt32)), common.Hex2Bytes("000000000000000000000000000000000000000000000000000000007fffffff")},
{reflect.ValueOf(int64(math.MaxInt64)), common.Hex2Bytes("0000000000000000000000000000000000000000000000007fffffffffffffff")},
{reflect.ValueOf(int8(math.MinInt8)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80")},
{reflect.ValueOf(int16(math.MinInt16)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8000")},
{reflect.ValueOf(int32(math.MinInt32)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000")},
{reflect.ValueOf(int64(math.MinInt64)), common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffff8000000000000000")},
}
for i, tt := range tests {
packed := packNum(tt.value)
if !bytes.Equal(packed, tt.packed) {
t.Errorf("test %d: pack mismatch: have %x, want %x", i, packed, tt.packed)
}
}
}
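A side note on the signed cases above: values are packed as 256-bit two's complement, which is why -1 becomes 32 bytes of 0xff. A stdlib-only sketch of the same arithmetic (not the packNum implementation):

package main

import (
	"fmt"
	"math/big"
)

// toWord256 reduces v modulo 2^256, which is exactly the two's-complement
// representation the ABI uses for signed integers.
func toWord256(v *big.Int) []byte {
	mod := new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	u := new(big.Int).Mod(v, mod)               // maps -1 to 2^256-1
	b := u.Bytes()
	out := make([]byte, 32)
	copy(out[32-len(b):], b) // left-pad to a full 32-byte word
	return out
}

func main() {
	fmt.Printf("%x\n", toWord256(big.NewInt(-1))) // ffff...ff
	fmt.Printf("%x\n", toWord256(big.NewInt(1)))  // 0000...01
}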


@@ -1,191 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"reflect"
"testing"
)
type reflectTest struct {
name string
args []string
struc interface{}
want map[string]string
err string
}
var reflectTests = []reflectTest{
{
name: "OneToOneCorrespondance",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "MissingFieldsInStruct",
args: []string{"fieldA", "fieldB"},
struc: struct {
FieldA int `abi:"fieldA"`
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "MoreFieldsInStructThanArgs",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "MissingFieldInArgs",
args: []string{"fieldA"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int `abi:"fieldB"`
}{},
err: "struct: abi tag 'fieldB' defined but not found in abi",
},
{
name: "NoAbiDescriptor",
args: []string{"fieldA"},
struc: struct {
FieldA int
}{},
want: map[string]string{
"fieldA": "FieldA",
},
},
{
name: "NoArgs",
args: []string{},
struc: struct {
FieldA int `abi:"fieldA"`
}{},
err: "struct: abi tag 'fieldA' defined but not found in abi",
},
{
name: "DifferentName",
args: []string{"fieldB"},
struc: struct {
FieldA int `abi:"fieldB"`
}{},
want: map[string]string{
"fieldB": "FieldA",
},
},
{
name: "MultipleFields",
args: []string{"fieldA", "fieldB"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int `abi:"fieldB"`
}{},
want: map[string]string{
"fieldA": "FieldA",
"fieldB": "FieldB",
},
},
{
name: "MultipleFieldsABIMissing",
args: []string{"fieldA", "fieldB"},
struc: struct {
FieldA int `abi:"fieldA"`
FieldB int
}{},
want: map[string]string{
"fieldA": "FieldA",
"fieldB": "FieldB",
},
},
{
name: "NameConflict",
args: []string{"fieldB"},
struc: struct {
FieldA int `abi:"fieldB"`
FieldB int
}{},
err: "abi: multiple variables maps to the same abi field 'fieldB'",
},
{
name: "Underscored",
args: []string{"_"},
struc: struct {
FieldA int
}{},
err: "abi: purely underscored output cannot unpack to struct",
},
{
name: "DoubleMapping",
args: []string{"fieldB", "fieldC", "fieldA"},
struc: struct {
FieldA int `abi:"fieldC"`
FieldB int
}{},
err: "abi: multiple outputs mapping to the same struct field 'FieldA'",
},
{
name: "AlreadyMapped",
args: []string{"fieldB", "fieldB"},
struc: struct {
FieldB int `abi:"fieldB"`
}{},
err: "struct: abi tag in 'FieldB' already mapped",
},
}
func TestReflectNameToStruct(t *testing.T) {
for _, test := range reflectTests {
t.Run(test.name, func(t *testing.T) {
m, err := mapArgNamesToStructFields(test.args, reflect.ValueOf(test.struc))
if len(test.err) > 0 {
if err == nil || err.Error() != test.err {
t.Fatalf("Invalid error: expected %v, got %v", test.err, err)
}
} else {
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
for fname := range test.want {
if m[fname] != test.want[fname] {
t.Fatalf("Incorrect value for field %s: expected %v, got %v", fname, test.want[fname], m[fname])
}
}
}
})
}
}
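The behaviour these cases pin down is a small reflection rule: an explicit abi:"name" tag binds a struct field to an argument name, and otherwise a case-insensitive match on the field name is used. A simplified stdlib sketch of that rule, ignoring the conflict and error cases (not the library's mapArgNamesToStructFields):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// mapNames illustrates the tag-matching rule exercised above: an abi tag
// wins, otherwise a case-insensitive field-name match is used.
func mapNames(argNames []string, v interface{}) map[string]string {
	out := map[string]string{}
	t := reflect.TypeOf(v)
	for _, arg := range argNames {
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			if tag, ok := f.Tag.Lookup("abi"); ok {
				if tag == arg {
					out[arg] = f.Name
				}
			} else if strings.EqualFold(f.Name, arg) {
				out[arg] = f.Name
			}
		}
	}
	return out
}

func main() {
	s := struct {
		FieldA int `abi:"fieldB"`
	}{}
	fmt.Println(mapNames([]string{"fieldB"}, s)) // map[fieldB:FieldA]
}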


@@ -1,308 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"math/big"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/common"
)
// typeWithoutStringer is an alias for the Type type which simply doesn't implement
// the stringer interface to allow printing type details in the tests below.
type typeWithoutStringer Type
// Tests that all allowed types get recognized by the type parser.
func TestTypeRegexp(t *testing.T) {
tests := []struct {
blob string
components []ArgumentMarshaling
kind Type
}{
{"bool", nil, Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}},
{"bool[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool(nil)), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}},
{"bool[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}},
{"bool[2][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}},
{"bool[][]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}},
{"bool[][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}},
{"bool[2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}},
{"bool[2][][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][][2]bool{}), Elem: &Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][]"}, stringKind: "bool[2][][2]"}},
{"bool[2][2][2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][2]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[2]"}, stringKind: "bool[2][2]"}, stringKind: "bool[2][2][2]"}},
{"bool[][][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][]"}, stringKind: "bool[][][]"}},
{"bool[][2][]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][2][]bool{}), Elem: &Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]bool{}), Elem: &Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]bool{}), Elem: &Type{Kind: reflect.Bool, T: BoolTy, Type: reflect.TypeOf(bool(false)), stringKind: "bool"}, stringKind: "bool[]"}, stringKind: "bool[][2]"}, stringKind: "bool[][2][]"}},
{"int8", nil, Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}},
{"int16", nil, Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}},
{"int32", nil, Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}},
{"int64", nil, Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}},
{"int256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}},
{"int8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[]"}},
{"int8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int8{}), Elem: &Type{Kind: reflect.Int8, Type: int8T, Size: 8, T: IntTy, stringKind: "int8"}, stringKind: "int8[2]"}},
{"int16[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[]"}},
{"int16[2]", nil, Type{Size: 2, Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]int16{}), Elem: &Type{Kind: reflect.Int16, Type: int16T, Size: 16, T: IntTy, stringKind: "int16"}, stringKind: "int16[2]"}},
{"int32[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[]"}},
{"int32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int32{}), Elem: &Type{Kind: reflect.Int32, Type: int32T, Size: 32, T: IntTy, stringKind: "int32"}, stringKind: "int32[2]"}},
{"int64[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[]"}},
{"int64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]int64{}), Elem: &Type{Kind: reflect.Int64, Type: int64T, Size: 64, T: IntTy, stringKind: "int64"}, stringKind: "int64[2]"}},
{"int256[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[]"}},
{"int256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: IntTy, stringKind: "int256"}, stringKind: "int256[2]"}},
{"uint8", nil, Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}},
{"uint16", nil, Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}},
{"uint32", nil, Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}},
{"uint64", nil, Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}},
{"uint256", nil, Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}},
{"uint8[]", nil, Type{Kind: reflect.Slice, T: SliceTy, Type: reflect.TypeOf([]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[]"}},
{"uint8[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint8{}), Elem: &Type{Kind: reflect.Uint8, Type: uint8T, Size: 8, T: UintTy, stringKind: "uint8"}, stringKind: "uint8[2]"}},
{"uint16[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[]"}},
{"uint16[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint16{}), Elem: &Type{Kind: reflect.Uint16, Type: uint16T, Size: 16, T: UintTy, stringKind: "uint16"}, stringKind: "uint16[2]"}},
{"uint32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[]"}},
{"uint32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint32{}), Elem: &Type{Kind: reflect.Uint32, Type: uint32T, Size: 32, T: UintTy, stringKind: "uint32"}, stringKind: "uint32[2]"}},
{"uint64[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[]"}},
{"uint64[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]uint64{}), Elem: &Type{Kind: reflect.Uint64, Type: uint64T, Size: 64, T: UintTy, stringKind: "uint64"}, stringKind: "uint64[2]"}},
{"uint256[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]*big.Int{}), Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[]"}},
{"uint256[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Type: reflect.TypeOf([2]*big.Int{}), Size: 2, Elem: &Type{Kind: reflect.Ptr, Type: bigT, Size: 256, T: UintTy, stringKind: "uint256"}, stringKind: "uint256[2]"}},
{"bytes32", nil, Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}},
{"bytes[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][]byte{}), Elem: &Type{Kind: reflect.Slice, Type: reflect.TypeOf([]byte{}), T: BytesTy, stringKind: "bytes"}, stringKind: "bytes[]"}},
{"bytes[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][]byte{}), Elem: &Type{T: BytesTy, Type: reflect.TypeOf([]byte{}), Kind: reflect.Slice, stringKind: "bytes"}, stringKind: "bytes[2]"}},
{"bytes32[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([][32]byte{}), Elem: &Type{Kind: reflect.Array, Type: reflect.TypeOf([32]byte{}), T: FixedBytesTy, Size: 32, stringKind: "bytes32"}, stringKind: "bytes32[]"}},
{"bytes32[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2][32]byte{}), Elem: &Type{Kind: reflect.Array, T: FixedBytesTy, Size: 32, Type: reflect.TypeOf([32]byte{}), stringKind: "bytes32"}, stringKind: "bytes32[2]"}},
{"string", nil, Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}},
{"string[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]string{}), Elem: &Type{Kind: reflect.String, Type: reflect.TypeOf(""), T: StringTy, stringKind: "string"}, stringKind: "string[]"}},
{"string[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]string{}), Elem: &Type{Kind: reflect.String, T: StringTy, Type: reflect.TypeOf(""), stringKind: "string"}, stringKind: "string[2]"}},
{"address", nil, Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}},
{"address[]", nil, Type{T: SliceTy, Kind: reflect.Slice, Type: reflect.TypeOf([]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[]"}},
{"address[2]", nil, Type{Kind: reflect.Array, T: ArrayTy, Size: 2, Type: reflect.TypeOf([2]common.Address{}), Elem: &Type{Kind: reflect.Array, Type: addressT, Size: 20, T: AddressTy, stringKind: "address"}, stringKind: "address[2]"}},
// TODO when fixed types are implemented properly
// {"fixed", nil, Type{}},
// {"fixed128x128", nil, Type{}},
// {"fixed[]", nil, Type{}},
// {"fixed[2]", nil, Type{}},
// {"fixed128x128[]", nil, Type{}},
// {"fixed128x128[2]", nil, Type{}},
{"tuple", []ArgumentMarshaling{{Name: "a", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct {
A int64 `json:"a"`
}{}), stringKind: "(int64)",
TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"a"}}},
{"tuple with long name", []ArgumentMarshaling{{Name: "aTypicalParamName", Type: "int64"}}, Type{Kind: reflect.Struct, T: TupleTy, Type: reflect.TypeOf(struct {
ATypicalParamName int64 `json:"aTypicalParamName"`
}{}), stringKind: "(int64)",
TupleElems: []*Type{{Kind: reflect.Int64, T: IntTy, Type: reflect.TypeOf(int64(0)), Size: 64, stringKind: "int64"}}, TupleRawNames: []string{"aTypicalParamName"}}},
}
for _, tt := range tests {
typ, err := NewType(tt.blob, tt.components)
if err != nil {
t.Errorf("type %q: failed to parse type string: %v", tt.blob, err)
}
if !reflect.DeepEqual(typ, tt.kind) {
t.Errorf("type %q: parsed type mismatch:\nGOT %s\nWANT %s ", tt.blob, spew.Sdump(typeWithoutStringer(typ)), spew.Sdump(typeWithoutStringer(tt.kind)))
}
}
}
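The table above is ultimately exercising a regexp that splits a type string into its elementary base and its array/slice suffixes. A rough stdlib illustration of that split (NewType itself also builds the nested Elem types, tuple structs and reflect types):

package main

import (
	"fmt"
	"regexp"
)

// splitType peels the base elementary type off a string such as
// "uint256[3][]" and returns the array/slice suffixes separately.
func splitType(s string) (base string, dims []string) {
	re := regexp.MustCompile(`^([a-z0-9]+)((\[[0-9]*\])*)$`)
	m := re.FindStringSubmatch(s)
	if m == nil {
		return "", nil
	}
	dimRe := regexp.MustCompile(`\[[0-9]*\]`)
	return m[1], dimRe.FindAllString(m[2], -1)
}

func main() {
	b, d := splitType("uint256[3][]")
	fmt.Println(b, d) // uint256 [[3] []]
}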
func TestTypeCheck(t *testing.T) {
for i, test := range []struct {
typ string
components []ArgumentMarshaling
input interface{}
err string
}{
{"uint", nil, big.NewInt(1), "unsupported arg type: uint"},
{"int", nil, big.NewInt(1), "unsupported arg type: int"},
{"uint256", nil, big.NewInt(1), ""},
{"uint256[][3][]", nil, [][3][]*big.Int{{{}}}, ""},
{"uint256[][][3]", nil, [3][][]*big.Int{{{}}}, ""},
{"uint256[3][][]", nil, [][][3]*big.Int{{{}}}, ""},
{"uint256[3][3][3]", nil, [3][3][3]*big.Int{{{}}}, ""},
{"uint8[][]", nil, [][]uint8{}, ""},
{"int256", nil, big.NewInt(1), ""},
{"uint8", nil, uint8(1), ""},
{"uint16", nil, uint16(1), ""},
{"uint32", nil, uint32(1), ""},
{"uint64", nil, uint64(1), ""},
{"int8", nil, int8(1), ""},
{"int16", nil, int16(1), ""},
{"int32", nil, int32(1), ""},
{"int64", nil, int64(1), ""},
{"uint24", nil, big.NewInt(1), ""},
{"uint40", nil, big.NewInt(1), ""},
{"uint48", nil, big.NewInt(1), ""},
{"uint56", nil, big.NewInt(1), ""},
{"uint72", nil, big.NewInt(1), ""},
{"uint80", nil, big.NewInt(1), ""},
{"uint88", nil, big.NewInt(1), ""},
{"uint96", nil, big.NewInt(1), ""},
{"uint104", nil, big.NewInt(1), ""},
{"uint112", nil, big.NewInt(1), ""},
{"uint120", nil, big.NewInt(1), ""},
{"uint128", nil, big.NewInt(1), ""},
{"uint136", nil, big.NewInt(1), ""},
{"uint144", nil, big.NewInt(1), ""},
{"uint152", nil, big.NewInt(1), ""},
{"uint160", nil, big.NewInt(1), ""},
{"uint168", nil, big.NewInt(1), ""},
{"uint176", nil, big.NewInt(1), ""},
{"uint184", nil, big.NewInt(1), ""},
{"uint192", nil, big.NewInt(1), ""},
{"uint200", nil, big.NewInt(1), ""},
{"uint208", nil, big.NewInt(1), ""},
{"uint216", nil, big.NewInt(1), ""},
{"uint224", nil, big.NewInt(1), ""},
{"uint232", nil, big.NewInt(1), ""},
{"uint240", nil, big.NewInt(1), ""},
{"uint248", nil, big.NewInt(1), ""},
{"int24", nil, big.NewInt(1), ""},
{"int40", nil, big.NewInt(1), ""},
{"int48", nil, big.NewInt(1), ""},
{"int56", nil, big.NewInt(1), ""},
{"int72", nil, big.NewInt(1), ""},
{"int80", nil, big.NewInt(1), ""},
{"int88", nil, big.NewInt(1), ""},
{"int96", nil, big.NewInt(1), ""},
{"int104", nil, big.NewInt(1), ""},
{"int112", nil, big.NewInt(1), ""},
{"int120", nil, big.NewInt(1), ""},
{"int128", nil, big.NewInt(1), ""},
{"int136", nil, big.NewInt(1), ""},
{"int144", nil, big.NewInt(1), ""},
{"int152", nil, big.NewInt(1), ""},
{"int160", nil, big.NewInt(1), ""},
{"int168", nil, big.NewInt(1), ""},
{"int176", nil, big.NewInt(1), ""},
{"int184", nil, big.NewInt(1), ""},
{"int192", nil, big.NewInt(1), ""},
{"int200", nil, big.NewInt(1), ""},
{"int208", nil, big.NewInt(1), ""},
{"int216", nil, big.NewInt(1), ""},
{"int224", nil, big.NewInt(1), ""},
{"int232", nil, big.NewInt(1), ""},
{"int240", nil, big.NewInt(1), ""},
{"int248", nil, big.NewInt(1), ""},
{"uint30", nil, uint8(1), "abi: cannot use uint8 as type ptr as argument"},
{"uint8", nil, uint16(1), "abi: cannot use uint16 as type uint8 as argument"},
{"uint8", nil, uint32(1), "abi: cannot use uint32 as type uint8 as argument"},
{"uint8", nil, uint64(1), "abi: cannot use uint64 as type uint8 as argument"},
{"uint8", nil, int8(1), "abi: cannot use int8 as type uint8 as argument"},
{"uint8", nil, int16(1), "abi: cannot use int16 as type uint8 as argument"},
{"uint8", nil, int32(1), "abi: cannot use int32 as type uint8 as argument"},
{"uint8", nil, int64(1), "abi: cannot use int64 as type uint8 as argument"},
{"uint16", nil, uint16(1), ""},
{"uint16", nil, uint8(1), "abi: cannot use uint8 as type uint16 as argument"},
{"uint16[]", nil, []uint16{1, 2, 3}, ""},
{"uint16[]", nil, [3]uint16{1, 2, 3}, ""},
{"uint16[]", nil, []uint32{1, 2, 3}, "abi: cannot use []uint32 as type [0]uint16 as argument"},
{"uint16[3]", nil, [3]uint32{1, 2, 3}, "abi: cannot use [3]uint32 as type [3]uint16 as argument"},
{"uint16[3]", nil, [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"uint16[3]", nil, []uint16{1, 2, 3}, ""},
{"uint16[3]", nil, []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"},
{"address[]", nil, []common.Address{{1}}, ""},
{"address[1]", nil, []common.Address{{1}}, ""},
{"address[1]", nil, [1]common.Address{{1}}, ""},
{"address[2]", nil, [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"},
{"bytes32", nil, [32]byte{}, ""},
{"bytes31", nil, [31]byte{}, ""},
{"bytes30", nil, [30]byte{}, ""},
{"bytes29", nil, [29]byte{}, ""},
{"bytes28", nil, [28]byte{}, ""},
{"bytes27", nil, [27]byte{}, ""},
{"bytes26", nil, [26]byte{}, ""},
{"bytes25", nil, [25]byte{}, ""},
{"bytes24", nil, [24]byte{}, ""},
{"bytes23", nil, [23]byte{}, ""},
{"bytes22", nil, [22]byte{}, ""},
{"bytes21", nil, [21]byte{}, ""},
{"bytes20", nil, [20]byte{}, ""},
{"bytes19", nil, [19]byte{}, ""},
{"bytes18", nil, [18]byte{}, ""},
{"bytes17", nil, [17]byte{}, ""},
{"bytes16", nil, [16]byte{}, ""},
{"bytes15", nil, [15]byte{}, ""},
{"bytes14", nil, [14]byte{}, ""},
{"bytes13", nil, [13]byte{}, ""},
{"bytes12", nil, [12]byte{}, ""},
{"bytes11", nil, [11]byte{}, ""},
{"bytes10", nil, [10]byte{}, ""},
{"bytes9", nil, [9]byte{}, ""},
{"bytes8", nil, [8]byte{}, ""},
{"bytes7", nil, [7]byte{}, ""},
{"bytes6", nil, [6]byte{}, ""},
{"bytes5", nil, [5]byte{}, ""},
{"bytes4", nil, [4]byte{}, ""},
{"bytes3", nil, [3]byte{}, ""},
{"bytes2", nil, [2]byte{}, ""},
{"bytes1", nil, [1]byte{}, ""},
{"bytes32", nil, [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"},
{"bytes32", nil, common.Hash{1}, ""},
{"bytes31", nil, common.Hash{1}, "abi: cannot use common.Hash as type [31]uint8 as argument"},
{"bytes31", nil, [32]byte{}, "abi: cannot use [32]uint8 as type [31]uint8 as argument"},
{"bytes", nil, []byte{0, 1}, ""},
{"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"},
{"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"},
{"string", nil, "hello world", ""},
{"string", nil, string(""), ""},
{"string", nil, []byte{}, "abi: cannot use slice as type string as argument"},
{"bytes32[]", nil, [][32]byte{{}}, ""},
{"function", nil, [24]byte{}, ""},
{"bytes20", nil, common.Address{}, ""},
{"address", nil, [20]byte{}, ""},
{"address", nil, common.Address{}, ""},
{"bytes32[]]", nil, "", "invalid arg type in abi"},
{"invalidType", nil, "", "unsupported arg type: invalidType"},
{"invalidSlice[]", nil, "", "unsupported arg type: invalidSlice"},
// simple tuple
{"tuple", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, struct {
A *big.Int
B *big.Int
}{}, ""},
// tuple slice
{"tuple[]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
A *big.Int
B *big.Int
}{}, ""},
// tuple array
{"tuple[2]", []ArgumentMarshaling{{Name: "a", Type: "uint256"}, {Name: "b", Type: "uint256"}}, []struct {
A *big.Int
B *big.Int
}{{big.NewInt(0), big.NewInt(0)}, {big.NewInt(0), big.NewInt(0)}}, ""},
} {
typ, err := NewType(test.typ, test.components)
if err != nil && len(test.err) == 0 {
t.Fatal("unexpected parse error:", err)
} else if err != nil && len(test.err) != 0 {
if err.Error() != test.err {
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
}
continue
}
err = typeCheck(typ, reflect.ValueOf(test.input))
if err != nil && len(test.err) == 0 {
t.Errorf("%d failed. Expected no err but got: %v", i, err)
continue
}
if err == nil && len(test.err) != 0 {
t.Errorf("%d failed. Expected err: %v but got none", i, test.err)
continue
}
if err != nil && len(test.err) != 0 && err.Error() != test.err {
t.Errorf("%d failed. Expected err: '%v' got err: '%v'", i, test.err, err)
}
}
}
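Several of the negative cases above boil down to a reflect-level length check: a Go array supplied for a fixed-size ABI array must have exactly the declared length. A reduced sketch of that one rule (illustration only, not typeCheck):

package main

import (
	"fmt"
	"reflect"
)

// checkArrayLen mirrors, in a very reduced form, the fixed-array length rule
// the table above relies on.
func checkArrayLen(v interface{}, want int) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Array && rv.Kind() != reflect.Slice {
		return fmt.Errorf("abi: expected array or slice, got %s", rv.Kind())
	}
	if rv.Kind() == reflect.Array && rv.Len() != want {
		return fmt.Errorf("abi: cannot use [%d]array as type [%d]array as argument", rv.Len(), want)
	}
	return nil
}

func main() {
	fmt.Println(checkArrayLen([1]int{1}, 2))    // length mismatch error
	fmt.Println(checkArrayLen([2]int{1, 2}, 2)) // <nil>
}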

File diff suppressed because it is too large.


@@ -1,32 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"bytes"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
)
func TestTextHash(t *testing.T) {
hash := TextHash([]byte("Hello Joe"))
want := hexutil.MustDecode("0xa080337ae51c4e064c189e113edd0ba391df9206e2f49db658bb32cf2911730b")
if !bytes.Equal(hash, want) {
t.Fatalf("wrong hash: %x", hash)
}
}
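TextHash is the EIP-191 "personal message" digest: keccak256 over the "\x19Ethereum Signed Message:\n" prefix, the message length, and the message itself. A small sketch that should reproduce the value asserted above, assuming that prefix scheme and the legacy Keccak-256 from golang.org/x/crypto/sha3:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// personalHash applies the EIP-191 personal-message prefix and hashes the
// result with the legacy (pre-NIST) Keccak-256 variant.
func personalHash(msg []byte) []byte {
	prefixed := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(msg), msg)
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(prefixed))
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", personalHash([]byte("Hello Joe")))
	// expected (per the test above):
	// a080337ae51c4e064c189e113edd0ba391df9206e2f49db658bb32cf2911730b
}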


@@ -1,79 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"reflect"
"testing"
)
// Tests that HD derivation paths can be correctly parsed into our internal binary
// representation.
func TestHDPathParsing(t *testing.T) {
tests := []struct {
input string
output DerivationPath
}{
// Plain absolute derivation paths
{"m/44'/60'/0'/0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
{"m/44'/60'/0'/128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
{"m/44'/60'/0'/0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
{"m/44'/60'/0'/128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
{"m/2147483692/2147483708/2147483648/0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
{"m/2147483692/2147483708/2147483648/2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
// Plain relative derivation paths
{"0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
{"128", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
{"0'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
{"128'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
{"2147483648", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
// Hexadecimal absolute derivation paths
{"m/0x2C'/0x3c'/0x00'/0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
{"m/0x2C'/0x3c'/0x00'/0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 128}},
{"m/0x2C'/0x3c'/0x00'/0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
{"m/0x2C'/0x3c'/0x00'/0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 128}},
{"m/0x8000002C/0x8000003c/0x80000000/0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
{"m/0x8000002C/0x8000003c/0x80000000/0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0x80000000 + 0}},
// Hexadecimal relative derivation paths
{"0x00", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}},
{"0x80", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 128}},
{"0x00'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
{"0x80'", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 128}},
{"0x80000000", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0x80000000 + 0}},
// Weird inputs just to ensure they work
{" m / 44 '\n/\n 60 \n\n\t' /\n0 ' /\t\t 0", DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}},
// Invalid derivation paths
{"", nil}, // Empty relative derivation path
{"m", nil}, // Empty absolute derivation path
{"m/", nil}, // Missing last derivation component
{"/44'/60'/0'/0", nil}, // Absolute path without m prefix, might be user error
{"m/2147483648'", nil}, // Overflows 32 bit integer
{"m/-1'", nil}, // Cannot contain negative number
}
for i, tt := range tests {
if path, err := ParseDerivationPath(tt.input); !reflect.DeepEqual(path, tt.output) {
t.Errorf("test %d: parse mismatch: have %v (%v), want %v", i, path, err, tt.output)
} else if path == nil && err == nil {
t.Errorf("test %d: nil path and error: %v", i, err)
}
}
}
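All of the expected paths above share the 0x80000000 convention: a component at or above that value is hardened and is written with a trailing apostrophe. A small sketch going the other way, from the internal representation back to the familiar notation (illustration only, not the accounts package's own formatting):

package main

import (
	"fmt"
	"strings"
)

const hardened = 0x80000000 // components >= this value are hardened (')

// formatPath renders a parsed path back into m/44'/60'/0'/0 notation.
func formatPath(path []uint32) string {
	parts := []string{"m"}
	for _, c := range path {
		if c >= hardened {
			parts = append(parts, fmt.Sprintf("%d'", c-hardened))
		} else {
			parts = append(parts, fmt.Sprintf("%d", c))
		}
	}
	return strings.Join(parts, "/")
}

func main() {
	fmt.Println(formatPath([]uint32{hardened + 44, hardened + 60, hardened + 0, 0}))
	// m/44'/60'/0'/0
}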


@@ -1,406 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"time"
"github.com/cespare/cp"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
)
var (
cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore"))
cachetestAccounts = []accounts.Account{
{
Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8")},
},
{
Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "aaa")},
},
{
Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "zzz")},
},
}
)
func TestWatchNewFile(t *testing.T) {
t.Parallel()
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
// Ensure the watcher is started before adding any files.
ks.Accounts()
time.Sleep(1000 * time.Millisecond)
// Move in the files.
wantAccounts := make([]accounts.Account, len(cachetestAccounts))
for i := range cachetestAccounts {
wantAccounts[i] = accounts.Account{
Address: cachetestAccounts[i].Address,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, filepath.Base(cachetestAccounts[i].URL.Path))},
}
if err := cp.CopyFile(wantAccounts[i].URL.Path, cachetestAccounts[i].URL.Path); err != nil {
t.Fatal(err)
}
}
// ks should see the accounts.
var list []accounts.Account
for d := 200 * time.Millisecond; d < 5*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
t.Fatalf("wasn't notified of new accounts")
}
return
}
time.Sleep(d)
}
t.Errorf("got %s, want %s", spew.Sdump(list), spew.Sdump(wantAccounts))
}
func TestWatchNoDir(t *testing.T) {
t.Parallel()
// Create ks but not the directory that it watches.
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts()
if len(list) > 0 {
t.Error("initial account list not empty:", list)
}
time.Sleep(100 * time.Millisecond)
// Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa")
if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
t.Fatal(err)
}
// ks should see the account.
wantAccounts := []accounts.Account{cachetestAccounts[0]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
t.Fatalf("wasn't notified of new accounts")
}
return
}
time.Sleep(d)
}
t.Errorf("\ngot %v\nwant %v", list, wantAccounts)
}
func TestCacheInitialReload(t *testing.T) {
cache, _ := newAccountCache(cachetestDir)
accounts := cache.accounts()
if !reflect.DeepEqual(accounts, cachetestAccounts) {
t.Fatalf("got initial accounts: %swant %s", spew.Sdump(accounts), spew.Sdump(cachetestAccounts))
}
}
func TestCacheAddDeleteOrder(t *testing.T) {
cache, _ := newAccountCache("testdata/no-such-dir")
cache.watcher.running = true // prevent unexpected reloads
accs := []accounts.Account{
{
Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "-309830980"},
},
{
Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "ggg"},
},
{
Address: common.HexToAddress("8bda78331c916a08481428e4b07c96d3e916d165"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "zzzzzz-the-very-last-one.keyXXX"},
},
{
Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "SOMETHING.key"},
},
{
Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8"},
},
{
Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "aaa"},
},
{
Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: "zzz"},
},
}
for _, a := range accs {
cache.add(a)
}
// Add some of them twice to check that they don't get reinserted.
cache.add(accs[0])
cache.add(accs[2])
// Check that the account list is sorted by filename.
wantAccounts := make([]accounts.Account, len(accs))
copy(wantAccounts, accs)
sort.Sort(accountsByURL(wantAccounts))
list := cache.accounts()
if !reflect.DeepEqual(list, wantAccounts) {
t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts))
}
for _, a := range accs {
if !cache.hasAddress(a.Address) {
t.Errorf("expected hasAccount(%x) to return true", a.Address)
}
}
if cache.hasAddress(common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) {
t.Errorf("expected hasAccount(%x) to return false", common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"))
}
// Delete a few keys from the cache.
for i := 0; i < len(accs); i += 2 {
cache.delete(wantAccounts[i])
}
cache.delete(accounts.Account{Address: common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"), URL: accounts.URL{Scheme: KeyStoreScheme, Path: "something"}})
// Check content again after deletion.
wantAccountsAfterDelete := []accounts.Account{
wantAccounts[1],
wantAccounts[3],
wantAccounts[5],
}
list = cache.accounts()
if !reflect.DeepEqual(list, wantAccountsAfterDelete) {
t.Fatalf("got accounts after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantAccountsAfterDelete))
}
for _, a := range wantAccountsAfterDelete {
if !cache.hasAddress(a.Address) {
t.Errorf("expected hasAccount(%x) to return true", a.Address)
}
}
if cache.hasAddress(wantAccounts[0].Address) {
t.Errorf("expected hasAccount(%x) to return false", wantAccounts[0].Address)
}
}
func TestCacheFind(t *testing.T) {
dir := filepath.Join("testdata", "dir")
cache, _ := newAccountCache(dir)
cache.watcher.running = true // prevent unexpected reloads
accs := []accounts.Account{
{
Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "a.key")},
},
{
Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "b.key")},
},
{
Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "c.key")},
},
{
Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "c2.key")},
},
}
for _, a := range accs {
cache.add(a)
}
nomatchAccount := accounts.Account{
Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "something")},
}
tests := []struct {
Query accounts.Account
WantResult accounts.Account
WantError error
}{
// by address
{Query: accounts.Account{Address: accs[0].Address}, WantResult: accs[0]},
// by file
{Query: accounts.Account{URL: accs[0].URL}, WantResult: accs[0]},
// by basename
{Query: accounts.Account{URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Base(accs[0].URL.Path)}}, WantResult: accs[0]},
// by file and address
{Query: accs[0], WantResult: accs[0]},
// ambiguous address, tie resolved by file
{Query: accs[2], WantResult: accs[2]},
// ambiguous address error
{
Query: accounts.Account{Address: accs[2].Address},
WantError: &AmbiguousAddrError{
Addr: accs[2].Address,
Matches: []accounts.Account{accs[2], accs[3]},
},
},
// no match error
{Query: nomatchAccount, WantError: ErrNoMatch},
{Query: accounts.Account{URL: nomatchAccount.URL}, WantError: ErrNoMatch},
{Query: accounts.Account{URL: accounts.URL{Scheme: KeyStoreScheme, Path: filepath.Base(nomatchAccount.URL.Path)}}, WantError: ErrNoMatch},
{Query: accounts.Account{Address: nomatchAccount.Address}, WantError: ErrNoMatch},
}
for i, test := range tests {
a, err := cache.find(test.Query)
if !reflect.DeepEqual(err, test.WantError) {
t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError)
continue
}
if a != test.WantResult {
t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult)
continue
}
}
}
func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error {
var list []accounts.Account
for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
list = ks.Accounts()
if reflect.DeepEqual(list, wantAccounts) {
// ks should have also received change notifications
select {
case <-ks.changes:
default:
return fmt.Errorf("wasn't notified of new accounts")
}
return nil
}
time.Sleep(d)
}
return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
}
// TestUpdatedKeyfileContents tests that updating the contents of a keystore file
// is noticed by the watcher, and the account cache is updated accordingly
func TestUpdatedKeyfileContents(t *testing.T) {
t.Parallel()
// Create a temporary keystore to test with
rand.Seed(time.Now().UnixNano())
dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watch-test-%d-%d", os.Getpid(), rand.Int()))
ks := NewKeyStore(dir, LightScryptN, LightScryptP)
list := ks.Accounts()
if len(list) > 0 {
t.Error("initial account list not empty:", list)
}
time.Sleep(100 * time.Millisecond)
// Create the directory and copy a key file into it.
os.MkdirAll(dir, 0700)
defer os.RemoveAll(dir)
file := filepath.Join(dir, "aaa")
// Place one of our testfiles in there
if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
t.Fatal(err)
}
// ks should see the account.
wantAccounts := []accounts.Account{cachetestAccounts[0]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
if err := waitForAccounts(wantAccounts, ks); err != nil {
t.Error(err)
return
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents
if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
t.Fatal(err)
return
}
wantAccounts = []accounts.Account{cachetestAccounts[1]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
if err := waitForAccounts(wantAccounts, ks); err != nil {
t.Errorf("First replacement failed")
t.Error(err)
return
}
// needed so that modTime of `file` is different to its current value after forceCopyFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents again
if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
t.Fatal(err)
return
}
wantAccounts = []accounts.Account{cachetestAccounts[2]}
wantAccounts[0].URL = accounts.URL{Scheme: KeyStoreScheme, Path: file}
if err := waitForAccounts(wantAccounts, ks); err != nil {
t.Errorf("Second replacement failed")
t.Error(err)
return
}
// needed so that modTime of `file` is different to its current value after ioutil.WriteFile
time.Sleep(1000 * time.Millisecond)
// Now replace file contents with crap
if err := ioutil.WriteFile(file, []byte("foo"), 0644); err != nil {
t.Fatal(err)
return
}
if err := waitForAccounts([]accounts.Account{}, ks); err != nil {
t.Errorf("Emptying account file failed")
t.Error(err)
return
}
}
// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
func forceCopyFile(dst, src string) error {
data, err := ioutil.ReadFile(src)
if err != nil {
return err
}
return ioutil.WriteFile(dst, data, 0644)
}
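waitForAccounts above uses a doubling-backoff poll rather than a fixed sleep, which keeps the happy path fast while still tolerating slow filesystem notification. A generic sketch of the same pattern, detached from the keystore types:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil re-checks a condition with doubling delays until an upper bound
// on the delay is reached.
func pollUntil(check func() bool) error {
	for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
		if check() {
			return nil
		}
		time.Sleep(d)
	}
	return errors.New("condition not met before giving up")
}

func main() {
	start := time.Now()
	err := pollUntil(func() bool { return time.Since(start) > time.Second })
	fmt.Println(err) // <nil>
}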


@@ -1,387 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"io/ioutil"
"math/rand"
"os"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
)
var testSigData = make([]byte, 32)
func TestKeyStore(t *testing.T) {
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
a, err := ks.NewAccount("foo")
if err != nil {
t.Fatal(err)
}
if !strings.HasPrefix(a.URL.Path, dir) {
t.Errorf("account file %s doesn't have dir prefix", a.URL)
}
stat, err := os.Stat(a.URL.Path)
if err != nil {
t.Fatalf("account file %s doesn't exist (%v)", a.URL, err)
}
if runtime.GOOS != "windows" && stat.Mode() != 0600 {
t.Fatalf("account file has wrong mode: got %o, want %o", stat.Mode(), 0600)
}
if !ks.HasAddress(a.Address) {
t.Errorf("HasAccount(%x) should've returned true", a.Address)
}
if err := ks.Update(a, "foo", "bar"); err != nil {
t.Errorf("Update error: %v", err)
}
if err := ks.Delete(a, "bar"); err != nil {
t.Errorf("Delete error: %v", err)
}
if common.FileExist(a.URL.Path) {
t.Errorf("account file %s should be gone after Delete", a.URL)
}
if ks.HasAddress(a.Address) {
t.Errorf("HasAccount(%x) should've returned true after Delete", a.Address)
}
}
func TestSign(t *testing.T) {
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
pass := "" // not used but required by API
a1, err := ks.NewAccount(pass)
if err != nil {
t.Fatal(err)
}
if err := ks.Unlock(a1, ""); err != nil {
t.Fatal(err)
}
if _, err := ks.SignHash(accounts.Account{Address: a1.Address}, testSigData); err != nil {
t.Fatal(err)
}
}
func TestSignWithPassphrase(t *testing.T) {
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
pass := "passwd"
acc, err := ks.NewAccount(pass)
if err != nil {
t.Fatal(err)
}
if _, unlocked := ks.unlocked[acc.Address]; unlocked {
t.Fatal("expected account to be locked")
}
_, err = ks.SignHashWithPassphrase(acc, pass, testSigData)
if err != nil {
t.Fatal(err)
}
if _, unlocked := ks.unlocked[acc.Address]; unlocked {
t.Fatal("expected account to be locked")
}
if _, err = ks.SignHashWithPassphrase(acc, "invalid passwd", testSigData); err == nil {
t.Fatal("expected SignHashWithPassphrase to fail with invalid password")
}
}
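Outside of the test harness, the flow exercised by TestSign and TestSignWithPassphrase is the ordinary keystore usage path. A usage sketch, assuming the accounts/keystore API driven by the tests above:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/ethereum/go-ethereum/accounts/keystore"
)

func main() {
	dir, err := ioutil.TempDir("", "keystore-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	ks := keystore.NewKeyStore(dir, keystore.LightScryptN, keystore.LightScryptP)
	acc, err := ks.NewAccount("passwd")
	if err != nil {
		log.Fatal(err)
	}
	// Sign a 32-byte hash without leaving the account unlocked afterwards.
	hash := make([]byte, 32)
	sig, err := ks.SignHashWithPassphrase(acc, "passwd", hash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("account %x signature %x\n", acc.Address, sig)
}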
func TestTimedUnlock(t *testing.T) {
dir, ks := tmpKeyStore(t, true)
defer os.RemoveAll(dir)
pass := "foo"
a1, err := ks.NewAccount(pass)
if err != nil {
t.Fatal(err)
}
// Signing without passphrase fails because account is locked
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked before unlocking, got ", err)
}
// Signing with passphrase works
if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
t.Fatal(err)
}
// Signing without passphrase works because account is temp unlocked
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != nil {
t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
}
// Signing fails again after automatic locking
time.Sleep(250 * time.Millisecond)
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err)
}
}
func TestOverrideUnlock(t *testing.T) {
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
pass := "foo"
a1, err := ks.NewAccount(pass)
if err != nil {
t.Fatal(err)
}
// Unlock indefinitely.
if err = ks.TimedUnlock(a1, pass, 5*time.Minute); err != nil {
t.Fatal(err)
}
// Signing without passphrase works because account is temp unlocked
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != nil {
t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
}
// reset unlock to a shorter period, invalidates the previous unlock
if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
t.Fatal(err)
}
// Signing without passphrase still works because account is temp unlocked
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != nil {
t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
}
// Signing fails again after automatic locking
time.Sleep(250 * time.Millisecond)
_, err = ks.SignHash(accounts.Account{Address: a1.Address}, testSigData)
if err != ErrLocked {
t.Fatal("Signing should've failed with ErrLocked timeout expired, got ", err)
}
}
// This test should fail under -race if signing races the expiration goroutine.
func TestSignRace(t *testing.T) {
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
// Create a test account.
a1, err := ks.NewAccount("")
if err != nil {
t.Fatal("could not create the test account", err)
}
if err := ks.TimedUnlock(a1, "", 15*time.Millisecond); err != nil {
t.Fatal("could not unlock the test account", err)
}
end := time.Now().Add(500 * time.Millisecond)
for time.Now().Before(end) {
if _, err := ks.SignHash(accounts.Account{Address: a1.Address}, testSigData); err == ErrLocked {
return
} else if err != nil {
t.Errorf("Sign error: %v", err)
return
}
time.Sleep(1 * time.Millisecond)
}
t.Errorf("Account did not lock within the timeout")
}
// Tests that the wallet notifier loop starts and stops correctly based on the
// addition and removal of wallet event subscriptions.
func TestWalletNotifierLifecycle(t *testing.T) {
// Create a temporary keystore to test with
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
// Ensure that the notification updater is not running yet
time.Sleep(250 * time.Millisecond)
ks.mu.RLock()
updating := ks.updating
ks.mu.RUnlock()
if updating {
t.Errorf("wallet notifier running without subscribers")
}
// Subscribe to the wallet feed and ensure the updater boots up
updates := make(chan accounts.WalletEvent)
subs := make([]event.Subscription, 2)
for i := 0; i < len(subs); i++ {
// Create a new subscription
subs[i] = ks.Subscribe(updates)
// Ensure the notifier comes online
time.Sleep(250 * time.Millisecond)
ks.mu.RLock()
updating = ks.updating
ks.mu.RUnlock()
if !updating {
t.Errorf("sub %d: wallet notifier not running after subscription", i)
}
}
// Unsubscribe and ensure the updater terminates eventually
for i := 0; i < len(subs); i++ {
// Close an existing subscription
subs[i].Unsubscribe()
// Ensure the notifier shuts down at and only at the last close
for k := 0; k < int(walletRefreshCycle/(250*time.Millisecond))+2; k++ {
ks.mu.RLock()
updating = ks.updating
ks.mu.RUnlock()
if i < len(subs)-1 && !updating {
t.Fatalf("sub %d: event notifier stopped prematurely", i)
}
if i == len(subs)-1 && !updating {
return
}
time.Sleep(250 * time.Millisecond)
}
}
t.Errorf("wallet notifier didn't terminate after unsubscribe")
}
type walletEvent struct {
accounts.WalletEvent
a accounts.Account
}
// Tests that wallet notifications are correctly fired when accounts are added
// to or deleted from the keystore.
func TestWalletNotifications(t *testing.T) {
dir, ks := tmpKeyStore(t, false)
defer os.RemoveAll(dir)
// Subscribe to the wallet feed and collect events.
var (
events []walletEvent
updates = make(chan accounts.WalletEvent)
sub = ks.Subscribe(updates)
)
defer sub.Unsubscribe()
go func() {
for {
select {
case ev := <-updates:
events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
case <-sub.Err():
close(updates)
return
}
}
}()
// Randomly add and remove accounts.
var (
live = make(map[common.Address]accounts.Account)
wantEvents []walletEvent
)
for i := 0; i < 1024; i++ {
if create := len(live) == 0 || rand.Int()%4 > 0; create {
// Add a new account and ensure a wallet notification arrives
account, err := ks.NewAccount("")
if err != nil {
t.Fatalf("failed to create test account: %v", err)
}
live[account.Address] = account
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletArrived}, account})
} else {
// Delete a random account.
var account accounts.Account
for _, a := range live {
account = a
break
}
if err := ks.Delete(account, ""); err != nil {
t.Fatalf("failed to delete test account: %v", err)
}
delete(live, account.Address)
wantEvents = append(wantEvents, walletEvent{accounts.WalletEvent{Kind: accounts.WalletDropped}, account})
}
}
// Shut down the event collector and check events.
sub.Unsubscribe()
<-updates
checkAccounts(t, live, ks.Wallets())
checkEvents(t, wantEvents, events)
}
// checkAccounts checks that all known live accounts are present in the wallet list.
func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, wallets []accounts.Wallet) {
if len(live) != len(wallets) {
t.Errorf("wallet list doesn't match required accounts: have %d, want %d", len(wallets), len(live))
return
}
liveList := make([]accounts.Account, 0, len(live))
for _, account := range live {
liveList = append(liveList, account)
}
sort.Sort(accountsByURL(liveList))
for j, wallet := range wallets {
if accs := wallet.Accounts(); len(accs) != 1 {
t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
} else if accs[0] != liveList[j] {
t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
}
}
}
// checkEvents checks that all events in 'want' are present in 'have'. Events may be present multiple times.
func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
for _, wantEv := range want {
nmatch := 0
for ; len(have) > 0; nmatch++ {
if have[0].Kind != wantEv.Kind || have[0].a != wantEv.a {
break
}
have = have[1:]
}
if nmatch == 0 {
t.Fatalf("can't find event with Kind=%v for %x", wantEv.Kind, wantEv.a.Address)
}
}
}
func tmpKeyStore(t *testing.T, encrypted bool) (string, *KeyStore) {
d, err := ioutil.TempDir("", "eth-keystore-test")
if err != nil {
t.Fatal(err)
}
new := NewPlaintextKeyStore
if encrypted {
new = func(kd string) *KeyStore { return NewKeyStore(kd, veryLightScryptN, veryLightScryptP) }
}
return d, new(d)
}


@ -1,60 +0,0 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"io/ioutil"
"testing"
"github.com/ethereum/go-ethereum/common"
)
const (
veryLightScryptN = 2
veryLightScryptP = 1
)
// Tests that a json key file can be decrypted and encrypted in multiple rounds.
func TestKeyEncryptDecrypt(t *testing.T) {
keyjson, err := ioutil.ReadFile("testdata/very-light-scrypt.json")
if err != nil {
t.Fatal(err)
}
password := ""
address := common.HexToAddress("45dea0fb0bba44f4fcf290bba71fd57d7117cbb8")
// Do a few rounds of decryption and encryption
for i := 0; i < 3; i++ {
// Try a bad password first
if _, err := DecryptKey(keyjson, password+"bad"); err == nil {
t.Errorf("test %d: json key decrypted with bad password", i)
}
// Decrypt with the correct password
key, err := DecryptKey(keyjson, password)
if err != nil {
t.Fatalf("test %d: json key failed to decrypt: %v", i, err)
}
if key.Address != address {
t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
}
// Recrypt with a new password and start over
password += "new data appended"
if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
t.Errorf("test %d: failed to recrypt key %v", i, err)
}
}
}


@ -1,266 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"crypto/rand"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
d, err := ioutil.TempDir("", "geth-keystore-test")
if err != nil {
t.Fatal(err)
}
if encrypted {
ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
} else {
ks = &keyStorePlain{d}
}
return d, ks
}
func TestKeyStorePlain(t *testing.T) {
dir, ks := tmpKeyStoreIface(t, false)
defer os.RemoveAll(dir)
pass := "" // not used but required by API
k1, account, err := storeNewKey(ks, rand.Reader, pass)
if err != nil {
t.Fatal(err)
}
k2, err := ks.GetKey(k1.Address, account.URL.Path, pass)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.Address, k2.Address) {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
t.Fatal(err)
}
}
func TestKeyStorePassphrase(t *testing.T) {
dir, ks := tmpKeyStoreIface(t, true)
defer os.RemoveAll(dir)
pass := "foo"
k1, account, err := storeNewKey(ks, rand.Reader, pass)
if err != nil {
t.Fatal(err)
}
k2, err := ks.GetKey(k1.Address, account.URL.Path, pass)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.Address, k2.Address) {
t.Fatal(err)
}
if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
t.Fatal(err)
}
}
func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
dir, ks := tmpKeyStoreIface(t, true)
defer os.RemoveAll(dir)
pass := "foo"
k1, account, err := storeNewKey(ks, rand.Reader, pass)
if err != nil {
t.Fatal(err)
}
if _, err = ks.GetKey(k1.Address, account.URL.Path, "bar"); err != ErrDecrypt {
t.Fatalf("wrong error for invalid passphrase\ngot %q\nwant %q", err, ErrDecrypt)
}
}
func TestImportPreSaleKey(t *testing.T) {
dir, ks := tmpKeyStoreIface(t, true)
defer os.RemoveAll(dir)
// file content of a presale key file generated with:
// python pyethsaletool.py genwallet
// with password "foo"
fileContent := "{\"encseed\": \"26d87f5f2bf9835f9a47eefae571bc09f9107bb13d54ff12a4ec095d01f83897494cf34f7bed2ed34126ecba9db7b62de56c9d7cd136520a0427bfb11b8954ba7ac39b90d4650d3448e31185affcd74226a68f1e94b1108e6e0a4a91cdd83eba\", \"ethaddr\": \"d4584b5f6229b7be90727b0fc8c6b91bb427821f\", \"email\": \"gustav.simonsson@gmail.com\", \"btcaddr\": \"1EVknXyFC68kKNLkh6YnKzW41svSRoaAcx\"}"
pass := "foo"
account, _, err := importPreSaleKey(ks, []byte(fileContent), pass)
if err != nil {
t.Fatal(err)
}
if account.Address != common.HexToAddress("d4584b5f6229b7be90727b0fc8c6b91bb427821f") {
t.Errorf("imported account has wrong address %x", account.Address)
}
if !strings.HasPrefix(account.URL.Path, dir) {
t.Errorf("imported account file not in keystore directory: %q", account.URL)
}
}
// Test and utils for the key store tests in the Ethereum JSON tests;
// tests/testdata/KeyStoreTests/basic_tests.json
type KeyStoreTestV3 struct {
Json encryptedKeyJSONV3
Password string
Priv string
}
type KeyStoreTestV1 struct {
Json encryptedKeyJSONV1
Password string
Priv string
}
func TestV3_PBKDF2_1(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t)
}
var testsSubmodule = filepath.Join("..", "..", "tests", "testdata", "KeyStoreTests")
func skipIfSubmoduleMissing(t *testing.T) {
if !common.FileExist(testsSubmodule) {
t.Skipf("can't find JSON tests from submodule at %s", testsSubmodule)
}
}
func TestV3_PBKDF2_2(t *testing.T) {
skipIfSubmoduleMissing(t)
t.Parallel()
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
testDecryptV3(tests["test1"], t)
}
func TestV3_PBKDF2_3(t *testing.T) {
skipIfSubmoduleMissing(t)
t.Parallel()
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
testDecryptV3(tests["python_generated_test_with_odd_iv"], t)
}
func TestV3_PBKDF2_4(t *testing.T) {
skipIfSubmoduleMissing(t)
t.Parallel()
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
testDecryptV3(tests["evilnonce"], t)
}
func TestV3_Scrypt_1(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
testDecryptV3(tests["wikipage_test_vector_scrypt"], t)
}
func TestV3_Scrypt_2(t *testing.T) {
skipIfSubmoduleMissing(t)
t.Parallel()
tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
testDecryptV3(tests["test2"], t)
}
func TestV1_1(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV1("testdata/v1_test_vector.json", t)
testDecryptV1(tests["test1"], t)
}
func TestV1_2(t *testing.T) {
t.Parallel()
ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
k, err := ks.GetKey(addr, file, "g")
if err != nil {
t.Fatal(err)
}
privHex := hex.EncodeToString(crypto.FromECDSA(k.PrivateKey))
expectedHex := "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d"
if privHex != expectedHex {
t.Fatal(fmt.Errorf("Unexpected privkey: %v, expected %v", privHex, expectedHex))
}
}
func testDecryptV3(test KeyStoreTestV3, t *testing.T) {
privBytes, _, err := decryptKeyV3(&test.Json, test.Password)
if err != nil {
t.Fatal(err)
}
privHex := hex.EncodeToString(privBytes)
if test.Priv != privHex {
t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex))
}
}
func testDecryptV1(test KeyStoreTestV1, t *testing.T) {
privBytes, _, err := decryptKeyV1(&test.Json, test.Password)
if err != nil {
t.Fatal(err)
}
privHex := hex.EncodeToString(privBytes)
if test.Priv != privHex {
t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex))
}
}
func loadKeyStoreTestV3(file string, t *testing.T) map[string]KeyStoreTestV3 {
tests := make(map[string]KeyStoreTestV3)
err := common.LoadJSON(file, &tests)
if err != nil {
t.Fatal(err)
}
return tests
}
func loadKeyStoreTestV1(file string, t *testing.T) map[string]KeyStoreTestV1 {
tests := make(map[string]KeyStoreTestV1)
err := common.LoadJSON(file, &tests)
if err != nil {
t.Fatal(err)
}
return tests
}
func TestKeyForDirectICAP(t *testing.T) {
t.Parallel()
key := NewKeyForDirectICAP(rand.Reader)
if !strings.HasPrefix(key.Address.Hex(), "0x00") {
t.Errorf("Expected first address byte to be zero, have: %s", key.Address.Hex())
}
}
func TestV3_31_Byte_Key(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
testDecryptV3(tests["31_byte_key"], t)
}
func TestV3_30_Byte_Key(t *testing.T) {
t.Parallel()
tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
testDecryptV3(tests["30_byte_key"], t)
}


@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}


@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}


@ -1 +0,0 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}


@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}


@ -1,21 +0,0 @@
This directory contains accounts for testing.
The passphrase that unlocks them is "foobar".
The "good" key files which are supposed to be loadable are:
- File: UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
Address: 0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8
- File: aaa
Address: 0xf466859ead1932d743d622cb74fc058882e8648a
- File: zzz
Address: 0x289d485d9771714cce91d3393d764e1311907acc
The other files (including this README) are broken in various ways
and should not be picked up by package accounts:
- File: no-address (missing address field, otherwise same as "aaa")
- File: garbage (file with random data)
- File: empty (file with no content)
- File: swapfile~ (should be skipped)
- File: .hiddenfile (should be skipped)
- File: foo/... (should be skipped because it is a directory)
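For reference, a minimal, hypothetical sketch of decrypting one of the "good" files above (not part of the original README; the import path and working directory are assumptions):
package main
import (
	"fmt"
	"io/ioutil"
	"github.com/ethereum/go-ethereum/accounts/keystore"
)
func main() {
	// "aaa" is one of the loadable key files listed above; the shared passphrase is "foobar"
	keyjson, err := ioutil.ReadFile("aaa")
	if err != nil {
		panic(err)
	}
	key, err := keystore.DecryptKey(keyjson, "foobar")
	if err != nil {
		panic(err)
	}
	fmt.Printf("decrypted address: %x\n", key.Address)
}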


@ -1 +0,0 @@
{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}


@ -1 +0,0 @@
{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}


@ -1 +0,0 @@
{"address":"fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e","crypto":{"cipher":"aes-128-ctr","ciphertext":"8124d5134aa4a927c79fd852989e4b5419397566f04b0936a1eb1d168c7c68a5","cipherparams":{"iv":"e2febe17176414dd2cda28287947eb2f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"44b415ede89f3bdd6830390a21b78965f571b347a589d1d943029f016c5e8bd5"},"mac":"5e149ff25bfd9dd45746a84bb2bcd2f015f2cbca2b6d25c5de8c29617f71fe5b"},"id":"d6ac5452-2b2c-4d3c-ad80-4bf0327d971c","version":3}

Binary file not shown.


@ -1 +0,0 @@
{"crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}


@ -1 +0,0 @@
{"address":"0000000000000000000000000000000000000000","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}


@ -1 +0,0 @@
{"address":"289d485d9771714cce91d3393d764e1311907acc","crypto":{"cipher":"aes-128-ctr","ciphertext":"faf32ca89d286b107f5e6d842802e05263c49b78d46eac74e6109e9a963378ab","cipherparams":{"iv":"558833eec4a665a8c55608d7d503407d"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"d571fff447ffb24314f9513f5160246f09997b857ac71348b73e785aab40dc04"},"mac":"21edb85ff7d0dab1767b9bf498f2c3cb7be7609490756bd32300bb213b59effe"},"id":"3279afcf-55ba-43ff-8997-02dcc46a6525","version":3}


@ -1 +0,0 @@
{"address":"cb61d5a9c4896fb9658090b597ef0e7be6f7b67e","Crypto":{"cipher":"aes-128-cbc","ciphertext":"6143d3192db8b66eabd693d9c4e414dcfaee52abda451af79ccf474dafb35f1bfc7ea013aa9d2ee35969a1a2e8d752d0","cipherparams":{"iv":"35337770fc2117994ecdcad026bccff4"},"kdf":"scrypt","kdfparams":{"n":262144,"r":8,"p":1,"dklen":32,"salt":"9afcddebca541253a2f4053391c673ff9fe23097cd8555d149d929e4ccf1257f"},"mac":"3f3d5af884b17a100b0b3232c0636c230a54dc2ac8d986227219b0dd89197644","version":"1"},"id":"e25f7c1f-d318-4f29-b62c-687190d4d299","version":"1"}


@ -1,28 +0,0 @@
{
"test1": {
"json": {
"Crypto": {
"cipher": "aes-128-cbc",
"cipherparams": {
"iv": "35337770fc2117994ecdcad026bccff4"
},
"ciphertext": "6143d3192db8b66eabd693d9c4e414dcfaee52abda451af79ccf474dafb35f1bfc7ea013aa9d2ee35969a1a2e8d752d0",
"kdf": "scrypt",
"kdfparams": {
"dklen": 32,
"n": 262144,
"p": 1,
"r": 8,
"salt": "9afcddebca541253a2f4053391c673ff9fe23097cd8555d149d929e4ccf1257f"
},
"mac": "3f3d5af884b17a100b0b3232c0636c230a54dc2ac8d986227219b0dd89197644",
"version": "1"
},
"address": "cb61d5a9c4896fb9658090b597ef0e7be6f7b67e",
"id": "e25f7c1f-d318-4f29-b62c-687190d4d299",
"version": "1"
},
"password": "g",
"priv": "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d"
}
}


@ -1,97 +0,0 @@
{
"wikipage_test_vector_scrypt": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "83dbcc02d8ccb40e466191a123791e0e"
},
"ciphertext" : "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 262144,
"r" : 1,
"p" : 8,
"salt" : "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19"
},
"mac" : "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097"
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version" : 3
},
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
},
"wikipage_test_vector_pbkdf2": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "6087dab2f9fdbbfaddc31a909735c1e6"
},
"ciphertext" : "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf" : "pbkdf2",
"kdfparams" : {
"c" : 262144,
"dklen" : 32,
"prf" : "hmac-sha256",
"salt" : "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd"
},
"mac" : "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2"
},
"id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version" : 3
},
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
},
"31_byte_key": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "e0c41130a323adc1446fc82f724bca2f"
},
"ciphertext" : "9517cd5bdbe69076f9bf5057248c6c050141e970efa36ce53692d5d59a3984",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 2,
"r" : 8,
"p" : 1,
"salt" : "711f816911c92d649fb4c84b047915679933555030b3552c1212609b38208c63"
},
"mac" : "d5e116151c6aa71470e67a7d42c9620c75c4d23229847dcc127794f0732b0db5"
},
"id" : "fecfc4ce-e956-48fd-953b-30f8b52ed66c",
"version" : 3
},
"password": "foo",
"priv": "fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35"
},
"30_byte_key": {
"json": {
"crypto" : {
"cipher" : "aes-128-ctr",
"cipherparams" : {
"iv" : "3ca92af36ad7c2cd92454c59cea5ef00"
},
"ciphertext" : "108b7d34f3442fc26ab1ab90ca91476ba6bfa8c00975a49ef9051dc675aa",
"kdf" : "scrypt",
"kdfparams" : {
"dklen" : 32,
"n" : 2,
"r" : 8,
"p" : 1,
"salt" : "d0769e608fb86cda848065642a9c6fa046845c928175662b8e356c77f914cd3b"
},
"mac" : "75d0e6759f7b3cefa319c3be41680ab6beea7d8328653474bd06706d4cc67420"
},
"id" : "a37e1559-5955-450d-8075-7b8931b392b2",
"version" : 3
},
"password": "foo",
"priv": "81c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018"
}
}


@ -1 +0,0 @@
{"address":"45dea0fb0bba44f4fcf290bba71fd57d7117cbb8","crypto":{"cipher":"aes-128-ctr","ciphertext":"b87781948a1befd247bff51ef4063f716cf6c2d3481163e9a8f42e1f9bb74145","cipherparams":{"iv":"dc4926b48a105133d2f16b96833abf1e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"004244bbdc51cadda545b1cfa43cff9ed2ae88e08c61f1479dbb45410722f8f0"},"mac":"39990c1684557447940d4c69e06b1b82b2aceacb43f284df65c956daf3046b85"},"id":"ce541d8d-c79b-40f8-9f8c-20f59616faba","version":3}


@ -1,96 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"testing"
)
func TestURLParsing(t *testing.T) {
url, err := parseURL("https://ethereum.org")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
}
if url.Path != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
}
_, err = parseURL("ethereum.org")
if err == nil {
t.Error("expected err, got: nil")
}
}
func TestURLString(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
if url.String() != "https://ethereum.org" {
t.Errorf("expected: %v, got: %v", "https://ethereum.org", url.String())
}
url = URL{Scheme: "", Path: "ethereum.org"}
if url.String() != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "ethereum.org", url.String())
}
}
func TestURLMarshalJSON(t *testing.T) {
url := URL{Scheme: "https", Path: "ethereum.org"}
json, err := url.MarshalJSON()
if err != nil {
t.Errorf("unexpcted error: %v", err)
}
if string(json) != "\"https://ethereum.org\"" {
t.Errorf("expected: %v, got: %v", "\"https://ethereum.org\"", string(json))
}
}
func TestURLUnmarshalJSON(t *testing.T) {
url := &URL{}
err := url.UnmarshalJSON([]byte("\"https://ethereum.org\""))
if err != nil {
t.Errorf("unexpcted error: %v", err)
}
if url.Scheme != "https" {
t.Errorf("expected: %v, got: %v", "https", url.Scheme)
}
if url.Path != "ethereum.org" {
t.Errorf("expected: %v, got: %v", "https", url.Path)
}
}
func TestURLComparison(t *testing.T) {
tests := []struct {
urlA URL
urlB URL
expect int
}{
{URL{"https", "ethereum.org"}, URL{"https", "ethereum.org"}, 0},
{URL{"http", "ethereum.org"}, URL{"https", "ethereum.org"}, -1},
{URL{"https", "ethereum.org/a"}, URL{"https", "ethereum.org"}, 1},
{URL{"https", "abc.org"}, URL{"https", "ethereum.org"}, -1},
}
for i, tt := range tests {
result := tt.urlA.Cmp(tt.urlB)
if result != tt.expect {
t.Errorf("test %d: cmp mismatch: expected: %d, got: %d", i, tt.expect, result)
}
}
}


@ -15,9 +15,9 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/ecies"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/sctx"
"github.com/ethersphere/swarm/storage"
"golang.org/x/crypto/scrypt"
"golang.org/x/crypto/sha3"
cli "gopkg.in/urfave/cli.v1"

api/api.go Normal file

@ -0,0 +1,993 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
//go:generate mimegen --types=./../../cmd/swarm/mimegen/mime.types --package=api --out=gen_mime.go
//go:generate gofmt -s -w gen_mime.go
import (
"archive/tar"
"context"
"crypto/ecdsa"
"encoding/hex"
"errors"
"fmt"
"io"
"math/big"
"net/http"
"path"
"strings"
"bytes"
"mime"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/contracts/ens"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/spancontext"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/storage/feed"
"github.com/ethersphere/swarm/storage/feed/lookup"
opentracing "github.com/opentracing/opentracing-go"
)
var (
apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil)
apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil)
apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil)
apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil)
apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil)
apiManifestUpdateCount = metrics.NewRegisteredCounter("api.manifestupdate.count", nil)
apiManifestUpdateFail = metrics.NewRegisteredCounter("api.manifestupdate.fail", nil)
apiManifestListCount = metrics.NewRegisteredCounter("api.manifestlist.count", nil)
apiManifestListFail = metrics.NewRegisteredCounter("api.manifestlist.fail", nil)
apiDeleteCount = metrics.NewRegisteredCounter("api.delete.count", nil)
apiDeleteFail = metrics.NewRegisteredCounter("api.delete.fail", nil)
apiGetTarCount = metrics.NewRegisteredCounter("api.gettar.count", nil)
apiGetTarFail = metrics.NewRegisteredCounter("api.gettar.fail", nil)
apiUploadTarCount = metrics.NewRegisteredCounter("api.uploadtar.count", nil)
apiUploadTarFail = metrics.NewRegisteredCounter("api.uploadtar.fail", nil)
apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil)
apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil)
apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil)
apiAddFileFail = metrics.NewRegisteredCounter("api.addfile.fail", nil)
apiRmFileCount = metrics.NewRegisteredCounter("api.removefile.count", nil)
apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil)
apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil)
apiAppendFileFail = metrics.NewRegisteredCounter("api.appendfile.fail", nil)
apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil)
)
// Resolver is the interface used to resolve a domain name to a hash using ENS
type Resolver interface {
Resolve(string) (common.Hash, error)
}
// ResolveValidator is used to validate the contained Resolver
type ResolveValidator interface {
Resolver
Owner(node [32]byte) (common.Address, error)
HeaderByNumber(context.Context, *big.Int) (*types.Header, error)
}
// NoResolverError is returned by MultiResolver.Resolve if no resolver
// can be found for the address.
type NoResolverError struct {
TLD string
}
// NewNoResolverError creates a NoResolverError for the given top level domain
func NewNoResolverError(tld string) *NoResolverError {
return &NoResolverError{TLD: tld}
}
// Error implements the error interface for NoResolverError
func (e *NoResolverError) Error() string {
if e.TLD == "" {
return "no ENS resolver"
}
return fmt.Sprintf("no ENS endpoint configured to resolve .%s TLD names", e.TLD)
}
// MultiResolver is used to resolve URL addresses based on their TLDs.
// Each TLD can have multiple resolvers, and the resolution from the
// first one in the sequence will be returned.
type MultiResolver struct {
resolvers map[string][]ResolveValidator
nameHash func(string) common.Hash
}
// MultiResolverOption sets options for MultiResolver and is used as
// arguments for its constructor.
type MultiResolverOption func(*MultiResolver)
// MultiResolverOptionWithResolver adds a Resolver to a list of resolvers
// for a specific TLD. If TLD is an empty string, the resolver will be added
// to the list of default resolvers, the ones that will be used to resolve
// addresses which do not have a TLD-specific resolver.
func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption {
return func(m *MultiResolver) {
m.resolvers[tld] = append(m.resolvers[tld], r)
}
}
// NewMultiResolver creates a new instance of MultiResolver.
func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) {
m = &MultiResolver{
resolvers: make(map[string][]ResolveValidator),
nameHash: ens.EnsNode,
}
for _, o := range opts {
o(m)
}
return m
}
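// Illustrative sketch, not part of the original file: how the constructor and
// option above might be wired together. ethResolver stands for any
// ResolveValidator implementation (for example an ENS-backed one) and is assumed here.
func exampleMultiResolver(ethResolver ResolveValidator) (common.Hash, error) {
	mr := NewMultiResolver(
		MultiResolverOptionWithResolver(ethResolver, "eth"),
	)
	// names ending in .eth are routed to ethResolver; other names fall back to the defaults
	return mr.Resolve("theswarm.eth")
}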
// Resolve resolves an address by choosing a Resolver based on its TLD.
// If several Resolvers are registered, whether default or for a specific TLD,
// the Hash from the first one that does not return an error
// is returned.
func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) {
rs, err := m.getResolveValidator(addr)
if err != nil {
return h, err
}
for _, r := range rs {
h, err = r.Resolve(addr)
if err == nil {
return
}
}
return
}
// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain
func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) {
rs := m.resolvers[""]
tld := path.Ext(name)
if tld != "" {
tld = tld[1:]
rstld, ok := m.resolvers[tld]
if ok {
return rstld, nil
}
}
if len(rs) == 0 {
return rs, NewNoResolverError(tld)
}
return rs, nil
}
/*
API implements webserver/file system related content storage and retrieval
on top of the FileStore
it is the public interface of the FileStore which is included in the ethereum stack
*/
type API struct {
feed *feed.Handler
fileStore *storage.FileStore
dns Resolver
Tags *chunk.Tags
Decryptor func(context.Context, string) DecryptFunc
}
// NewAPI is the API constructor; it initialises a new API instance.
func NewAPI(fileStore *storage.FileStore, dns Resolver, feedHandler *feed.Handler, pk *ecdsa.PrivateKey, tags *chunk.Tags) (self *API) {
self = &API{
fileStore: fileStore,
dns: dns,
feed: feedHandler,
Tags: tags,
Decryptor: func(ctx context.Context, credentials string) DecryptFunc {
return self.doDecrypt(ctx, credentials, pk)
},
}
return
}
// Retrieve FileStore reader API
func (a *API) Retrieve(ctx context.Context, addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) {
return a.fileStore.Retrieve(ctx, addr)
}
// Store wraps the Store API call of the embedded FileStore
func (a *API) Store(ctx context.Context, data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(ctx context.Context) error, err error) {
log.Debug("api.store", "size", size)
return a.fileStore.Store(ctx, data, size, toEncrypt)
}
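// Illustrative sketch, not part of the original file: a store-and-retrieve
// round trip through the two wrappers above, with encryption disabled. The
// payload and function name are purely examples.
func exampleStoreAndRetrieve(ctx context.Context, a *API) ([]byte, error) {
	payload := []byte("hello swarm")
	addr, wait, err := a.Store(ctx, bytes.NewReader(payload), int64(len(payload)), false)
	if err != nil {
		return nil, err
	}
	// wait until the chunks are actually stored before reading them back
	if err := wait(ctx); err != nil {
		return nil, err
	}
	reader, _ := a.Retrieve(ctx, addr)
	buf := make([]byte, len(payload))
	if _, err := io.ReadFull(reader, buf); err != nil {
		return nil, err
	}
	return buf, nil
}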
// Resolve resolves a name into a content-addressed hash,
// where address can be either an ENS name or a content-addressed hash
func (a *API) Resolve(ctx context.Context, address string) (storage.Address, error) {
// if DNS is not configured, accept the address only if it is already a content hash
if a.dns == nil {
if hashMatcher.MatchString(address) {
return common.Hex2Bytes(address), nil
}
apiResolveFail.Inc(1)
return nil, fmt.Errorf("no DNS to resolve name: %q", address)
}
// try and resolve the address
resolved, err := a.dns.Resolve(address)
if err != nil {
if hashMatcher.MatchString(address) {
return common.Hex2Bytes(address), nil
}
return nil, err
}
return resolved[:], nil
}
// ResolveURI resolves a URI to an Address using the MultiResolver.
func (a *API) ResolveURI(ctx context.Context, uri *URI, credentials string) (storage.Address, error) {
apiResolveCount.Inc(1)
log.Trace("resolving", "uri", uri.Addr)
var sp opentracing.Span
ctx, sp = spancontext.StartSpan(
ctx,
"api.resolve")
defer sp.Finish()
// if the URI is immutable, check if the address looks like a hash
if uri.Immutable() {
key := uri.Address()
if key == nil {
return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr)
}
return key, nil
}
addr, err := a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, err
}
if uri.Path == "" {
return addr, nil
}
walker, err := a.NewManifestWalker(ctx, addr, a.Decryptor(ctx, credentials), nil)
if err != nil {
return nil, err
}
var entry *ManifestEntry
walker.Walk(func(e *ManifestEntry) error {
// if the entry matches the path, set entry and stop
// the walk
if e.Path == uri.Path {
entry = e
// return an error to cancel the walk
return errors.New("found")
}
// ignore non-manifest files
if e.ContentType != ManifestType {
return nil
}
// if the manifest's path is a prefix of the
// requested path, recurse into it by returning
// nil and continuing the walk
if strings.HasPrefix(uri.Path, e.Path) {
return nil
}
return ErrSkipManifest
})
if entry == nil {
return nil, errors.New("not found")
}
addr = storage.Address(common.Hex2Bytes(entry.Hash))
return addr, nil
}
// Get uses iterative manifest retrieval and prefix matching
// to resolve basePath to content using FileStore retrieve.
// It returns a section reader, mimeType, status, the key of the actual content and an error
func (a *API) Get(ctx context.Context, decrypt DecryptFunc, manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) {
log.Debug("api.get", "key", manifestAddr, "path", path)
apiGetCount.Inc(1)
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, decrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
return nil, "", http.StatusNotFound, nil, err
}
log.Debug("trie getting entry", "key", manifestAddr, "path", path)
entry, _ := trie.getEntry(path)
if entry != nil {
log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash)
if entry.ContentType == ManifestType {
log.Debug("entry is manifest", "key", manifestAddr, "new key", entry.Hash)
adr, err := hex.DecodeString(entry.Hash)
if err != nil {
return nil, "", 0, nil, err
}
return a.Get(ctx, decrypt, adr, entry.Path)
}
// we need to do some extra work if this is a Swarm feed manifest
if entry.ContentType == FeedContentType {
if entry.Feed == nil {
return reader, mimeType, status, nil, fmt.Errorf("Cannot decode Feed in manifest")
}
_, err := a.feed.Lookup(ctx, feed.NewQueryLatest(entry.Feed, lookup.NoClue))
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
log.Debug(fmt.Sprintf("get feed update content error: %v", err))
return reader, mimeType, status, nil, err
}
// get the data of the update
_, contentAddr, err := a.feed.GetContent(entry.Feed)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
log.Warn(fmt.Sprintf("get feed update content error: %v", err))
return reader, mimeType, status, nil, err
}
// extract content hash
if len(contentAddr) != storage.AddressLength {
apiGetInvalid.Inc(1)
status = http.StatusUnprocessableEntity
errorMessage := fmt.Sprintf("invalid swarm hash in feed update. Expected %d bytes. Got %d", storage.AddressLength, len(contentAddr))
log.Warn(errorMessage)
return reader, mimeType, status, nil, errors.New(errorMessage)
}
manifestAddr = storage.Address(contentAddr)
log.Trace("feed update contains swarm hash", "key", manifestAddr)
// get the manifest the swarm hash points to
trie, err := loadManifest(ctx, a.fileStore, manifestAddr, nil, NOOPDecrypt)
if err != nil {
apiGetNotFound.Inc(1)
status = http.StatusNotFound
log.Warn(fmt.Sprintf("loadManifestTrie (feed update) error: %v", err))
return reader, mimeType, status, nil, err
}
// finally, get the manifest entry
// it will always be the entry on path ""
entry, _ = trie.getEntry(path)
if entry == nil {
status = http.StatusNotFound
apiGetNotFound.Inc(1)
err = fmt.Errorf("manifest (feed update) entry for '%s' not found", path)
log.Trace("manifest (feed update) entry not found", "key", manifestAddr, "path", path)
return reader, mimeType, status, nil, err
}
}
// regardless of feed update manifests or normal manifests we will converge at this point
// get the key the manifest entry points to and serve it if it's unambiguous
contentAddr = common.Hex2Bytes(entry.Hash)
status = entry.Status
if status == http.StatusMultipleChoices {
apiGetHTTP300.Inc(1)
return nil, entry.ContentType, status, contentAddr, err
}
mimeType = entry.ContentType
log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType)
reader, _ = a.fileStore.Retrieve(ctx, contentAddr)
} else {
// no entry found
status = http.StatusNotFound
apiGetNotFound.Inc(1)
err = fmt.Errorf("Not found: could not find resource '%s'", path)
log.Trace("manifest entry not found", "key", contentAddr, "path", path)
}
return
}
func (a *API) Delete(ctx context.Context, addr string, path string) (storage.Address, error) {
apiDeleteCount.Inc(1)
uri, err := Parse("bzz:/" + addr)
if err != nil {
apiDeleteFail.Inc(1)
return nil, err
}
key, err := a.ResolveURI(ctx, uri, EmptyCredentials)
if err != nil {
return nil, err
}
newKey, err := a.UpdateManifest(ctx, key, func(mw *ManifestWriter) error {
log.Debug(fmt.Sprintf("removing %s from manifest %s", path, key.Log()))
return mw.RemoveEntry(path)
})
if err != nil {
apiDeleteFail.Inc(1)
return nil, err
}
return newKey, nil
}
// GetDirectoryTar fetches a requested directory as a tarstream.
// It returns an io.ReadCloser and an error. Do not forget to Close() the returned ReadCloser
func (a *API) GetDirectoryTar(ctx context.Context, decrypt DecryptFunc, uri *URI) (io.ReadCloser, error) {
apiGetTarCount.Inc(1)
addr, err := a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, err
}
walker, err := a.NewManifestWalker(ctx, addr, decrypt, nil)
if err != nil {
apiGetTarFail.Inc(1)
return nil, err
}
piper, pipew := io.Pipe()
tw := tar.NewWriter(pipew)
go func() {
err := walker.Walk(func(entry *ManifestEntry) error {
// ignore manifests (walk will recurse into them)
if entry.ContentType == ManifestType {
return nil
}
// retrieve the entry's key and size
reader, _ := a.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash)))
size, err := reader.Size(ctx, nil)
if err != nil {
return err
}
// write a tar header for the entry
hdr := &tar.Header{
Name: entry.Path,
Mode: entry.Mode,
Size: size,
ModTime: entry.ModTime,
Xattrs: map[string]string{
"user.swarm.content-type": entry.ContentType,
},
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
// copy the file into the tar stream
n, err := io.Copy(tw, io.LimitReader(reader, hdr.Size))
if err != nil {
return err
} else if n != size {
return fmt.Errorf("error writing %s: expected %d bytes but sent %d", entry.Path, size, n)
}
return nil
})
// close tar writer before closing pipew
// to flush remaining data to pipew
// regardless of error value
tw.Close()
if err != nil {
apiGetTarFail.Inc(1)
pipew.CloseWithError(err)
} else {
pipew.Close()
}
}()
return piper, nil
}
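// Illustrative sketch, not part of the original file: consuming the stream
// returned by GetDirectoryTar. The *API value and URI are assumed to come from
// the caller; NOOPDecrypt is used on the assumption that the content is unencrypted.
func exampleGetDirectoryTar(ctx context.Context, a *API, uri *URI) error {
	rc, err := a.GetDirectoryTar(ctx, NOOPDecrypt, uri)
	if err != nil {
		return err
	}
	defer rc.Close()
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		// tar.Reader advances to the next entry on Next(), so just log the header here
		log.Debug("tar entry", "path", hdr.Name, "size", hdr.Size)
	}
}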
// GetManifestList lists the manifest entries for the specified address and prefix
// and returns them as a ManifestList
func (a *API) GetManifestList(ctx context.Context, decryptor DecryptFunc, addr storage.Address, prefix string) (list ManifestList, err error) {
apiManifestListCount.Inc(1)
walker, err := a.NewManifestWalker(ctx, addr, decryptor, nil)
if err != nil {
apiManifestListFail.Inc(1)
return ManifestList{}, err
}
err = walker.Walk(func(entry *ManifestEntry) error {
// handle non-manifest files
if entry.ContentType != ManifestType {
// ignore the file if it doesn't have the specified prefix
if !strings.HasPrefix(entry.Path, prefix) {
return nil
}
// if the path after the prefix contains a slash, add a
// common prefix to the list, otherwise add the entry
suffix := strings.TrimPrefix(entry.Path, prefix)
if index := strings.Index(suffix, "/"); index > -1 {
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1])
return nil
}
if entry.Path == "" {
entry.Path = "/"
}
list.Entries = append(list.Entries, entry)
return nil
}
// if the manifest's path is a prefix of the specified prefix
// then just recurse into the manifest by returning nil and
// continuing the walk
if strings.HasPrefix(prefix, entry.Path) {
return nil
}
// if the manifest's path has the specified prefix, then if the
// path after the prefix contains a slash, add a common prefix
// to the list and skip the manifest, otherwise recurse into
// the manifest by returning nil and continuing the walk
if strings.HasPrefix(entry.Path, prefix) {
suffix := strings.TrimPrefix(entry.Path, prefix)
if index := strings.Index(suffix, "/"); index > -1 {
list.CommonPrefixes = append(list.CommonPrefixes, prefix+suffix[:index+1])
return ErrSkipManifest
}
return nil
}
// the manifest neither has the prefix nor needs recursing into,
// so just skip it
return ErrSkipManifest
})
if err != nil {
apiManifestListFail.Inc(1)
return ManifestList{}, err
}
return list, nil
}
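// Illustrative sketch, not part of the original file: listing the paths found
// under a prefix. addr is assumed to reference an existing manifest and the
// "images/" prefix is purely an example.
func exampleGetManifestList(ctx context.Context, a *API, addr storage.Address) ([]string, error) {
	list, err := a.GetManifestList(ctx, NOOPDecrypt, addr, "images/")
	if err != nil {
		return nil, err
	}
	paths := make([]string, 0, len(list.Entries)+len(list.CommonPrefixes))
	for _, entry := range list.Entries {
		paths = append(paths, entry.Path)
	}
	// common prefixes behave like sub-directories of the requested prefix
	return append(paths, list.CommonPrefixes...), nil
}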
func (a *API) UpdateManifest(ctx context.Context, addr storage.Address, update func(mw *ManifestWriter) error) (storage.Address, error) {
apiManifestUpdateCount.Inc(1)
mw, err := a.NewManifestWriter(ctx, addr, nil)
if err != nil {
apiManifestUpdateFail.Inc(1)
return nil, err
}
if err := update(mw); err != nil {
apiManifestUpdateFail.Inc(1)
return nil, err
}
addr, err = mw.Store()
if err != nil {
apiManifestUpdateFail.Inc(1)
return nil, err
}
log.Debug(fmt.Sprintf("generated manifest %s", addr))
return addr, nil
}
// Modify loads manifest and checks the content hash before recalculating and storing the manifest.
func (a *API) Modify(ctx context.Context, addr storage.Address, path, contentHash, contentType string) (storage.Address, error) {
apiModifyCount.Inc(1)
quitC := make(chan bool)
trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
if err != nil {
apiModifyFail.Inc(1)
return nil, err
}
if contentHash != "" {
entry := newManifestTrieEntry(&ManifestEntry{
Path: path,
ContentType: contentType,
}, nil)
entry.Hash = contentHash
trie.addEntry(entry, quitC)
} else {
trie.deleteEntry(path, quitC)
}
if err := trie.recalcAndStore(); err != nil {
apiModifyFail.Inc(1)
return nil, err
}
return trie.ref, nil
}
// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm.
func (a *API) AddFile(ctx context.Context, mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) {
apiAddFileCount.Inc(1)
uri, err := Parse("bzz:/" + mhash)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
// trim the root dir we added
if path[:1] == "/" {
path = path[1:]
}
entry := &ManifestEntry{
Path: filepath.Join(path, fname),
ContentType: mime.TypeByExtension(filepath.Ext(fname)),
Mode: 0700,
Size: int64(len(content)),
ModTime: time.Now(),
}
mw, err := a.NewManifestWriter(ctx, mkey, nil)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
fkey, err := mw.AddEntry(ctx, bytes.NewReader(content), entry)
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
newMkey, err := mw.Store()
if err != nil {
apiAddFileFail.Inc(1)
return nil, "", err
}
return fkey, newMkey.String(), nil
}
func (a *API) UploadTar(ctx context.Context, bodyReader io.ReadCloser, manifestPath, defaultPath string, mw *ManifestWriter) (storage.Address, error) {
apiUploadTarCount.Inc(1)
var contentKey storage.Address
tr := tar.NewReader(bodyReader)
defer bodyReader.Close()
var defaultPathFound bool
for {
hdr, err := tr.Next()
if err == io.EOF {
break
} else if err != nil {
apiUploadTarFail.Inc(1)
return nil, fmt.Errorf("error reading tar stream: %s", err)
}
// only store regular files
if !hdr.FileInfo().Mode().IsRegular() {
continue
}
// add the entry under the path from the request
manifestPath := path.Join(manifestPath, hdr.Name)
contentType := hdr.Xattrs["user.swarm.content-type"]
if contentType == "" {
contentType = mime.TypeByExtension(filepath.Ext(hdr.Name))
}
//DetectContentType("")
entry := &ManifestEntry{
Path: manifestPath,
ContentType: contentType,
Mode: hdr.Mode,
Size: hdr.Size,
ModTime: hdr.ModTime,
}
contentKey, err = mw.AddEntry(ctx, tr, entry)
if err != nil {
apiUploadTarFail.Inc(1)
return nil, fmt.Errorf("error adding manifest entry from tar stream: %s", err)
}
if hdr.Name == defaultPath {
contentType := hdr.Xattrs["user.swarm.content-type"]
if contentType == "" {
contentType = mime.TypeByExtension(filepath.Ext(hdr.Name))
}
entry := &ManifestEntry{
Hash: contentKey.Hex(),
Path: "", // default entry
ContentType: contentType,
Mode: hdr.Mode,
Size: hdr.Size,
ModTime: hdr.ModTime,
}
contentKey, err = mw.AddEntry(ctx, nil, entry)
if err != nil {
apiUploadTarFail.Inc(1)
return nil, fmt.Errorf("error adding default manifest entry from tar stream: %s", err)
}
defaultPathFound = true
}
}
if defaultPath != "" && !defaultPathFound {
return contentKey, fmt.Errorf("default path %q not found", defaultPath)
}
return contentKey, nil
}
// RemoveFile removes a file entry in a manifest.
func (a *API) RemoveFile(ctx context.Context, mhash string, path string, fname string, nameresolver bool) (string, error) {
apiRmFileCount.Inc(1)
uri, err := Parse("bzz:/" + mhash)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
// trim the root dir we added
if path[:1] == "/" {
path = path[1:]
}
mw, err := a.NewManifestWriter(ctx, mkey, nil)
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
err = mw.RemoveEntry(filepath.Join(path, fname))
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
newMkey, err := mw.Store()
if err != nil {
apiRmFileFail.Inc(1)
return "", err
}
return newMkey.String(), nil
}
// AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm.
func (a *API) AppendFile(ctx context.Context, mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) {
apiAppendFileCount.Inc(1)
buffSize := offset + addSize
if buffSize < existingSize {
buffSize = existingSize
}
buf := make([]byte, buffSize)
oldReader, _ := a.Retrieve(ctx, oldAddr)
io.ReadAtLeast(oldReader, buf, int(offset))
newReader := bytes.NewReader(content)
io.ReadAtLeast(newReader, buf[offset:], int(addSize))
if buffSize < existingSize {
io.ReadAtLeast(oldReader, buf[addSize:], int(buffSize))
}
combinedReader := bytes.NewReader(buf)
totalSize := int64(len(buf))
// TODO(jmozah): to append using pyramid chunker when it is ready
//oldReader := a.Retrieve(oldKey)
//newReader := bytes.NewReader(content)
//combinedReader := io.MultiReader(oldReader, newReader)
uri, err := Parse("bzz:/" + mhash)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
mkey, err := a.ResolveURI(ctx, uri, EmptyCredentials)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
// trim the root dir we added
if path[:1] == "/" {
path = path[1:]
}
mw, err := a.NewManifestWriter(ctx, mkey, nil)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
err = mw.RemoveEntry(filepath.Join(path, fname))
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
entry := &ManifestEntry{
Path: filepath.Join(path, fname),
ContentType: mime.TypeByExtension(filepath.Ext(fname)),
Mode: 0700,
Size: totalSize,
ModTime: time.Now(),
}
fkey, err := mw.AddEntry(ctx, io.Reader(combinedReader), entry)
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
newMkey, err := mw.Store()
if err != nil {
apiAppendFileFail.Inc(1)
return nil, "", err
}
return fkey, newMkey.String(), nil
}
// BuildDirectoryTree is used by swarmfs_unix
func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) {
uri, err := Parse("bzz:/" + mhash)
if err != nil {
return nil, nil, err
}
addr, err = a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, nil, err
}
quitC := make(chan bool)
rootTrie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
if err != nil {
return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err)
}
manifestEntryMap = map[string]*manifestTrieEntry{}
err = rootTrie.listWithPrefix(uri.Path, quitC, func(entry *manifestTrieEntry, suffix string) {
manifestEntryMap[suffix] = entry
})
if err != nil {
return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err)
}
return addr, manifestEntryMap, nil
}
// FeedsLookup finds Swarm feeds updates at specific points in time, or the latest update
func (a *API) FeedsLookup(ctx context.Context, query *feed.Query) ([]byte, error) {
_, err := a.feed.Lookup(ctx, query)
if err != nil {
return nil, err
}
var data []byte
_, data, err = a.feed.GetContent(&query.Feed)
if err != nil {
return nil, err
}
return data, nil
}
// FeedsNewRequest creates a Request object to update a specific feed
func (a *API) FeedsNewRequest(ctx context.Context, feed *feed.Feed) (*feed.Request, error) {
return a.feed.NewRequest(ctx, feed)
}
// FeedsUpdate publishes a new update on the given feed
func (a *API) FeedsUpdate(ctx context.Context, request *feed.Request) (storage.Address, error) {
return a.feed.Update(ctx, request)
}
// ErrCannotLoadFeedManifest is returned when looking up a feeds manifest fails
var ErrCannotLoadFeedManifest = errors.New("Cannot load feed manifest")
// ErrNotAFeedManifest is returned when the address provided returned something other than a valid manifest
var ErrNotAFeedManifest = errors.New("Not a feed manifest")
// ResolveFeedManifest retrieves the Swarm feed manifest for the given address, and returns the referenced Feed.
func (a *API) ResolveFeedManifest(ctx context.Context, addr storage.Address) (*feed.Feed, error) {
trie, err := loadManifest(ctx, a.fileStore, addr, nil, NOOPDecrypt)
if err != nil {
return nil, ErrCannotLoadFeedManifest
}
entry, _ := trie.getEntry("")
if entry.ContentType != FeedContentType {
return nil, ErrNotAFeedManifest
}
return entry.Feed, nil
}
// ErrCannotResolveFeedURI is returned when the ENS resolver is not able to translate a name to a Swarm feed
var ErrCannotResolveFeedURI = errors.New("Cannot resolve Feed URI")
// ErrCannotResolveFeed is returned when values provided are not enough or invalid to recreate a
// feed out of them.
var ErrCannotResolveFeed = errors.New("Cannot resolve Feed")
// ResolveFeed attempts to extract feed information out of the manifest, if provided
// If not, it attempts to extract the feed out of a set of key-value pairs
func (a *API) ResolveFeed(ctx context.Context, uri *URI, values feed.Values) (*feed.Feed, error) {
var fd *feed.Feed
var err error
if uri.Addr != "" {
// resolve the content key.
manifestAddr := uri.Address()
if manifestAddr == nil {
manifestAddr, err = a.Resolve(ctx, uri.Addr)
if err != nil {
return nil, ErrCannotResolveFeedURI
}
}
// get the Swarm feed from the manifest
fd, err = a.ResolveFeedManifest(ctx, manifestAddr)
if err != nil {
return nil, err
}
log.Debug("handle.get.feed: resolved", "manifestkey", manifestAddr, "feed", fd.Hex())
} else {
var f feed.Feed
if err := f.FromValues(values); err != nil {
return nil, ErrCannotResolveFeed
}
fd = &f
}
return fd, nil
}
// MimeOctetStream is the default value of the HTTP Content-Type header
const MimeOctetStream = "application/octet-stream"
// DetectContentType detects the content type by file extension, falling back to sniffing the content
func DetectContentType(fileName string, f io.ReadSeeker) (string, error) {
ctype := mime.TypeByExtension(filepath.Ext(fileName))
if ctype != "" {
return ctype, nil
}
// remember the current position so we can rewind after sniffing the content
currentPosition, err := f.Seek(0, io.SeekCurrent)
if err != nil {
return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err)
}
// read a chunk to decide between utf-8 text and binary
var buf [512]byte
n, _ := f.Read(buf[:])
ctype = http.DetectContentType(buf[:n])
_, err = f.Seek(currentPosition, io.SeekStart) // rewind to output whole file
if err != nil {
return MimeOctetStream, fmt.Errorf("seeker can't seek, %s", err)
}
return ctype, nil
}
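// Illustrative sketch (not part of the original file): typical use of
// DetectContentType on an open file. exampleDetectContentType is a hypothetical
// helper and assumes the standard library "os" package is available in this
// file; the seek/rewind inside DetectContentType preserves the caller's read
// position.
func exampleDetectContentType(path string) (string, error) {
    f, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer f.Close()
    // extension first, content sniffing only if the extension is unknown
    return DetectContentType(f.Name(), f)
}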

api/api_test.go (Normal file, 577 lines)
@@ -0,0 +1,577 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"bytes"
"context"
crand "crypto/rand"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"math/big"
"os"
"strings"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/sctx"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/testutil"
)
func init() {
loglevel := flag.Int("loglevel", 2, "loglevel")
flag.Parse()
log.Root().SetHandler(log.CallerFileHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(os.Stderr, log.TerminalFormat(true)))))
}
func testAPI(t *testing.T, f func(*API, *chunk.Tags, bool)) {
for _, v := range []bool{true, false} {
datadir, err := ioutil.TempDir("", "bzz-test")
if err != nil {
t.Fatalf("unable to create temp dir: %v", err)
}
defer os.RemoveAll(datadir)
tags := chunk.NewTags()
fileStore, cleanup, err := storage.NewLocalFileStore(datadir, make([]byte, 32), tags)
if err != nil {
t.Fatalf("unable to create file store: %v", err)
}
defer cleanup()
api := NewAPI(fileStore, nil, nil, nil, tags)
f(api, tags, v)
}
}
type testResponse struct {
reader storage.LazySectionReader
*Response
}
type Response struct {
MimeType string
Status int
Size int64
Content string
}
func checkResponse(t *testing.T, resp *testResponse, exp *Response) {
if resp.MimeType != exp.MimeType {
t.Errorf("incorrect mimeType. expected '%s', got '%s'", exp.MimeType, resp.MimeType)
}
if resp.Status != exp.Status {
t.Errorf("incorrect status. expected '%d', got '%d'", exp.Status, resp.Status)
}
if resp.Size != exp.Size {
t.Errorf("incorrect size. expected '%d', got '%d'", exp.Size, resp.Size)
}
if resp.reader != nil {
content := make([]byte, resp.Size)
read, _ := resp.reader.Read(content)
if int64(read) != exp.Size {
t.Errorf("incorrect content length. expected '%d...', got '%d...'", read, exp.Size)
}
resp.Content = string(content)
}
if resp.Content != exp.Content {
// if !bytes.Equal(resp.Content, exp.Content)
t.Errorf("incorrect content. expected '%s...', got '%s...'", string(exp.Content), string(resp.Content))
}
}
// func expResponse(content []byte, mimeType string, status int) *Response {
func expResponse(content string, mimeType string, status int) *Response {
log.Trace(fmt.Sprintf("expected content (%v): %v ", len(content), content))
return &Response{mimeType, status, int64(len(content)), content}
}
func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse {
addr := storage.Address(common.Hex2Bytes(bzzhash))
reader, mimeType, status, _, err := api.Get(context.TODO(), NOOPDecrypt, addr, path)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
quitC := make(chan bool)
size, err := reader.Size(context.TODO(), quitC)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
log.Trace(fmt.Sprintf("reader size: %v ", size))
s := make([]byte, size)
_, err = reader.Read(s)
if err != io.EOF {
t.Fatalf("unexpected error: %v", err)
}
reader.Seek(0, 0)
return &testResponse{reader, &Response{mimeType, status, size, string(s)}}
}
func TestApiPut(t *testing.T) {
testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
content := "hello"
exp := expResponse(content, "text/plain", 0)
ctx := context.TODO()
addr, wait, err := putString(ctx, api, content, exp.MimeType, toEncrypt)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
err = wait(ctx)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
resp := testGet(t, api, addr.Hex(), "")
checkResponse(t, resp, exp)
tag := tags.All()[0]
testutil.CheckTag(t, tag, 2, 2, 0, 2) //1 chunk data, 1 chunk manifest
})
}
// TestApiTagLarge tests that the number of chunks counted is larger for a larger input
func TestApiTagLarge(t *testing.T) {
const contentLength = 4096 * 4095
testAPI(t, func(api *API, tags *chunk.Tags, toEncrypt bool) {
randomContentReader := io.LimitReader(crand.Reader, int64(contentLength))
tag, err := api.Tags.New("unnamed-tag", 0)
if err != nil {
t.Fatal(err)
}
ctx := sctx.SetTag(context.Background(), tag.Uid)
key, waitContent, err := api.Store(ctx, randomContentReader, int64(contentLength), toEncrypt)
if err != nil {
t.Fatal(err)
}
err = waitContent(ctx)
if err != nil {
t.Fatal(err)
}
tag.DoneSplit(key)
if toEncrypt {
tag := tags.All()[0]
expect := int64(4095 + 64 + 1)
testutil.CheckTag(t, tag, expect, expect, 0, expect)
} else {
tag := tags.All()[0]
expect := int64(4095 + 32 + 1)
testutil.CheckTag(t, tag, expect, expect, 0, expect)
}
})
}
// testResolveValidator implements the Resolver interface and either returns the given
// hash if it is set, or returns a "name not found" error
type testResolveValidator struct {
hash *common.Hash
}
func newTestResolveValidator(addr string) *testResolveValidator {
r := &testResolveValidator{}
if addr != "" {
hash := common.HexToHash(addr)
r.hash = &hash
}
return r
}
func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) {
if t.hash == nil {
return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr)
}
return *t.hash, nil
}
func (t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) {
return
}
func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) {
return
}
// TestAPIResolve tests resolving URIs which can either contain content hashes
// or ENS names
func TestAPIResolve(t *testing.T) {
ensAddr := "swarm.eth"
hashAddr := "1111111111111111111111111111111111111111111111111111111111111111"
resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222"
doesResolve := newTestResolveValidator(resolvedAddr)
doesntResolve := newTestResolveValidator("")
type test struct {
desc string
dns Resolver
addr string
immutable bool
result string
expectErr error
}
tests := []*test{
{
desc: "DNS not configured, hash address, returns hash address",
dns: nil,
addr: hashAddr,
result: hashAddr,
},
{
desc: "DNS not configured, ENS address, returns error",
dns: nil,
addr: ensAddr,
expectErr: errors.New(`no DNS to resolve name: "swarm.eth"`),
},
{
desc: "DNS configured, hash address, hash resolves, returns resolved address",
dns: doesResolve,
addr: hashAddr,
result: resolvedAddr,
},
{
desc: "DNS configured, immutable hash address, hash resolves, returns hash address",
dns: doesResolve,
addr: hashAddr,
immutable: true,
result: hashAddr,
},
{
desc: "DNS configured, hash address, hash doesn't resolve, returns hash address",
dns: doesntResolve,
addr: hashAddr,
result: hashAddr,
},
{
desc: "DNS configured, ENS address, name resolves, returns resolved address",
dns: doesResolve,
addr: ensAddr,
result: resolvedAddr,
},
{
desc: "DNS configured, immutable ENS address, name resolves, returns error",
dns: doesResolve,
addr: ensAddr,
immutable: true,
expectErr: errors.New(`immutable address not a content hash: "swarm.eth"`),
},
{
desc: "DNS configured, ENS address, name doesn't resolve, returns error",
dns: doesntResolve,
addr: ensAddr,
expectErr: errors.New(`DNS name not found: "swarm.eth"`),
},
}
for _, x := range tests {
t.Run(x.desc, func(t *testing.T) {
api := &API{dns: x.dns}
uri := &URI{Addr: x.addr, Scheme: "bzz"}
if x.immutable {
uri.Scheme = "bzz-immutable"
}
res, err := api.ResolveURI(context.TODO(), uri, "")
if err == nil {
if x.expectErr != nil {
t.Fatalf("expected error %q, got result %q", x.expectErr, res)
}
if res.String() != x.result {
t.Fatalf("expected result %q, got %q", x.result, res)
}
} else {
if x.expectErr == nil {
t.Fatalf("expected no error, got %q", err)
}
if err.Error() != x.expectErr.Error() {
t.Fatalf("expected error %q, got %q", x.expectErr, err)
}
}
})
}
}
func TestMultiResolver(t *testing.T) {
doesntResolve := newTestResolveValidator("")
ethAddr := "swarm.eth"
ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222"
ethResolve := newTestResolveValidator(ethHash)
testAddr := "swarm.test"
testHash := "0x1111111111111111111111111111111111111111111111111111111111111111"
testResolve := newTestResolveValidator(testHash)
tests := []struct {
desc string
r Resolver
addr string
result string
err error
}{
{
desc: "No resolvers, returns error",
r: NewMultiResolver(),
err: NewNoResolverError(""),
},
{
desc: "One default resolver, returns resolved address",
r: NewMultiResolver(MultiResolverOptionWithResolver(ethResolve, "")),
addr: ethAddr,
result: ethHash,
},
{
desc: "Two default resolvers, returns resolved address",
r: NewMultiResolver(
MultiResolverOptionWithResolver(ethResolve, ""),
MultiResolverOptionWithResolver(ethResolve, ""),
),
addr: ethAddr,
result: ethHash,
},
{
desc: "Two default resolvers, first doesn't resolve, returns resolved address",
r: NewMultiResolver(
MultiResolverOptionWithResolver(doesntResolve, ""),
MultiResolverOptionWithResolver(ethResolve, ""),
),
addr: ethAddr,
result: ethHash,
},
{
desc: "Default resolver doesn't resolve, tld resolver resolve, returns resolved address",
r: NewMultiResolver(
MultiResolverOptionWithResolver(doesntResolve, ""),
MultiResolverOptionWithResolver(ethResolve, "eth"),
),
addr: ethAddr,
result: ethHash,
},
{
desc: "Three TLD resolvers, third resolves, returns resolved address",
r: NewMultiResolver(
MultiResolverOptionWithResolver(doesntResolve, "eth"),
MultiResolverOptionWithResolver(doesntResolve, "eth"),
MultiResolverOptionWithResolver(ethResolve, "eth"),
),
addr: ethAddr,
result: ethHash,
},
{
desc: "One TLD resolver doesn't resolve, returns error",
r: NewMultiResolver(
MultiResolverOptionWithResolver(doesntResolve, ""),
MultiResolverOptionWithResolver(ethResolve, "eth"),
),
addr: ethAddr,
result: ethHash,
},
{
desc: "One defautl and one TLD resolver, all doesn't resolve, returns error",
r: NewMultiResolver(
MultiResolverOptionWithResolver(doesntResolve, ""),
MultiResolverOptionWithResolver(doesntResolve, "eth"),
),
addr: ethAddr,
result: ethHash,
err: errors.New(`DNS name not found: "swarm.eth"`),
},
{
desc: "Two TLD resolvers, both resolve, returns resolved address",
r: NewMultiResolver(
MultiResolverOptionWithResolver(ethResolve, "eth"),
MultiResolverOptionWithResolver(testResolve, "test"),
),
addr: testAddr,
result: testHash,
},
{
desc: "One TLD resolver, no default resolver, returns error for different TLD",
r: NewMultiResolver(
MultiResolverOptionWithResolver(ethResolve, "eth"),
),
addr: testAddr,
err: NewNoResolverError("test"),
},
}
for _, x := range tests {
t.Run(x.desc, func(t *testing.T) {
res, err := x.r.Resolve(x.addr)
if err == nil {
if x.err != nil {
t.Fatalf("expected error %q, got result %q", x.err, res.Hex())
}
if res.Hex() != x.result {
t.Fatalf("expected result %q, got %q", x.result, res.Hex())
}
} else {
if x.err == nil {
t.Fatalf("expected no error, got %q", err)
}
if err.Error() != x.err.Error() {
t.Fatalf("expected error %q, got %q", x.err, err)
}
}
})
}
}
func TestDecryptOriginForbidden(t *testing.T) {
ctx := context.TODO()
ctx = sctx.SetHost(ctx, "swarm-gateways.net")
me := &ManifestEntry{
Access: &AccessEntry{Type: AccessTypePass},
}
api := NewAPI(nil, nil, nil, nil, chunk.NewTags())
f := api.Decryptor(ctx, "")
err := f(me)
if err != ErrDecryptDomainForbidden {
t.Fatalf("should fail with ErrDecryptDomainForbidden, got %v", err)
}
}
func TestDecryptOrigin(t *testing.T) {
for _, v := range []struct {
host string
expectError error
}{
{
host: "localhost",
expectError: ErrDecrypt,
},
{
host: "127.0.0.1",
expectError: ErrDecrypt,
},
{
host: "swarm-gateways.net",
expectError: ErrDecryptDomainForbidden,
},
} {
ctx := context.TODO()
ctx = sctx.SetHost(ctx, v.host)
me := &ManifestEntry{
Access: &AccessEntry{Type: AccessTypePass},
}
api := NewAPI(nil, nil, nil, nil, chunk.NewTags())
f := api.Decryptor(ctx, "")
err := f(me)
if err != v.expectError {
t.Fatalf("should fail with %v, got %v", v.expectError, err)
}
}
}
func TestDetectContentType(t *testing.T) {
for _, tc := range []struct {
file string
content string
expectedContentType string
}{
{
file: "file-with-correct-css.css",
content: "body {background-color: orange}",
expectedContentType: "text/css; charset=utf-8",
},
{
file: "empty-file.css",
content: "",
expectedContentType: "text/css; charset=utf-8",
},
{
file: "empty-file.pdf",
content: "",
expectedContentType: "application/pdf",
},
{
file: "empty-file.md",
content: "",
expectedContentType: "text/markdown; charset=utf-8",
},
{
file: "empty-file-with-unknown-content.strangeext",
content: "",
expectedContentType: "text/plain; charset=utf-8",
},
{
file: "file-with-unknown-extension-and-content.strangeext",
content: "Lorem Ipsum",
expectedContentType: "text/plain; charset=utf-8",
},
{
file: "file-no-extension",
content: "Lorem Ipsum",
expectedContentType: "text/plain; charset=utf-8",
},
{
file: "file-no-extension-no-content",
content: "",
expectedContentType: "text/plain; charset=utf-8",
},
{
file: "css-file-with-html-inside.css",
content: "<!doctype html><html><head></head><body></body></html>",
expectedContentType: "text/css; charset=utf-8",
},
} {
t.Run(tc.file, func(t *testing.T) {
detected, err := DetectContentType(tc.file, bytes.NewReader([]byte(tc.content)))
if err != nil {
t.Fatal(err)
}
if detected != tc.expectedContentType {
t.Fatalf("File: %s, Expected mime type %s, got %s", tc.file, tc.expectedContentType, detected)
}
})
}
}
// putString provides singleton manifest creation on top of api.API
func putString(ctx context.Context, a *API, content string, contentType string, toEncrypt bool) (k storage.Address, wait func(context.Context) error, err error) {
r := strings.NewReader(content)
tag, err := a.Tags.New("unnamed-tag", 0)
if err != nil {
return nil, nil, err
}
log.Trace("created new tag", "uid", tag.Uid)
cCtx := sctx.SetTag(ctx, tag.Uid)
key, waitContent, err := a.Store(cCtx, r, int64(len(content)), toEncrypt)
if err != nil {
return nil, nil, err
}
manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType)
r = strings.NewReader(manifest)
key, waitManifest, err := a.Store(cCtx, r, int64(len(manifest)), toEncrypt)
if err != nil {
return nil, nil, err
}
tag.DoneSplit(key)
return key, func(ctx context.Context) error {
err := waitContent(ctx)
if err != nil {
return err
}
return waitManifest(ctx)
}, nil
}

api/client/client.go (Normal file, 829 lines)
@@ -0,0 +1,829 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/http/httptrace"
"net/textproto"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/api"
swarmhttp "github.com/ethersphere/swarm/api/http"
"github.com/ethersphere/swarm/spancontext"
"github.com/ethersphere/swarm/storage/feed"
"github.com/pborman/uuid"
)
var (
ErrUnauthorized = errors.New("unauthorized")
)
func NewClient(gateway string) *Client {
return &Client{
Gateway: gateway,
}
}
// Client wraps interaction with a swarm HTTP gateway.
type Client struct {
Gateway string
}
// UploadRaw uploads raw data to swarm and returns the resulting hash. If toEncrypt is true it
// uploads encrypted data
func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) {
if size <= 0 {
return "", errors.New("data size must be greater than zero")
}
addr := ""
if toEncrypt {
addr = "encrypt"
}
req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r)
if err != nil {
return "", err
}
req.ContentLength = size
req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("raw_upload_%d", time.Now().Unix()))
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(data), nil
}
// DownloadRaw downloads raw data from swarm and returns a ReadCloser together with
// a bool indicating whether the content was encrypted
func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) {
uri := c.Gateway + "/bzz-raw:/" + hash
res, err := http.DefaultClient.Get(uri)
if err != nil {
return nil, false, err
}
if res.StatusCode != http.StatusOK {
res.Body.Close()
return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
isEncrypted := (res.Header.Get("X-Decrypted") == "true")
return res.Body, isEncrypted, nil
}
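// Illustrative sketch (not part of the original file): a raw upload/download
// round trip. exampleRawRoundTrip is a hypothetical helper; the gateway URL is
// an assumption based on the default HTTP listen address 127.0.0.1:8500 in
// api/config.go.
func exampleRawRoundTrip() error {
    c := NewClient("http://127.0.0.1:8500")
    data := []byte("hello swarm")
    hash, err := c.UploadRaw(bytes.NewReader(data), int64(len(data)), false) // unencrypted upload
    if err != nil {
        return err
    }
    body, encrypted, err := c.DownloadRaw(hash)
    if err != nil {
        return err
    }
    defer body.Close()
    got, err := ioutil.ReadAll(body)
    if err != nil {
        return err
    }
    log.Info("downloaded raw content", "hash", hash, "bytes", len(got), "encrypted", encrypted)
    return nil
}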
// File represents a file in a swarm manifest and is used for uploading and
// downloading content to and from swarm
type File struct {
io.ReadCloser
api.ManifestEntry
Tag string
}
// Open opens a local file which can then be passed to client.Upload to upload
// it to swarm
func Open(path string) (*File, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
stat, err := f.Stat()
if err != nil {
f.Close()
return nil, err
}
contentType, err := api.DetectContentType(f.Name(), f)
if err != nil {
return nil, err
}
return &File{
ReadCloser: f,
ManifestEntry: api.ManifestEntry{
ContentType: contentType,
Mode: int64(stat.Mode()),
Size: stat.Size(),
ModTime: stat.ModTime(),
},
Tag: filepath.Base(path),
}, nil
}
// Upload uploads a file to swarm and either adds it to an existing manifest
// (if the manifest argument is non-empty) or creates a new manifest containing
// the file, returning the resulting manifest hash (the file will then be
// available at bzz:/<hash>/<path>)
func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) {
if file.Size <= 0 {
return "", errors.New("file size must be greater than zero")
}
return c.TarUpload(manifest, &FileUploader{file}, "", toEncrypt)
}
// Download downloads a file with the given path from the swarm manifest with
// the given hash (i.e. it gets bzz:/<hash>/<path>)
func (c *Client) Download(hash, path string) (*File, error) {
uri := c.Gateway + "/bzz:/" + hash + "/" + path
res, err := http.DefaultClient.Get(uri)
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusOK {
res.Body.Close()
return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
return &File{
ReadCloser: res.Body,
ManifestEntry: api.ManifestEntry{
ContentType: res.Header.Get("Content-Type"),
Size: res.ContentLength,
},
}, nil
}
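// Illustrative sketch (not part of the original file): uploading a local file
// into a fresh manifest and fetching it back from the manifest root. The file
// path and gateway URL are arbitrary example values.
func exampleUploadDownloadFile() error {
    c := NewClient("http://127.0.0.1:8500")
    file, err := Open("/tmp/hello.txt") // detects the content type from name/content
    if err != nil {
        return err
    }
    defer file.Close()
    hash, err := c.Upload(file, "", false) // empty manifest argument -> create a new manifest
    if err != nil {
        return err
    }
    got, err := c.Download(hash, "") // the file sits at the manifest root
    if err != nil {
        return err
    }
    defer got.Close()
    _, err = io.Copy(os.Stdout, got)
    return err
}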
// UploadDirectory uploads a directory tree to swarm and either adds the files
// to an existing manifest (if the manifest argument is non-empty) or creates a
// new manifest, returning the resulting manifest hash (files from the
// directory will then be available at bzz:/<hash>/path/to/file), with
// the file specified in defaultPath being uploaded to the root of the manifest
// (i.e. bzz:/<hash>/)
func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) {
stat, err := os.Stat(dir)
if err != nil {
return "", err
} else if !stat.IsDir() {
return "", fmt.Errorf("not a directory: %s", dir)
}
if defaultPath != "" {
if _, err := os.Stat(filepath.Join(dir, defaultPath)); err != nil {
if os.IsNotExist(err) {
return "", fmt.Errorf("the default path %q was not found in the upload directory %q", defaultPath, dir)
}
return "", fmt.Errorf("default path: %v", err)
}
}
return c.TarUpload(manifest, &DirectoryUploader{dir}, defaultPath, toEncrypt)
}
// DownloadDirectory downloads the files contained in a swarm manifest under
// the given path into a local directory (existing files will be overwritten)
func (c *Client) DownloadDirectory(hash, path, destDir, credentials string) error {
stat, err := os.Stat(destDir)
if err != nil {
return err
} else if !stat.IsDir() {
return fmt.Errorf("not a directory: %s", destDir)
}
uri := c.Gateway + "/bzz:/" + hash + "/" + path
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return err
}
if credentials != "" {
req.SetBasicAuth("", credentials)
}
req.Header.Set("Accept", "application/x-tar")
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
case http.StatusUnauthorized:
return ErrUnauthorized
default:
return fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
tr := tar.NewReader(res.Body)
for {
hdr, err := tr.Next()
if err == io.EOF {
return nil
} else if err != nil {
return err
}
// ignore the default path file
if hdr.Name == "" {
continue
}
dstPath := filepath.Join(destDir, filepath.Clean(strings.TrimPrefix(hdr.Name, path)))
if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil {
return err
}
var mode os.FileMode = 0644
if hdr.Mode > 0 {
mode = os.FileMode(hdr.Mode)
}
dst, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
n, err := io.Copy(dst, tr)
dst.Close()
if err != nil {
return err
} else if n != hdr.Size {
return fmt.Errorf("expected %s to be %d bytes but got %d", hdr.Name, hdr.Size, n)
}
}
}
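// Illustrative sketch (not part of the original file): mirroring a local
// directory into swarm and back. The directory paths are arbitrary example
// values; the destination directory must already exist and defaultPath must
// name a file inside the uploaded directory.
func exampleDirectoryRoundTrip() error {
    c := NewClient("http://127.0.0.1:8500")
    hash, err := c.UploadDirectory("/tmp/site", "index.html", "", false)
    if err != nil {
        return err
    }
    // download everything under the manifest root, overwriting existing files
    return c.DownloadDirectory(hash, "", "/tmp/site-copy", "")
}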
// DownloadFile downloads a single file into the destination directory.
// If the manifest entry does not specify a file name, it falls back
// to the hash of the file as the filename.
func (c *Client) DownloadFile(hash, path, dest, credentials string) error {
hasDestinationFilename := false
if stat, err := os.Stat(dest); err == nil {
hasDestinationFilename = !stat.IsDir()
} else {
if os.IsNotExist(err) {
// does not exist - should be created
hasDestinationFilename = true
} else {
return fmt.Errorf("could not stat path: %v", err)
}
}
manifestList, err := c.List(hash, path, credentials)
if err != nil {
return err
}
switch len(manifestList.Entries) {
case 0:
return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct")
case 1:
//continue
default:
return fmt.Errorf("got too many matches for this path")
}
uri := c.Gateway + "/bzz:/" + hash + "/" + path
req, err := http.NewRequest("GET", uri, nil)
if err != nil {
return err
}
if credentials != "" {
req.SetBasicAuth("", credentials)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
case http.StatusUnauthorized:
return ErrUnauthorized
default:
return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode)
}
filename := ""
if hasDestinationFilename {
filename = dest
} else {
// try to derive the filename from the requested path
re := regexp.MustCompile("[^/]+$") //everything after last slash
if results := re.FindAllString(path, -1); len(results) > 0 {
filename = results[len(results)-1]
} else {
if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" {
filename = entry.Path
} else {
// assume hash as name if there's nothing from the command line
filename = hash
}
}
filename = filepath.Join(dest, filename)
}
filePath, err := filepath.Abs(filename)
if err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil {
return err
}
dst, err := os.Create(filename)
if err != nil {
return err
}
defer dst.Close()
_, err = io.Copy(dst, res.Body)
return err
}
// UploadManifest uploads the given manifest to swarm
func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) {
data, err := json.Marshal(m)
if err != nil {
return "", err
}
return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
}
// DownloadManifest downloads a swarm manifest
func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) {
res, isEncrypted, err := c.DownloadRaw(hash)
if err != nil {
return nil, isEncrypted, err
}
defer res.Close()
var manifest api.Manifest
if err := json.NewDecoder(res).Decode(&manifest); err != nil {
return nil, isEncrypted, err
}
return &manifest, isEncrypted, nil
}
// List lists files in a swarm manifest which have the given prefix, grouping
// common prefixes using "/" as a delimiter.
//
// For example, if the manifest represents the following directory structure:
//
// file1.txt
// file2.txt
// dir1/file3.txt
// dir1/dir2/file4.txt
//
// Then:
//
// - a prefix of "" would return [dir1/, file1.txt, file2.txt]
// - a prefix of "file" would return [file1.txt, file2.txt]
// - a prefix of "dir1/" would return [dir1/dir2/, dir1/file3.txt]
//
// where entries ending with "/" are common prefixes.
func (c *Client) List(hash, prefix, credentials string) (*api.ManifestList, error) {
req, err := http.NewRequest(http.MethodGet, c.Gateway+"/bzz-list:/"+hash+"/"+prefix, nil)
if err != nil {
return nil, err
}
if credentials != "" {
req.SetBasicAuth("", credentials)
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
case http.StatusUnauthorized:
return nil, ErrUnauthorized
default:
return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
var list api.ManifestList
if err := json.NewDecoder(res.Body).Decode(&list); err != nil {
return nil, err
}
return &list, nil
}
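// Illustrative sketch (not part of the original file): walking one level of a
// manifest with List, treating the "/"-delimited common prefixes like
// sub-directories. exampleList is a hypothetical helper; hash is assumed to
// come from a previous upload.
func exampleList(c *Client, hash, prefix string) error {
    list, err := c.List(hash, prefix, "") // no credentials
    if err != nil {
        return err
    }
    for _, p := range list.CommonPrefixes {
        log.Info("common prefix", "prefix", p) // entries ending in "/" group deeper paths
    }
    for _, entry := range list.Entries {
        log.Info("entry", "path", entry.Path, "size", entry.Size)
    }
    return nil
}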
// Uploader uploads files to swarm using a provided UploadFn
type Uploader interface {
Upload(UploadFn) error
Tag() string
}
type UploaderFunc func(UploadFn) error
func (u UploaderFunc) Upload(upload UploadFn) error {
return u(upload)
}
func (u UploaderFunc) Tag() string {
return fmt.Sprintf("multipart_upload_%d", time.Now().Unix())
}
// DirectoryUploader implements Uploader
var _ Uploader = &DirectoryUploader{}
// DirectoryUploader uploads all files in a directory, optionally uploading
// a file to the default path
type DirectoryUploader struct {
Dir string
}
func (d *DirectoryUploader) Tag() string {
return filepath.Base(d.Dir)
}
// Upload performs the upload of the directory and default path
func (d *DirectoryUploader) Upload(upload UploadFn) error {
return filepath.Walk(d.Dir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if f.IsDir() {
return nil
}
file, err := Open(path)
if err != nil {
return err
}
relPath, err := filepath.Rel(d.Dir, path)
if err != nil {
return err
}
file.Path = filepath.ToSlash(relPath)
return upload(file)
})
}
var _ Uploader = &FileUploader{}
// FileUploader uploads a single file
type FileUploader struct {
File *File
}
func (f *FileUploader) Tag() string {
return f.File.Tag
}
// Upload performs the upload of the file
func (f *FileUploader) Upload(upload UploadFn) error {
return upload(f.File)
}
// UploadFn is the type of function passed to an Uploader to perform the upload
// of a single file (for example, a directory uploader would call a provided
// UploadFn for each file in the directory tree)
type UploadFn func(file *File) error
// TarUpload uses the given Uploader to upload files to swarm as a tar stream,
// returning the resulting manifest hash
func (c *Client) TarUpload(hash string, uploader Uploader, defaultPath string, toEncrypt bool) (string, error) {
ctx, sp := spancontext.StartSpan(context.Background(), "api.client.tarupload")
defer sp.Finish()
var tn time.Time
reqR, reqW := io.Pipe()
defer reqR.Close()
addr := hash
// If there is a hash already (a manifest), then that manifest will determine if the upload has
// to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if
// there is encryption or not.
if hash == "" && toEncrypt {
// This is the built-in address for the encrypted upload endpoint
addr = "encrypt"
}
req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR)
if err != nil {
return "", err
}
trace := GetClientTrace("swarm api client - upload tar", "api.client.uploadtar", uuid.New()[:8], &tn)
req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
transport := http.DefaultTransport
req.Header.Set("Content-Type", "application/x-tar")
if defaultPath != "" {
q := req.URL.Query()
q.Set("defaultpath", defaultPath)
req.URL.RawQuery = q.Encode()
}
tag := uploader.Tag()
if tag == "" {
tag = "unnamed_tag_" + fmt.Sprintf("%d", time.Now().Unix())
}
log.Trace("setting upload tag", "tag", tag)
req.Header.Set(swarmhttp.SwarmTagHeaderName, tag)
// use 'Expect: 100-continue' so we don't send the request body if
// the server refuses the request
req.Header.Set("Expect", "100-continue")
tw := tar.NewWriter(reqW)
// define an UploadFn which adds files to the tar stream
uploadFn := func(file *File) error {
hdr := &tar.Header{
Name: file.Path,
Mode: file.Mode,
Size: file.Size,
ModTime: file.ModTime,
Xattrs: map[string]string{
"user.swarm.content-type": file.ContentType,
},
}
if err := tw.WriteHeader(hdr); err != nil {
return err
}
_, err = io.Copy(tw, file)
return err
}
// run the upload in a goroutine so we can send the request headers and
// wait for a '100 Continue' response before sending the tar stream
go func() {
err := uploader.Upload(uploadFn)
if err == nil {
err = tw.Close()
}
reqW.CloseWithError(err)
}()
tn = time.Now()
res, err := transport.RoundTrip(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(data), nil
}
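// Illustrative sketch (not part of the original file): streaming in-memory
// files into a new manifest by combining UploaderFunc with TarUpload. The file
// names and payloads are arbitrary example values.
func exampleTarUpload(c *Client) (string, error) {
    uploader := UploaderFunc(func(upload UploadFn) error {
        for _, name := range []string{"a.txt", "b.txt"} {
            data := []byte("content of " + name)
            file := &File{
                ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
                ManifestEntry: api.ManifestEntry{
                    Path:        name,
                    ContentType: "text/plain",
                    Size:        int64(len(data)),
                },
            }
            if err := upload(file); err != nil {
                return err
            }
        }
        return nil
    })
    // empty hash -> create a new manifest; no default path; no encryption
    return c.TarUpload("", uploader, "", false)
}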
// MultipartUpload uses the given Uploader to upload files to swarm as a
// multipart form, returning the resulting manifest hash
func (c *Client) MultipartUpload(hash string, uploader Uploader) (string, error) {
reqR, reqW := io.Pipe()
defer reqR.Close()
req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR)
if err != nil {
return "", err
}
// use 'Expect: 100-continue' so we don't send the request body if
// the server refuses the request
req.Header.Set("Expect", "100-continue")
mw := multipart.NewWriter(reqW)
req.Header.Set("Content-Type", fmt.Sprintf("multipart/form-data; boundary=%q", mw.Boundary()))
req.Header.Set(swarmhttp.SwarmTagHeaderName, fmt.Sprintf("multipart_upload_%d", time.Now().Unix()))
// define an UploadFn which adds files to the multipart form
uploadFn := func(file *File) error {
hdr := make(textproto.MIMEHeader)
hdr.Set("Content-Disposition", fmt.Sprintf("form-data; name=%q", file.Path))
hdr.Set("Content-Type", file.ContentType)
hdr.Set("Content-Length", strconv.FormatInt(file.Size, 10))
w, err := mw.CreatePart(hdr)
if err != nil {
return err
}
_, err = io.Copy(w, file)
return err
}
// run the upload in a goroutine so we can send the request headers and
// wait for a '100 Continue' response before sending the multipart form
go func() {
err := uploader.Upload(uploadFn)
if err == nil {
err = mw.Close()
}
reqW.CloseWithError(err)
}()
res, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected HTTP status: %s", res.Status)
}
data, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(data), nil
}
// ErrNoFeedUpdatesFound is returned when Swarm cannot find updates of the given feed
var ErrNoFeedUpdatesFound = errors.New("No updates found for this feed")
// CreateFeedWithManifest creates a feed manifest, initializing it with the provided
// data
// Returns the resulting feed manifest address that you can use to include in an ENS Resolver (setContent)
// or reference future updates (Client.UpdateFeed)
func (c *Client) CreateFeedWithManifest(request *feed.Request) (string, error) {
responseStream, err := c.updateFeed(request, true)
if err != nil {
return "", err
}
defer responseStream.Close()
body, err := ioutil.ReadAll(responseStream)
if err != nil {
return "", err
}
var manifestAddress string
if err = json.Unmarshal(body, &manifestAddress); err != nil {
return "", err
}
return manifestAddress, nil
}
// UpdateFeed allows you to set a new version of your content
func (c *Client) UpdateFeed(request *feed.Request) error {
_, err := c.updateFeed(request, false)
return err
}
func (c *Client) updateFeed(request *feed.Request, createManifest bool) (io.ReadCloser, error) {
URL, err := url.Parse(c.Gateway)
if err != nil {
return nil, err
}
URL.Path = "/bzz-feed:/"
values := URL.Query()
body := request.AppendValues(values)
if createManifest {
values.Set("manifest", "1")
}
URL.RawQuery = values.Encode()
req, err := http.NewRequest("POST", URL.String(), bytes.NewBuffer(body))
if err != nil {
return nil, err
}
res, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
return res.Body, nil
}
// QueryFeed returns a byte stream with the raw content of the feed update
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
// points to that address
func (c *Client) QueryFeed(query *feed.Query, manifestAddressOrDomain string) (io.ReadCloser, error) {
return c.queryFeed(query, manifestAddressOrDomain, false)
}
// queryFeed returns a byte stream with the raw content of the feed update
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
// points to that address
// setting meta to true instructs the node to return feed metadata instead
func (c *Client) queryFeed(query *feed.Query, manifestAddressOrDomain string, meta bool) (io.ReadCloser, error) {
URL, err := url.Parse(c.Gateway)
if err != nil {
return nil, err
}
URL.Path = "/bzz-feed:/" + manifestAddressOrDomain
values := URL.Query()
if query != nil {
query.AppendValues(values) //adds query parameters
}
if meta {
values.Set("meta", "1")
}
URL.RawQuery = values.Encode()
res, err := http.Get(URL.String())
if err != nil {
return nil, err
}
if res.StatusCode != http.StatusOK {
if res.StatusCode == http.StatusNotFound {
return nil, ErrNoFeedUpdatesFound
}
errorMessageBytes, err := ioutil.ReadAll(res.Body)
var errorMessage string
if err != nil {
errorMessage = "cannot retrieve error message: " + err.Error()
} else {
errorMessage = string(errorMessageBytes)
}
return nil, fmt.Errorf("Error retrieving feed updates: %s", errorMessage)
}
return res.Body, nil
}
// GetFeedRequest returns a structure that describes the referenced feed status
// manifestAddressOrDomain is the address you obtained in CreateFeedWithManifest or an ENS domain whose Resolver
// points to that address
func (c *Client) GetFeedRequest(query *feed.Query, manifestAddressOrDomain string) (*feed.Request, error) {
responseStream, err := c.queryFeed(query, manifestAddressOrDomain, true)
if err != nil {
return nil, err
}
defer responseStream.Close()
body, err := ioutil.ReadAll(responseStream)
if err != nil {
return nil, err
}
var metadata feed.Request
if err := metadata.UnmarshalJSON(body); err != nil {
return nil, err
}
return &metadata, nil
}
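// Illustrative sketch (not part of the original file): the full client-side
// feed cycle -- create a feed manifest with a first signed update, publish a
// second update through the same manifest, then read the latest content back.
// exampleFeedCycle is a hypothetical helper; the topic string is an arbitrary
// example and signer is any feed.Signer (for instance one built with
// feed.NewGenericSigner).
func exampleFeedCycle(c *Client, signer feed.Signer, first, second []byte) ([]byte, error) {
    topic, err := feed.NewTopic("example topic", nil)
    if err != nil {
        return nil, err
    }
    request := feed.NewFirstRequest(topic)
    request.SetData(first)
    if err := request.Sign(signer); err != nil {
        return nil, err
    }
    manifest, err := c.CreateFeedWithManifest(request) // address usable in ENS or bzz://
    if err != nil {
        return nil, err
    }
    next, err := c.GetFeedRequest(nil, manifest) // template for the follow-up update
    if err != nil {
        return nil, err
    }
    next.SetData(second)
    if err := next.Sign(signer); err != nil {
        return nil, err
    }
    if err := c.UpdateFeed(next); err != nil {
        return nil, err
    }
    body, err := c.QueryFeed(nil, manifest) // latest raw update content
    if err != nil {
        return nil, err
    }
    defer body.Close()
    return ioutil.ReadAll(body)
}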
func GetClientTrace(traceMsg, metricPrefix, ruid string, tn *time.Time) *httptrace.ClientTrace {
trace := &httptrace.ClientTrace{
GetConn: func(_ string) {
log.Trace(traceMsg+" - http get", "event", "GetConn", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".getconn", nil).Update(time.Since(*tn))
},
GotConn: func(_ httptrace.GotConnInfo) {
log.Trace(traceMsg+" - http get", "event", "GotConn", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".gotconn", nil).Update(time.Since(*tn))
},
PutIdleConn: func(err error) {
log.Trace(traceMsg+" - http get", "event", "PutIdleConn", "ruid", ruid, "err", err)
metrics.GetOrRegisterResettingTimer(metricPrefix+".putidle", nil).Update(time.Since(*tn))
},
GotFirstResponseByte: func() {
log.Trace(traceMsg+" - http get", "event", "GotFirstResponseByte", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".firstbyte", nil).Update(time.Since(*tn))
},
Got100Continue: func() {
log.Trace(traceMsg, "event", "Got100Continue", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".got100continue", nil).Update(time.Since(*tn))
},
DNSStart: func(_ httptrace.DNSStartInfo) {
log.Trace(traceMsg, "event", "DNSStart", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsstart", nil).Update(time.Since(*tn))
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
log.Trace(traceMsg, "event", "DNSDone", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".dnsdone", nil).Update(time.Since(*tn))
},
ConnectStart: func(network, addr string) {
log.Trace(traceMsg, "event", "ConnectStart", "ruid", ruid, "network", network, "addr", addr)
metrics.GetOrRegisterResettingTimer(metricPrefix+".connectstart", nil).Update(time.Since(*tn))
},
ConnectDone: func(network, addr string, err error) {
log.Trace(traceMsg, "event", "ConnectDone", "ruid", ruid, "network", network, "addr", addr, "err", err)
metrics.GetOrRegisterResettingTimer(metricPrefix+".connectdone", nil).Update(time.Since(*tn))
},
WroteHeaders: func() {
log.Trace(traceMsg, "event", "WroteHeaders(request)", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".wroteheaders", nil).Update(time.Since(*tn))
},
Wait100Continue: func() {
log.Trace(traceMsg, "event", "Wait100Continue", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".wait100continue", nil).Update(time.Since(*tn))
},
WroteRequest: func(_ httptrace.WroteRequestInfo) {
log.Trace(traceMsg, "event", "WroteRequest", "ruid", ruid)
metrics.GetOrRegisterResettingTimer(metricPrefix+".wroterequest", nil).Update(time.Since(*tn))
},
}
return trace
}
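// Illustrative sketch (not part of the original file): attaching the trace
// returned by GetClientTrace to an arbitrary request, the same way TarUpload
// does above. The trace message, metric prefix and URL are arbitrary example
// values; tn is the reference time the per-event timers are measured against.
func exampleTracedRequest(url string) (*http.Response, error) {
    var tn time.Time
    trace := GetClientTrace("swarm api client - example", "api.client.example", uuid.New()[:8], &tn)
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return nil, err
    }
    req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
    tn = time.Now() // events are timed relative to the moment the request is sent
    return http.DefaultTransport.RoundTrip(req)
}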

api/client/client_test.go (Normal file, 608 lines)
@@ -0,0 +1,608 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package client
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethersphere/swarm/api"
swarmhttp "github.com/ethersphere/swarm/api/http"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/storage/feed"
"github.com/ethersphere/swarm/storage/feed/lookup"
"github.com/ethersphere/swarm/testutil"
)
func serverFunc(api *api.API) swarmhttp.TestServer {
return swarmhttp.NewServer(api, "")
}
// TestClientUploadDownloadRaw tests uploading and downloading raw data to swarm
func TestClientUploadDownloadRaw(t *testing.T) {
testClientUploadDownloadRaw(false, t)
}
func TestClientUploadDownloadRawEncrypted(t *testing.T) {
if testutil.RaceEnabled {
t.Skip("flaky with -race on Travis")
// See: https://github.com/ethersphere/go-ethereum/issues/1254
}
testClientUploadDownloadRaw(true, t)
}
func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
client := NewClient(srv.URL)
// upload some raw data
data := []byte("foo123")
hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt)
if err != nil {
t.Fatal(err)
}
// check the tag was created successfully
tag := srv.Tags.All()[0]
testutil.CheckTag(t, tag, 1, 1, 0, 1)
// check we can download the same data
res, isEncrypted, err := client.DownloadRaw(hash)
if err != nil {
t.Fatal(err)
}
if isEncrypted != toEncrypt {
t.Fatalf("Expected encyption status %v got %v", toEncrypt, isEncrypted)
}
defer res.Close()
gotData, err := ioutil.ReadAll(res)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(gotData, data) {
t.Fatalf("expected downloaded data to be %q, got %q", data, gotData)
}
}
// TestClientUploadDownloadFiles tests uploading and downloading files to swarm
// manifests
func TestClientUploadDownloadFiles(t *testing.T) {
testClientUploadDownloadFiles(false, t)
}
func TestClientUploadDownloadFilesEncrypted(t *testing.T) {
testClientUploadDownloadFiles(true, t)
}
func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
client := NewClient(srv.URL)
upload := func(manifest, path string, data []byte) string {
file := &File{
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
ManifestEntry: api.ManifestEntry{
Path: path,
ContentType: "text/plain",
Size: int64(len(data)),
},
}
hash, err := client.Upload(file, manifest, toEncrypt)
if err != nil {
t.Fatal(err)
}
return hash
}
checkDownload := func(manifest, path string, expected []byte) {
file, err := client.Download(manifest, path)
if err != nil {
t.Fatal(err)
}
defer file.Close()
if file.Size != int64(len(expected)) {
t.Fatalf("expected downloaded file to be %d bytes, got %d", len(expected), file.Size)
}
if file.ContentType != "text/plain" {
t.Fatalf("expected downloaded file to have type %q, got %q", "text/plain", file.ContentType)
}
data, err := ioutil.ReadAll(file)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, expected) {
t.Fatalf("expected downloaded data to be %q, got %q", expected, data)
}
}
// upload a file to the root of a manifest
rootData := []byte("some-data")
rootHash := upload("", "", rootData)
// check we can download the root file
checkDownload(rootHash, "", rootData)
// upload another file to the same manifest
otherData := []byte("some-other-data")
newHash := upload(rootHash, "some/other/path", otherData)
// check we can download both files from the new manifest
checkDownload(newHash, "", rootData)
checkDownload(newHash, "some/other/path", otherData)
// replace the root file with different data
newHash = upload(newHash, "", otherData)
// check both files have the other data
checkDownload(newHash, "", otherData)
checkDownload(newHash, "some/other/path", otherData)
}
var testDirFiles = []string{
"file1.txt",
"file2.txt",
"dir1/file3.txt",
"dir1/file4.txt",
"dir2/file5.txt",
"dir2/dir3/file6.txt",
"dir2/dir4/file7.txt",
"dir2/dir4/file8.txt",
}
func newTestDirectory(t *testing.T) string {
dir, err := ioutil.TempDir("", "swarm-client-test")
if err != nil {
t.Fatal(err)
}
for _, file := range testDirFiles {
path := filepath.Join(dir, file)
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
os.RemoveAll(dir)
t.Fatalf("error creating dir for %s: %s", path, err)
}
if err := ioutil.WriteFile(path, []byte(file), 0644); err != nil {
os.RemoveAll(dir)
t.Fatalf("error writing file %s: %s", path, err)
}
}
return dir
}
// TestClientUploadDownloadDirectory tests uploading and downloading a
// directory of files to a swarm manifest
func TestClientUploadDownloadDirectory(t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
dir := newTestDirectory(t)
defer os.RemoveAll(dir)
// upload the directory
client := NewClient(srv.URL)
defaultPath := testDirFiles[0]
hash, err := client.UploadDirectory(dir, defaultPath, "", false)
if err != nil {
t.Fatalf("error uploading directory: %s", err)
}
// check the tag was created successfully
tag := srv.Tags.All()[0]
testutil.CheckTag(t, tag, 9, 9, 0, 9)
// check we can download the individual files
checkDownloadFile := func(path string, expected []byte) {
file, err := client.Download(hash, path)
if err != nil {
t.Fatal(err)
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, expected) {
t.Fatalf("expected data to be %q, got %q", expected, data)
}
}
for _, file := range testDirFiles {
checkDownloadFile(file, []byte(file))
}
// check we can download the default path
checkDownloadFile("", []byte(testDirFiles[0]))
// check we can download the directory
tmp, err := ioutil.TempDir("", "swarm-client-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
if err := client.DownloadDirectory(hash, "", tmp, ""); err != nil {
t.Fatal(err)
}
for _, file := range testDirFiles {
data, err := ioutil.ReadFile(filepath.Join(tmp, file))
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, []byte(file)) {
t.Fatalf("expected data to be %q, got %q", file, data)
}
}
}
// TestClientFileList tests listing files in a swarm manifest
func TestClientFileList(t *testing.T) {
testClientFileList(false, t)
}
func TestClientFileListEncrypted(t *testing.T) {
testClientFileList(true, t)
}
func testClientFileList(toEncrypt bool, t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
dir := newTestDirectory(t)
defer os.RemoveAll(dir)
client := NewClient(srv.URL)
hash, err := client.UploadDirectory(dir, "", "", toEncrypt)
if err != nil {
t.Fatalf("error uploading directory: %s", err)
}
ls := func(prefix string) []string {
list, err := client.List(hash, prefix, "")
if err != nil {
t.Fatal(err)
}
paths := make([]string, 0, len(list.CommonPrefixes)+len(list.Entries))
paths = append(paths, list.CommonPrefixes...)
for _, entry := range list.Entries {
paths = append(paths, entry.Path)
}
sort.Strings(paths)
return paths
}
tests := map[string][]string{
"": {"dir1/", "dir2/", "file1.txt", "file2.txt"},
"file": {"file1.txt", "file2.txt"},
"file1": {"file1.txt"},
"file2.txt": {"file2.txt"},
"file12": {},
"dir": {"dir1/", "dir2/"},
"dir1": {"dir1/"},
"dir1/": {"dir1/file3.txt", "dir1/file4.txt"},
"dir1/file": {"dir1/file3.txt", "dir1/file4.txt"},
"dir1/file3.txt": {"dir1/file3.txt"},
"dir1/file34": {},
"dir2/": {"dir2/dir3/", "dir2/dir4/", "dir2/file5.txt"},
"dir2/file": {"dir2/file5.txt"},
"dir2/dir": {"dir2/dir3/", "dir2/dir4/"},
"dir2/dir3/": {"dir2/dir3/file6.txt"},
"dir2/dir4/": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
"dir2/dir4/file": {"dir2/dir4/file7.txt", "dir2/dir4/file8.txt"},
"dir2/dir4/file7.txt": {"dir2/dir4/file7.txt"},
"dir2/dir4/file78": {},
}
for prefix, expected := range tests {
actual := ls(prefix)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("expected prefix %q to return %v, got %v", prefix, expected, actual)
}
}
}
// TestClientMultipartUpload tests uploading files to swarm using a multipart
// upload
func TestClientMultipartUpload(t *testing.T) {
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
defer srv.Close()
// define an uploader which uploads testDirFiles with some data
// note: this test should result in SEEN chunks. assert accordingly
data := []byte("some-data")
uploader := UploaderFunc(func(upload UploadFn) error {
for _, name := range testDirFiles {
file := &File{
ReadCloser: ioutil.NopCloser(bytes.NewReader(data)),
ManifestEntry: api.ManifestEntry{
Path: name,
ContentType: "text/plain",
Size: int64(len(data)),
},
}
if err := upload(file); err != nil {
return err
}
}
return nil
})
// upload the files as a multipart upload
client := NewClient(srv.URL)
hash, err := client.MultipartUpload("", uploader)
if err != nil {
t.Fatal(err)
}
// check the tag was created successfully
tag := srv.Tags.All()[0]
testutil.CheckTag(t, tag, 9, 9, 7, 9)
// check we can download the individual files
checkDownloadFile := func(path string) {
file, err := client.Download(hash, path)
if err != nil {
t.Fatal(err)
}
defer file.Close()
gotData, err := ioutil.ReadAll(file)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(gotData, data) {
t.Fatalf("expected data to be %q, got %q", data, gotData)
}
}
for _, file := range testDirFiles {
checkDownloadFile(file)
}
}
func newTestSigner() (*feed.GenericSigner, error) {
privKey, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
if err != nil {
return nil, err
}
return feed.NewGenericSigner(privKey), nil
}
// Test the transparent resolving of feed updates with the bzz:// scheme.
//
// First upload data to bzz:, and store the Swarm hash of the resulting manifest in a feed update.
// This effectively uses a feed to store a pointer to content rather than the content itself.
// Retrieving the update with the Swarm hash should return the manifest pointing directly to the data,
// and a raw retrieve of that hash should return the data.
func TestClientBzzWithFeed(t *testing.T) {
signer, _ := newTestSigner()
// Initialize a Swarm test server
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
swarmClient := NewClient(srv.URL)
defer srv.Close()
// put together some data for our test:
dataBytes := []byte(`
//
// Create some data our manifest will point to. Data that could be very big and wouldn't fit in a feed update.
// So what we are going to do is upload it to Swarm bzz:// and obtain a **manifest hash** pointing to it:
//
// MANIFEST HASH --> DATA
//
// Then, we store that **manifest hash** into a Swarm Feed update. Once we have done this,
// we can use the **feed manifest hash** in bzz:// instead, this way: bzz://feed-manifest-hash.
//
// FEED MANIFEST HASH --> MANIFEST HASH --> DATA
//
// Given that we can update the feed at any time with a new **manifest hash** but the **feed manifest hash**
// stays constant, we have effectively created a fixed address to changing content. (Applause)
//
// FEED MANIFEST HASH (the same) --> MANIFEST HASH(2) --> DATA(2)
//
`)
// Create a virtual File out of memory containing the above data
f := &File{
ReadCloser: ioutil.NopCloser(bytes.NewReader(dataBytes)),
ManifestEntry: api.ManifestEntry{
ContentType: "text/plain",
Mode: 0660,
Size: int64(len(dataBytes)),
},
}
// upload data to bzz:// and retrieve the content-addressed manifest hash, hex-encoded.
manifestAddressHex, err := swarmClient.Upload(f, "", false)
if err != nil {
t.Fatalf("Error creating manifest: %s", err)
}
// convert the hex-encoded manifest hash to a 32-byte slice
manifestAddress := common.FromHex(manifestAddressHex)
if len(manifestAddress) != storage.AddressLength {
t.Fatalf("Something went wrong. Got a hash of an unexpected length. Expected %d bytes. Got %d", storage.AddressLength, len(manifestAddress))
}
// Now create a **feed manifest**. For that, we need a topic:
topic, _ := feed.NewTopic("interesting topic indeed", nil)
// Build a feed request to update data
request := feed.NewFirstRequest(topic)
// Put the 32-byte address of the manifest into the feed update
request.SetData(manifestAddress)
// Sign the update
if err := request.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}
// Publish the update and at the same time request a **feed manifest** to be created
feedManifestAddressHex, err := swarmClient.CreateFeedWithManifest(request)
if err != nil {
t.Fatalf("Error creating feed manifest: %s", err)
}
// Check that we have received exactly the **feed manifest** we expect
// given the topic and the user signing the updates:
correctFeedManifestAddrHex := "747c402e5b9dc715a25a4393147512167bab018a007fad7cdcd9adc7fce1ced2"
if feedManifestAddressHex != correctFeedManifestAddrHex {
t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctFeedManifestAddrHex, feedManifestAddressHex)
}
// Check we get a not found error when trying to get feed updates with a made-up manifest
_, err = swarmClient.QueryFeed(nil, "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")
if err != ErrNoFeedUpdatesFound {
t.Fatalf("Expected to receive ErrNoFeedUpdatesFound error. Got: %s", err)
}
// If we query the feed directly we should get **manifest hash** back:
reader, err := swarmClient.QueryFeed(nil, correctFeedManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
defer reader.Close()
gotData, err := ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
// Check that indeed the **manifest hash** is retrieved
if !bytes.Equal(manifestAddress, gotData) {
t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
}
// Now the final test we were looking for: Use bzz://<feed-manifest> and that should resolve all manifests
// and return the original data directly:
f, err = swarmClient.Download(feedManifestAddressHex, "")
if err != nil {
t.Fatal(err)
}
gotData, err = ioutil.ReadAll(f)
if err != nil {
t.Fatal(err)
}
// Check that we get back the original data:
if !bytes.Equal(dataBytes, gotData) {
t.Fatalf("Expected: %v, got %v", manifestAddress, gotData)
}
}
// TestClientCreateUpdateFeed will check that feeds can be created and updated via the HTTP client.
func TestClientCreateUpdateFeed(t *testing.T) {
signer, _ := newTestSigner()
srv := swarmhttp.NewTestSwarmServer(t, serverFunc, nil)
client := NewClient(srv.URL)
defer srv.Close()
// set raw data for the feed update
databytes := []byte("En un lugar de La Mancha, de cuyo nombre no quiero acordarme...")
// our feed topic name
topic, _ := feed.NewTopic("El Quijote", nil)
createRequest := feed.NewFirstRequest(topic)
createRequest.SetData(databytes)
if err := createRequest.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}
feedManifestHash, err := client.CreateFeedWithManifest(createRequest)
if err != nil {
t.Fatal(err)
}
correctManifestAddrHex := "0e9b645ebc3da167b1d56399adc3276f7a08229301b72a03336be0e7d4b71882"
if feedManifestHash != correctManifestAddrHex {
t.Fatalf("Response feed manifest mismatch, expected '%s', got '%s'", correctManifestAddrHex, feedManifestHash)
}
reader, err := client.QueryFeed(nil, correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
defer reader.Close()
gotData, err := ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(databytes, gotData) {
t.Fatalf("Expected: %v, got %v", databytes, gotData)
}
// define different data
databytes = []byte("... no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero ...")
updateRequest, err := client.GetFeedRequest(nil, correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving update request template: %s", err)
}
updateRequest.SetData(databytes)
if err := updateRequest.Sign(signer); err != nil {
t.Fatalf("Error signing update: %s", err)
}
if err = client.UpdateFeed(updateRequest); err != nil {
t.Fatalf("Error updating feed: %s", err)
}
reader, err = client.QueryFeed(nil, correctManifestAddrHex)
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
defer reader.Close()
gotData, err = ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(databytes, gotData) {
t.Fatalf("Expected: %v, got %v", databytes, gotData)
}
// now try retrieving feed updates without a manifest
fd := &feed.Feed{
Topic: topic,
User: signer.Address(),
}
lookupParams := feed.NewQueryLatest(fd, lookup.NoClue)
reader, err = client.QueryFeed(lookupParams, "")
if err != nil {
t.Fatalf("Error retrieving feed updates: %s", err)
}
defer reader.Close()
gotData, err = ioutil.ReadAll(reader)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(databytes, gotData) {
t.Fatalf("Expected: %v, got %v", databytes, gotData)
}
}
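Outside of a test, the create-then-update flow exercised above boils down to a handful of client calls. The following is an illustrative sketch only: the gateway URL, topic name and payloads are placeholders, and it assumes the same imports as this test file.
// Illustrative sketch, not part of the diff.
func publishAndUpdateFeed(signer feed.Signer) error {
	swarmClient := NewClient("http://localhost:8500") // hypothetical local gateway
	topic, _ := feed.NewTopic("example-topic", nil)
	// create the feed manifest together with a first, signed update
	createReq := feed.NewFirstRequest(topic)
	createReq.SetData([]byte("first update"))
	if err := createReq.Sign(signer); err != nil {
		return err
	}
	manifestHash, err := swarmClient.CreateFeedWithManifest(createReq)
	if err != nil {
		return err
	}
	// publish a further update using the server-provided request template
	updateReq, err := swarmClient.GetFeedRequest(nil, manifestHash)
	if err != nil {
		return err
	}
	updateReq.SetData([]byte("second update"))
	if err := updateReq.Sign(signer); err != nil {
		return err
	}
	return swarmClient.UpdateFeed(updateReq)
}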

api/config.go (new file, 174 lines)

@ -0,0 +1,174 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"crypto/ecdsa"
"fmt"
"os"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethersphere/swarm/contracts/ens"
"github.com/ethersphere/swarm/network"
"github.com/ethersphere/swarm/pss"
"github.com/ethersphere/swarm/services/swap"
"github.com/ethersphere/swarm/storage"
)
const (
DefaultHTTPListenAddr = "127.0.0.1"
DefaultHTTPPort = "8500"
)
// Config holds the bzz node configuration. Separate bzz directories
// allow several bzz nodes to run in parallel.
type Config struct {
// serialised/persisted fields
*storage.FileStoreParams
// LocalStore
ChunkDbPath string
DbCapacity uint64
CacheCapacity uint
BaseKey []byte
*network.HiveParams
Swap *swap.LocalProfile
Pss *pss.PssParams
Contract common.Address
EnsRoot common.Address
EnsAPIs []string
Path string
ListenAddr string
Port string
PublicKey string
BzzKey string
Enode *enode.Node `toml:"-"`
NetworkID uint64
SwapEnabled bool
SyncEnabled bool
SyncingSkipCheck bool
DeliverySkipCheck bool
MaxStreamPeerServers int
LightNodeEnabled bool
BootnodeMode bool
SyncUpdateDelay time.Duration
SwapAPI string
Cors string
BzzAccount string
GlobalStoreAPI string
privateKey *ecdsa.PrivateKey
}
// NewConfig creates a default config with all parameters set to their defaults
func NewConfig() (c *Config) {
c = &Config{
FileStoreParams: storage.NewFileStoreParams(),
HiveParams: network.NewHiveParams(),
Swap: swap.NewDefaultSwapParams(),
Pss: pss.NewPssParams(),
ListenAddr: DefaultHTTPListenAddr,
Port: DefaultHTTPPort,
Path: node.DefaultDataDir(),
EnsAPIs: nil,
EnsRoot: ens.TestNetAddress,
NetworkID: network.DefaultNetworkID,
SwapEnabled: false,
SyncEnabled: true,
SyncingSkipCheck: false,
MaxStreamPeerServers: 10000,
DeliverySkipCheck: true,
SyncUpdateDelay: 15 * time.Second,
SwapAPI: "",
}
return
}
// Init initializes config parameters that can only be set once the
// config building phase is complete (e.g. due to overriding flags)
func (c *Config) Init(prvKey *ecdsa.PrivateKey, nodeKey *ecdsa.PrivateKey) error {
// create swarm dir and record key
err := c.createAndSetPath(c.Path, prvKey)
if err != nil {
return fmt.Errorf("Error creating root swarm data directory: %v", err)
}
c.setKey(prvKey)
// create the new enode record
// signed with the ephemeral node key
enodeParams := &network.EnodeParams{
PrivateKey: prvKey,
EnodeKey: nodeKey,
Lightnode: c.LightNodeEnabled,
Bootnode: c.BootnodeMode,
}
c.Enode, err = network.NewEnode(enodeParams)
if err != nil {
return fmt.Errorf("Error creating enode: %v", err)
}
// initialize components that depend on the swarm instance's private key
if c.SwapEnabled {
c.Swap.Init(c.Contract, prvKey)
}
c.privateKey = prvKey
c.ChunkDbPath = filepath.Join(c.Path, "chunks")
c.BaseKey = common.FromHex(c.BzzKey)
c.Pss = c.Pss.WithPrivateKey(c.privateKey)
return nil
}
func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) {
if c.privateKey != nil {
privKey = c.privateKey
c.privateKey = nil
}
return privKey
}
func (c *Config) setKey(prvKey *ecdsa.PrivateKey) {
bzzkeybytes := network.PrivateKeyToBzzKey(prvKey)
pubkey := crypto.FromECDSAPub(&prvKey.PublicKey)
pubkeyhex := hexutil.Encode(pubkey)
keyhex := hexutil.Encode(bzzkeybytes)
c.privateKey = prvKey
c.PublicKey = pubkeyhex
c.BzzKey = keyhex
}
func (c *Config) createAndSetPath(datadirPath string, prvKey *ecdsa.PrivateKey) error {
address := crypto.PubkeyToAddress(prvKey.PublicKey)
bzzdirPath := filepath.Join(datadirPath, "bzz-"+common.Bytes2Hex(address.Bytes()))
err := os.MkdirAll(bzzdirPath, os.ModePerm)
if err != nil {
return err
}
c.Path = bzzdirPath
return nil
}
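As a usage sketch (editor's illustration, not part of the diff): a node typically builds a Config with NewConfig, overrides individual fields, and only then calls Init with its swarm account key and devp2p node key. Generating throwaway keys with crypto.GenerateKey, and the port and path values, are assumptions made for the example; the crypto package is already imported by this file.
// Illustrative sketch, not part of the diff.
func exampleInitConfig() (*Config, error) {
	cfg := NewConfig()
	cfg.Port = "8501"               // override defaults before calling Init
	cfg.Path = "/tmp/swarm-example" // hypothetical data directory
	privKey, err := crypto.GenerateKey() // swarm account key
	if err != nil {
		return nil, err
	}
	nodeKey, err := crypto.GenerateKey() // ephemeral devp2p node key
	if err != nil {
		return nil, err
	}
	if err := cfg.Init(privKey, nodeKey); err != nil {
		return nil, err // data dir creation or enode record construction failed
	}
	// BzzKey, PublicKey, Enode, ChunkDbPath and BaseKey are now populated.
	return cfg, nil
}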

View File

@ -20,7 +20,7 @@ import (
"encoding/binary"
"errors"
"github.com/ethereum/go-ethereum/swarm/storage/encryption"
"github.com/ethersphere/swarm/storage/encryption"
"golang.org/x/crypto/sha3"
)

View File

@ -27,8 +27,8 @@ import (
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/storage"
)
const maxParallelFiles = 5

View File

@ -25,12 +25,10 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/storage"
)
var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test")
func testFileSystem(t *testing.T, f func(*FileSystem, bool)) {
testAPI(t, func(api *API, _ *chunk.Tags, toEncrypt bool) {
f(NewFileSystem(api), toEncrypt)
@ -70,8 +68,13 @@ func TestApiDirUpload0(t *testing.T) {
t.Fatalf("expected error: %v", err)
}
testDownloadDir, err := ioutil.TempDir(os.TempDir(), "bzz-test")
downloadDir := filepath.Join(testDownloadDir, "test0")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
defer os.RemoveAll(downloadDir)
err = fs.Download(bzzhash, downloadDir)
if err != nil {
t.Fatalf("unexpected error: %v", err)

View File

@ -8,11 +8,11 @@ import (
"time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethereum/go-ethereum/swarm/spancontext"
"github.com/ethersphere/swarm/api"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/sctx"
"github.com/ethersphere/swarm/spancontext"
"github.com/pborman/uuid"
)

View File

@ -26,7 +26,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethersphere/swarm/api"
)
var (

View File

@ -20,7 +20,7 @@ import (
"fmt"
"net/http"
"github.com/ethereum/go-ethereum/swarm/log"
"github.com/ethersphere/swarm/log"
)
/*
@ -30,7 +30,7 @@ Usage:
import (
"github.com/ethereum/go-ethereum/common/httpclient"
"github.com/ethereum/go-ethereum/swarm/api/http"
"github.com/ethersphere/swarm/api/http"
)
client := httpclient.New()
// for (private) swarm proxy running locally

View File

@ -3,8 +3,8 @@ package http
import (
"context"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethersphere/swarm/api"
"github.com/ethersphere/swarm/sctx"
)
type uriKey struct{}

api/http/server.go (new file, 937 lines)

@ -0,0 +1,937 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/*
A simple http server interface to Swarm
*/
package http
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math"
"mime"
"mime/multipart"
"net/http"
"os"
"path"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/api"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/sctx"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/storage/feed"
"github.com/rs/cors"
)
var (
postRawCount = metrics.NewRegisteredCounter("api.http.post.raw.count", nil)
postRawFail = metrics.NewRegisteredCounter("api.http.post.raw.fail", nil)
postFilesCount = metrics.NewRegisteredCounter("api.http.post.files.count", nil)
postFilesFail = metrics.NewRegisteredCounter("api.http.post.files.fail", nil)
deleteCount = metrics.NewRegisteredCounter("api.http.delete.count", nil)
deleteFail = metrics.NewRegisteredCounter("api.http.delete.fail", nil)
getCount = metrics.NewRegisteredCounter("api.http.get.count", nil)
getFail = metrics.NewRegisteredCounter("api.http.get.fail", nil)
getFileCount = metrics.NewRegisteredCounter("api.http.get.file.count", nil)
getFileNotFound = metrics.NewRegisteredCounter("api.http.get.file.notfound", nil)
getFileFail = metrics.NewRegisteredCounter("api.http.get.file.fail", nil)
getListCount = metrics.NewRegisteredCounter("api.http.get.list.count", nil)
getListFail = metrics.NewRegisteredCounter("api.http.get.list.fail", nil)
)
const SwarmTagHeaderName = "x-swarm-tag"
type methodHandler map[string]http.Handler
func (m methodHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
v, ok := m[r.Method]
if ok {
v.ServeHTTP(rw, r)
return
}
rw.WriteHeader(http.StatusMethodNotAllowed)
}
func NewServer(api *api.API, corsString string) *Server {
var allowedOrigins []string
for _, domain := range strings.Split(corsString, ",") {
allowedOrigins = append(allowedOrigins, strings.TrimSpace(domain))
}
c := cors.New(cors.Options{
AllowedOrigins: allowedOrigins,
AllowedMethods: []string{http.MethodPost, http.MethodGet, http.MethodDelete, http.MethodPatch, http.MethodPut},
MaxAge: 600,
AllowedHeaders: []string{"*"},
})
server := &Server{api: api}
defaultMiddlewares := []Adapter{
RecoverPanic,
SetRequestID,
SetRequestHost,
InitLoggingResponseWriter,
ParseURI,
InstrumentOpenTracing,
}
tagAdapter := Adapter(func(h http.Handler) http.Handler {
return InitUploadTag(h, api.Tags)
})
defaultPostMiddlewares := append(defaultMiddlewares, tagAdapter)
mux := http.NewServeMux()
mux.Handle("/bzz:/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleBzzGet),
defaultMiddlewares...,
),
"POST": Adapt(
http.HandlerFunc(server.HandlePostFiles),
defaultPostMiddlewares...,
),
"DELETE": Adapt(
http.HandlerFunc(server.HandleDelete),
defaultMiddlewares...,
),
})
mux.Handle("/bzz-raw:/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleGet),
defaultMiddlewares...,
),
"POST": Adapt(
http.HandlerFunc(server.HandlePostRaw),
defaultPostMiddlewares...,
),
})
mux.Handle("/bzz-immutable:/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleBzzGet),
defaultMiddlewares...,
),
})
mux.Handle("/bzz-hash:/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleGet),
defaultMiddlewares...,
),
})
mux.Handle("/bzz-list:/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleGetList),
defaultMiddlewares...,
),
})
mux.Handle("/bzz-feed:/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleGetFeed),
defaultMiddlewares...,
),
"POST": Adapt(
http.HandlerFunc(server.HandlePostFeed),
defaultMiddlewares...,
),
})
mux.Handle("/", methodHandler{
"GET": Adapt(
http.HandlerFunc(server.HandleRootPaths),
SetRequestID,
InitLoggingResponseWriter,
),
})
server.Handler = c.Handler(mux)
return server
}
func (s *Server) ListenAndServe(addr string) error {
s.listenAddr = addr
return http.ListenAndServe(addr, s)
}
// browser API for registering bzz url scheme handlers:
// https://developer.mozilla.org/en/docs/Web-based_protocol_handlers
// electron (chromium) api for registering bzz url scheme handlers:
// https://github.com/atom/electron/blob/master/docs/api/protocol.md
type Server struct {
http.Handler
api *api.API
listenAddr string
}
func (s *Server) HandleBzzGet(w http.ResponseWriter, r *http.Request) {
log.Debug("handleBzzGet", "ruid", GetRUID(r.Context()), "uri", r.RequestURI)
if r.Header.Get("Accept") == "application/x-tar" {
uri := GetURI(r.Context())
_, credentials, _ := r.BasicAuth()
reader, err := s.api.GetDirectoryTar(r.Context(), s.api.Decryptor(r.Context(), credentials), uri)
if err != nil {
if isDecryptError(err) {
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", uri.Address().String()))
respondError(w, r, err.Error(), http.StatusUnauthorized)
return
}
respondError(w, r, fmt.Sprintf("Had an error building the tarball: %v", err), http.StatusInternalServerError)
return
}
defer reader.Close()
w.Header().Set("Content-Type", "application/x-tar")
fileName := uri.Addr
if found := path.Base(uri.Path); found != "" && found != "." && found != "/" {
fileName = found
}
w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s.tar\"", fileName))
w.WriteHeader(http.StatusOK)
io.Copy(w, reader)
return
}
s.HandleGetFile(w, r)
}
func (s *Server) HandleRootPaths(w http.ResponseWriter, r *http.Request) {
switch r.RequestURI {
case "/":
respondTemplate(w, r, "landing-page", "Swarm: Please request a valid ENS or swarm hash with the appropriate bzz scheme", 200)
return
case "/robots.txt":
w.Header().Set("Last-Modified", time.Now().Format(http.TimeFormat))
fmt.Fprintf(w, "User-agent: *\nDisallow: /")
case "/favicon.ico":
w.WriteHeader(http.StatusOK)
w.Write(faviconBytes)
default:
respondError(w, r, "Not Found", http.StatusNotFound)
}
}
// HandlePostRaw handles a POST request to a raw bzz-raw:/ URI, stores the request
// body in swarm and returns the resulting storage address as a text/plain response
func (s *Server) HandlePostRaw(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
log.Debug("handle.post.raw", "ruid", ruid)
tagUid := sctx.GetTag(r.Context())
tag, err := s.api.Tags.Get(tagUid)
if err != nil {
log.Error("handle post raw got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
}
postRawCount.Inc(1)
toEncrypt := false
uri := GetURI(r.Context())
if uri.Addr == "encrypt" {
toEncrypt = true
}
if uri.Path != "" {
postRawFail.Inc(1)
respondError(w, r, "raw POST request cannot contain a path", http.StatusBadRequest)
return
}
if uri.Addr != "" && uri.Addr != "encrypt" {
postRawFail.Inc(1)
respondError(w, r, "raw POST request addr can only be empty or \"encrypt\"", http.StatusBadRequest)
return
}
if r.Header.Get("Content-Length") == "" {
postRawFail.Inc(1)
respondError(w, r, "missing Content-Length header in request", http.StatusBadRequest)
return
}
addr, wait, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt)
if err != nil {
postRawFail.Inc(1)
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
wait(r.Context())
tag.DoneSplit(addr)
log.Debug("stored content", "ruid", ruid, "key", addr)
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, addr)
}
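// [Editor's sketch, not part of this file] HandlePostRaw above requires a
// Content-Length header and an address part of either "" or "encrypt".
// Assuming a local gateway listening on :8500, a minimal client-side call
// could look like this (http.Post fills in Content-Length for a *bytes.Reader):
//
//	data := bytes.NewReader([]byte("hello swarm"))
//	resp, err := http.Post("http://localhost:8500/bzz-raw:/", "application/octet-stream", data)
//	if err != nil { /* handle error */ }
//	defer resp.Body.Close()
//	hash, _ := ioutil.ReadAll(resp.Body) // hex storage address, returned as text/plain
//
// Posting to /bzz-raw:/encrypt instead stores the content encrypted.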
// HandlePostFiles handles a POST request to
// bzz:/<hash>/<path> which contains either a single file or multiple files
// (either a tar archive or multipart form), adds those files either to an
// existing manifest or to a new manifest under <path> and returns the
// resulting manifest hash as a text/plain response
func (s *Server) HandlePostFiles(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
log.Debug("handle.post.files", "ruid", ruid)
postFilesCount.Inc(1)
contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type"))
if err != nil {
postFilesFail.Inc(1)
respondError(w, r, err.Error(), http.StatusBadRequest)
return
}
toEncrypt := false
uri := GetURI(r.Context())
if uri.Addr == "encrypt" {
toEncrypt = true
}
var addr storage.Address
if uri.Addr != "" && uri.Addr != "encrypt" {
addr, err = s.api.Resolve(r.Context(), uri.Addr)
if err != nil {
postFilesFail.Inc(1)
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusInternalServerError)
return
}
log.Debug("resolved key", "ruid", ruid, "key", addr)
} else {
addr, err = s.api.NewManifest(r.Context(), toEncrypt)
if err != nil {
postFilesFail.Inc(1)
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
log.Debug("new manifest", "ruid", ruid, "key", addr)
}
newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error {
switch contentType {
case "application/x-tar":
_, err := s.handleTarUpload(r, mw)
if err != nil {
respondError(w, r, fmt.Sprintf("error uploading tarball: %v", err), http.StatusInternalServerError)
return err
}
return nil
case "multipart/form-data":
return s.handleMultipartUpload(r, params["boundary"], mw)
default:
return s.handleDirectUpload(r, mw)
}
})
if err != nil {
postFilesFail.Inc(1)
respondError(w, r, fmt.Sprintf("cannot create manifest: %s", err), http.StatusInternalServerError)
return
}
tagUid := sctx.GetTag(r.Context())
tag, err := s.api.Tags.Get(tagUid)
if err != nil {
log.Error("got an error retrieving tag for DoneSplit", "tagUid", tagUid, "err", err)
}
log.Debug("done splitting, setting tag total", "SPLIT", tag.Get(chunk.StateSplit), "TOTAL", tag.Total())
tag.DoneSplit(newAddr)
log.Debug("stored content", "ruid", ruid, "key", newAddr)
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, newAddr)
}
func (s *Server) handleTarUpload(r *http.Request, mw *api.ManifestWriter) (storage.Address, error) {
log.Debug("handle.tar.upload", "ruid", GetRUID(r.Context()), "tag", sctx.GetTag(r.Context()))
defaultPath := r.URL.Query().Get("defaultpath")
key, err := s.api.UploadTar(r.Context(), r.Body, GetURI(r.Context()).Path, defaultPath, mw)
if err != nil {
return nil, err
}
return key, nil
}
func (s *Server) handleMultipartUpload(r *http.Request, boundary string, mw *api.ManifestWriter) error {
ruid := GetRUID(r.Context())
log.Debug("handle.multipart.upload", "ruid", ruid)
mr := multipart.NewReader(r.Body, boundary)
for {
part, err := mr.NextPart()
if err == io.EOF {
return nil
} else if err != nil {
return fmt.Errorf("error reading multipart form: %s", err)
}
var size int64
var reader io.Reader
if contentLength := part.Header.Get("Content-Length"); contentLength != "" {
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
return fmt.Errorf("error parsing multipart content length: %s", err)
}
reader = part
} else {
// copy the part to a tmp file to get its size
tmp, err := ioutil.TempFile("", "swarm-multipart")
if err != nil {
return err
}
defer os.Remove(tmp.Name())
defer tmp.Close()
size, err = io.Copy(tmp, part)
if err != nil {
return fmt.Errorf("error copying multipart content: %s", err)
}
if _, err := tmp.Seek(0, io.SeekStart); err != nil {
return fmt.Errorf("error copying multipart content: %s", err)
}
reader = tmp
}
// add the entry under the path from the request
name := part.FileName()
if name == "" {
name = part.FormName()
}
uri := GetURI(r.Context())
path := path.Join(uri.Path, name)
entry := &api.ManifestEntry{
Path: path,
ContentType: part.Header.Get("Content-Type"),
Size: size,
}
log.Debug("adding path to new manifest", "ruid", ruid, "bytes", entry.Size, "path", entry.Path)
contentKey, err := mw.AddEntry(r.Context(), reader, entry)
if err != nil {
return fmt.Errorf("error adding manifest entry from multipart form: %s", err)
}
log.Debug("stored content", "ruid", ruid, "key", contentKey)
}
}
func (s *Server) handleDirectUpload(r *http.Request, mw *api.ManifestWriter) error {
ruid := GetRUID(r.Context())
log.Debug("handle.direct.upload", "ruid", ruid)
key, err := mw.AddEntry(r.Context(), r.Body, &api.ManifestEntry{
Path: GetURI(r.Context()).Path,
ContentType: r.Header.Get("Content-Type"),
Mode: 0644,
Size: r.ContentLength,
})
if err != nil {
return err
}
log.Debug("stored content", "ruid", ruid, "key", key)
return nil
}
// HandleDelete handles a DELETE request to bzz:/<manifest>/<path>, removes
// <path> from <manifest> and returns the resulting manifest hash as a
// text/plain response
func (s *Server) HandleDelete(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
uri := GetURI(r.Context())
log.Debug("handle.delete", "ruid", ruid)
deleteCount.Inc(1)
newKey, err := s.api.Delete(r.Context(), uri.Addr, uri.Path)
if err != nil {
deleteFail.Inc(1)
respondError(w, r, fmt.Sprintf("could not delete from manifest: %v", err), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, newKey)
}
// HandlePostFeed handles feed manifest creation and feed updates.
// The POST request accepts a JSON structure as defined in the feeds package: `feed.updateRequestJSON`.
// A request can a) create a feed manifest, b) update a feed, or c) both: create a feed manifest and publish a first update.
func (s *Server) HandlePostFeed(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
uri := GetURI(r.Context())
log.Debug("handle.post.feed", "ruid", ruid)
var err error
// Creation and update must send feed.updateRequestJSON JSON structure
body, err := ioutil.ReadAll(r.Body)
if err != nil {
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
fd, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query())
if err != nil { // couldn't parse query string or retrieve manifest
getFail.Inc(1)
httpStatus := http.StatusBadRequest
if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI {
httpStatus = http.StatusNotFound
}
respondError(w, r, fmt.Sprintf("cannot retrieve feed from manifest: %s", err), httpStatus)
return
}
var updateRequest feed.Request
updateRequest.Feed = *fd
query := r.URL.Query()
if err := updateRequest.FromValues(query, body); err != nil { // decodes request from query parameters
respondError(w, r, err.Error(), http.StatusBadRequest)
return
}
switch {
case updateRequest.IsUpdate():
// Verify that the signature is intact and that the signer is authorized
// to update this feed
// Check this early, to avoid creating a feed and then not being able to set its first update.
if err = updateRequest.Verify(); err != nil {
respondError(w, r, err.Error(), http.StatusForbidden)
return
}
_, err = s.api.FeedsUpdate(r.Context(), &updateRequest)
if err != nil {
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
fallthrough
case query.Get("manifest") == "1":
// we create a manifest so we can retrieve feed updates with bzz:// later
// this manifest has a special "feed type" manifest, and saves the
// feed identification used to retrieve feed updates later
m, err := s.api.NewFeedManifest(r.Context(), &updateRequest.Feed)
if err != nil {
respondError(w, r, fmt.Sprintf("failed to create feed manifest: %v", err), http.StatusInternalServerError)
return
}
// the key to the manifest will be passed back to the client
// the client can access the feed directly through its Feed member
// the manifest key can be set as content in the resolver of the ENS name
outdata, err := json.Marshal(m)
if err != nil {
respondError(w, r, fmt.Sprintf("failed to create json response: %s", err), http.StatusInternalServerError)
return
}
w.Header().Add("Content-Type", "application/json")
fmt.Fprint(w, string(outdata))
default:
respondError(w, r, "Missing signature in feed update request", http.StatusBadRequest)
}
}
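// [Editor's sketch, not part of this file] In terms of the query string, the
// cases above map roughly to (parameter names as parsed by feed.Request.FromValues):
//
//	POST /bzz-feed:/?user=<addr>&topic=<topic>&manifest=1
//	    create a feed manifest only (no signed update in the request)
//	POST /bzz-feed:/?user=<addr>&topic=<topic>&level=..&time=..&signature=0x..   (update data in the body)
//	    publish a signed update; the feed manifest address is returned as JSON
//
// The unsigned update template (with the level/time values to sign) can be
// fetched beforehand with GET /bzz-feed:/?user=<addr>&topic=<topic>&meta=1,
// which is presumably what the Go client's GetFeedRequest relies on.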
// HandleGetFeed retrieves Swarm feeds updates:
// bzz-feed://<manifest address or ENS name> - get latest feed update, given a manifest address
// - or -
// specify user + topic (optional), subtopic name (optional) directly, without manifest:
// bzz-feed://?user=0x...&topic=0x...&name=subtopic name
// topic defaults to 0x000... if not specified.
// name defaults to empty string if not specified.
// thus, empty name and topic refers to the user's default feed.
//
// Optional parameters:
// time=xx - get the latest update before time (in epoch seconds)
// hint.time=xx - hint for the lookup algorithm: look for updates around that time
// hint.level=xx - hint for the lookup algorithm: look for updates around this frequency level
// meta=1 - get feed metadata and status information instead of performing a feed query
// NOTE: meta=1 will be deprecated in the near future
func (s *Server) HandleGetFeed(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
uri := GetURI(r.Context())
log.Debug("handle.get.feed", "ruid", ruid)
var err error
fd, err := s.api.ResolveFeed(r.Context(), uri, r.URL.Query())
if err != nil { // couldn't parse query string or retrieve manifest
getFail.Inc(1)
httpStatus := http.StatusBadRequest
if err == api.ErrCannotLoadFeedManifest || err == api.ErrCannotResolveFeedURI {
httpStatus = http.StatusNotFound
}
respondError(w, r, fmt.Sprintf("cannot retrieve feed information from manifest: %s", err), httpStatus)
return
}
// determine if the query specifies period and version or it is a metadata query
if r.URL.Query().Get("meta") == "1" {
unsignedUpdateRequest, err := s.api.FeedsNewRequest(r.Context(), fd)
if err != nil {
getFail.Inc(1)
respondError(w, r, fmt.Sprintf("cannot retrieve feed metadata for feed=%s: %s", fd.Hex(), err), http.StatusNotFound)
return
}
rawResponse, err := unsignedUpdateRequest.MarshalJSON()
if err != nil {
respondError(w, r, fmt.Sprintf("cannot encode unsigned feed update request: %v", err), http.StatusInternalServerError)
return
}
w.Header().Add("Content-type", "application/json")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, string(rawResponse))
return
}
lookupParams := &feed.Query{Feed: *fd}
if err = lookupParams.FromValues(r.URL.Query()); err != nil { // parse period, version
respondError(w, r, fmt.Sprintf("invalid feed update request:%s", err), http.StatusBadRequest)
return
}
data, err := s.api.FeedsLookup(r.Context(), lookupParams)
// any error from the lookup is translated into an HTTP status code below
if err != nil {
code, err2 := s.translateFeedError(w, r, "feed lookup fail", err)
respondError(w, r, err2.Error(), code)
return
}
// All ok, serve the retrieved update
log.Debug("Found update", "feed", fd.Hex(), "ruid", ruid)
w.Header().Set("Content-Type", api.MimeOctetStream)
http.ServeContent(w, r, "", time.Now(), bytes.NewReader(data))
}
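// [Editor's sketch, not part of this file] Concrete forms of the queries
// described in the comment above, against a hypothetical local gateway:
//
//	GET http://localhost:8500/bzz-feed:/<feed-manifest-hash>                  latest update, via manifest
//	GET http://localhost:8500/bzz-feed:/?user=0x<addr>&topic=0x<topic>        latest update, no manifest
//	GET http://localhost:8500/bzz-feed:/?user=0x<addr>&meta=1                 unsigned update template (JSON)
//	GET http://localhost:8500/bzz-feed:/?user=0x<addr>&time=1561000000        latest update before that epoch time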
func (s *Server) translateFeedError(w http.ResponseWriter, r *http.Request, supErr string, err error) (int, error) {
code := 0
defaultErr := fmt.Errorf("%s: %v", supErr, err)
rsrcErr, ok := err.(*feed.Error)
if ok && rsrcErr != nil {
code = rsrcErr.Code()
}
switch code {
case storage.ErrInvalidValue:
return http.StatusBadRequest, defaultErr
case storage.ErrNotFound, storage.ErrNotSynced, storage.ErrNothingToReturn, storage.ErrInit:
return http.StatusNotFound, defaultErr
case storage.ErrUnauthorized, storage.ErrInvalidSignature:
return http.StatusUnauthorized, defaultErr
case storage.ErrDataOverflow:
return http.StatusRequestEntityTooLarge, defaultErr
}
return http.StatusInternalServerError, defaultErr
}
// HandleGet handles a GET request to
// - bzz-raw://<key> and responds with the raw content stored at the
// given storage key
// - bzz-hash://<key> and responds with the hash of the content stored
// at the given storage key as a text/plain response
func (s *Server) HandleGet(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
uri := GetURI(r.Context())
log.Debug("handle.get", "ruid", ruid, "uri", uri)
getCount.Inc(1)
_, pass, _ := r.BasicAuth()
addr, err := s.api.ResolveURI(r.Context(), uri, pass)
if err != nil {
getFail.Inc(1)
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
return
}
w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz://<hex key>/path, so we are sure it is immutable.
log.Debug("handle.get: resolved", "ruid", ruid, "key", addr)
// if path is set, interpret <key> as a manifest and return the
// raw entry at the given path
etag := common.Bytes2Hex(addr)
noneMatchEtag := r.Header.Get("If-None-Match")
w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to manifest key or raw entry key.
if noneMatchEtag != "" {
if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), addr) {
w.WriteHeader(http.StatusNotModified)
return
}
}
switch {
case uri.Raw():
// check the root chunk exists by retrieving the file's size
reader, isEncrypted := s.api.Retrieve(r.Context(), addr)
if _, err := reader.Size(r.Context(), nil); err != nil {
getFail.Inc(1)
respondError(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound)
return
}
w.Header().Set("X-Decrypted", fmt.Sprintf("%v", isEncrypted))
// allow the request to overwrite the content type using a query
// parameter
if typ := r.URL.Query().Get("content_type"); typ != "" {
w.Header().Set("Content-Type", typ)
}
http.ServeContent(w, r, "", time.Now(), reader)
case uri.Hash():
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
fmt.Fprint(w, addr)
}
}
// HandleGetList handles a GET request to bzz-list:/<manifest>/<path> and returns
// a list of all files contained in <manifest> under <path> grouped into
// common prefixes using "/" as a delimiter
func (s *Server) HandleGetList(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
uri := GetURI(r.Context())
_, credentials, _ := r.BasicAuth()
log.Debug("handle.get.list", "ruid", ruid, "uri", uri)
getListCount.Inc(1)
// ensure the root path has a trailing slash so that relative URLs work
if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") {
http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently)
return
}
addr, err := s.api.Resolve(r.Context(), uri.Addr)
if err != nil {
getListFail.Inc(1)
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
return
}
log.Debug("handle.get.list: resolved", "ruid", ruid, "key", addr)
list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), addr, uri.Path)
if err != nil {
getListFail.Inc(1)
if isDecryptError(err) {
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", addr.String()))
respondError(w, r, err.Error(), http.StatusUnauthorized)
return
}
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
// if the client wants HTML (e.g. a browser) then render the list as a
// HTML index with relative URLs
if strings.Contains(r.Header.Get("Accept"), "text/html") {
w.Header().Set("Content-Type", "text/html")
err := TemplatesMap["bzz-list"].Execute(w, &htmlListData{
URI: &api.URI{
Scheme: "bzz",
Addr: uri.Addr,
Path: uri.Path,
},
List: &list,
})
if err != nil {
getListFail.Inc(1)
log.Error(fmt.Sprintf("error rendering list HTML: %s", err))
}
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(&list)
}
// HandleGetFile handles a GET request to bzz://<manifest>/<path> and responds
// with the content of the file at <path> from the given <manifest>
func (s *Server) HandleGetFile(w http.ResponseWriter, r *http.Request) {
ruid := GetRUID(r.Context())
uri := GetURI(r.Context())
_, credentials, _ := r.BasicAuth()
log.Debug("handle.get.file", "ruid", ruid, "uri", r.RequestURI)
getFileCount.Inc(1)
// ensure the root path has a trailing slash so that relative URLs work
if uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") {
http.Redirect(w, r, r.URL.Path+"/", http.StatusMovedPermanently)
return
}
var err error
manifestAddr := uri.Address()
if manifestAddr == nil {
manifestAddr, err = s.api.Resolve(r.Context(), uri.Addr)
if err != nil {
getFileFail.Inc(1)
respondError(w, r, fmt.Sprintf("cannot resolve %s: %s", uri.Addr, err), http.StatusNotFound)
return
}
} else {
w.Header().Set("Cache-Control", "max-age=2147483648, immutable") // url was of type bzz://<hex key>/path, so we are sure it is immutable.
}
log.Debug("handle.get.file: resolved", "ruid", ruid, "key", manifestAddr)
reader, contentType, status, contentKey, err := s.api.Get(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path)
etag := common.Bytes2Hex(contentKey)
noneMatchEtag := r.Header.Get("If-None-Match")
w.Header().Set("ETag", fmt.Sprintf("%q", etag)) // set etag to actual content key.
if noneMatchEtag != "" {
if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), contentKey) {
w.WriteHeader(http.StatusNotModified)
return
}
}
if err != nil {
if isDecryptError(err) {
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr))
respondError(w, r, err.Error(), http.StatusUnauthorized)
return
}
switch status {
case http.StatusNotFound:
getFileNotFound.Inc(1)
respondError(w, r, err.Error(), http.StatusNotFound)
default:
getFileFail.Inc(1)
respondError(w, r, err.Error(), http.StatusInternalServerError)
}
return
}
// the request results in ambiguous files
// e.g. /read with readme.md and readinglist.txt available in manifest
if status == http.StatusMultipleChoices {
list, err := s.api.GetManifestList(r.Context(), s.api.Decryptor(r.Context(), credentials), manifestAddr, uri.Path)
if err != nil {
getFileFail.Inc(1)
if isDecryptError(err) {
w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", manifestAddr))
respondError(w, r, err.Error(), http.StatusUnauthorized)
return
}
respondError(w, r, err.Error(), http.StatusInternalServerError)
return
}
log.Debug(fmt.Sprintf("Multiple choices! --> %v", list), "ruid", ruid)
// show a page that links to the available entries
ShowMultipleChoices(w, r, list)
return
}
// check the root chunk exists by retrieving the file's size
if _, err := reader.Size(r.Context(), nil); err != nil {
getFileNotFound.Inc(1)
respondError(w, r, fmt.Sprintf("file not found %s: %s", uri, err), http.StatusNotFound)
return
}
if contentType != "" {
w.Header().Set("Content-Type", contentType)
}
fileName := uri.Addr
if found := path.Base(uri.Path); found != "" && found != "." && found != "/" {
fileName = found
}
w.Header().Set("Content-Disposition", fmt.Sprintf("inline; filename=\"%s\"", fileName))
http.ServeContent(w, r, fileName, time.Now(), newBufferedReadSeeker(reader, getFileBufferSize))
}
// calculateNumberOfChunks calculates the number of chunks needed to store content of a given length
func calculateNumberOfChunks(contentLength int64, isEncrypted bool) int64 {
if contentLength < 4096 {
return 1
}
branchingFactor := 128
if isEncrypted {
branchingFactor = 64
}
dataChunks := math.Ceil(float64(contentLength) / float64(4096))
totalChunks := dataChunks
intermediate := dataChunks / float64(branchingFactor)
for intermediate > 1 {
totalChunks += math.Ceil(intermediate)
intermediate = intermediate / float64(branchingFactor)
}
return int64(totalChunks) + 1
}
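// [Editor's note] A worked example of the calculation above, for an
// unencrypted upload of 1,000,000 bytes (branching factor 128):
//
//	dataChunks   = ceil(1,000,000 / 4096) = 245
//	intermediate = 245 / 128 ~ 1.91  -> add ceil(1.91) = 2 intermediate chunks
//	next level   = 1.91 / 128 < 1    -> loop stops
//	total        = 245 + 2 + 1 (root) = 248 chunks
//
// With encryption the branching factor halves to 64, because each reference
// stored in an intermediate chunk doubles in size (hash plus decryption key).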
// The size of buffer used for bufio.Reader on LazyChunkReader passed to
// http.ServeContent in HandleGetFile.
// Warning: This value influences the number of chunk requests and chunker join goroutines
// per file request.
// Recommended value is 4 times the io.Copy default buffer value which is 32kB.
const getFileBufferSize = 4 * 32 * 1024
// bufferedReadSeeker wraps bufio.Reader to expose the Seek method
// of the provided io.ReadSeeker in newBufferedReadSeeker.
type bufferedReadSeeker struct {
r io.Reader
s io.Seeker
}
// newBufferedReadSeeker creates a new instance of bufferedReadSeeker,
// out of io.ReadSeeker. Argument `size` is the size of the read buffer.
func newBufferedReadSeeker(readSeeker io.ReadSeeker, size int) bufferedReadSeeker {
return bufferedReadSeeker{
r: bufio.NewReaderSize(readSeeker, size),
s: readSeeker,
}
}
func (b bufferedReadSeeker) Read(p []byte) (n int, err error) {
return b.r.Read(p)
}
func (b bufferedReadSeeker) Seek(offset int64, whence int) (int64, error) {
return b.s.Seek(offset, whence)
}
type loggingResponseWriter struct {
http.ResponseWriter
statusCode int
}
func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
return &loggingResponseWriter{w, http.StatusOK}
}
func (lrw *loggingResponseWriter) WriteHeader(code int) {
lrw.statusCode = code
lrw.ResponseWriter.WriteHeader(code)
}
func isDecryptError(err error) bool {
return strings.Contains(err.Error(), api.ErrDecrypt.Error())
}

api/http/server_test.go (new file, 1409 lines): file diff suppressed because it is too large

View File

@ -22,7 +22,7 @@ import (
"html/template"
"path"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethersphere/swarm/api"
)
type htmlListData struct {

View File

@ -23,11 +23,11 @@ import (
"os"
"testing"
"github.com/ethereum/go-ethereum/swarm/api"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethereum/go-ethereum/swarm/storage/feed"
"github.com/ethereum/go-ethereum/swarm/storage/localstore"
"github.com/ethersphere/swarm/api"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/storage"
"github.com/ethersphere/swarm/storage/feed"
"github.com/ethersphere/swarm/storage/localstore"
)
type TestServer interface {

api/inspector.go (new file, 98 lines)

@ -0,0 +1,98 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"context"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/network"
"github.com/ethersphere/swarm/storage"
)
type Inspector struct {
api *API
hive *network.Hive
netStore *storage.NetStore
}
func NewInspector(api *API, hive *network.Hive, netStore *storage.NetStore) *Inspector {
return &Inspector{api, hive, netStore}
}
// Hive prints the kademlia table
func (i *Inspector) Hive() string {
return i.hive.String()
}
// KademliaInfo returns structured output of the Kademlia state that we can check for equality
func (i *Inspector) KademliaInfo() network.KademliaInfo {
return i.hive.KademliaInfo()
}
func (i *Inspector) IsPullSyncing() bool {
lastReceivedChunksMsg := metrics.GetOrRegisterGauge("network.stream.received_chunks", nil)
// last received chunks msg time
lrct := time.Unix(0, lastReceivedChunksMsg.Value())
// if last received chunks msg time is after now-15sec. (i.e. within the last 15sec.) then we say that the node is still syncing
// technically this is not correct, because this might have been a retrieve request, but for the time being it works for our purposes
// because we know we are not making retrieve requests on the node while checking this
return lrct.After(time.Now().Add(-15 * time.Second))
}
// DeliveriesPerPeer returns the sum of chunks we received from a given peer
func (i *Inspector) DeliveriesPerPeer() map[string]int64 {
res := map[string]int64{}
// iterate connection in kademlia
i.hive.Kademlia.EachConn(nil, 255, func(p *network.Peer, po int) bool {
// get how many chunks we receive for retrieve requests per peer
peermetric := fmt.Sprintf("chunk.delivery.%x", p.Over()[:16])
res[fmt.Sprintf("%x", p.Over()[:16])] = metrics.GetOrRegisterCounter(peermetric, nil).Count()
return true
})
return res
}
// Has checks whether each chunk address is present in the underlying datastore.
// The returned string contains one character per address: "1" if the chunk is
// stored locally, "0" if it is not.
func (i *Inspector) Has(chunkAddresses []storage.Address) string {
hostChunks := []string{}
for _, addr := range chunkAddresses {
has, err := i.netStore.Has(context.Background(), addr)
if err != nil {
log.Error(err.Error())
}
if has {
hostChunks = append(hostChunks, "1")
} else {
hostChunks = append(hostChunks, "0")
}
}
return strings.Join(hostChunks, "")
}
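For illustration (the addresses and the inspector instance below are hypothetical): with three chunk addresses of which only the first and third are stored locally, Has returns "101", so the strings reported by several nodes can be compared position by position.
// Illustrative sketch, not part of the diff.
func exampleHas(inspector *Inspector, addrA, addrB, addrC storage.Address) {
	fmt.Println(inspector.Has([]storage.Address{addrA, addrB, addrC}))
	// prints e.g. "101": addrA and addrC are stored locally, addrB is not
}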

api/manifest.go (new file, 584 lines)

@ -0,0 +1,584 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package api
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/ethersphere/swarm/storage/feed"
"github.com/ethereum/go-ethereum/common"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/storage"
)
const (
ManifestType = "application/bzz-manifest+json"
FeedContentType = "application/bzz-feed"
manifestSizeLimit = 5 * 1024 * 1024
)
// Manifest represents a swarm manifest
type Manifest struct {
Entries []ManifestEntry `json:"entries,omitempty"`
}
// ManifestEntry represents an entry in a swarm manifest
type ManifestEntry struct {
Hash string `json:"hash,omitempty"`
Path string `json:"path,omitempty"`
ContentType string `json:"contentType,omitempty"`
Mode int64 `json:"mode,omitempty"`
Size int64 `json:"size,omitempty"`
ModTime time.Time `json:"mod_time,omitempty"`
Status int `json:"status,omitempty"`
Access *AccessEntry `json:"access,omitempty"`
Feed *feed.Feed `json:"feed,omitempty"`
}
// ManifestList represents the result of listing files in a manifest
type ManifestList struct {
CommonPrefixes []string `json:"common_prefixes,omitempty"`
Entries []*ManifestEntry `json:"entries,omitempty"`
}
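// [Editor's sketch, not part of this file] Serialized with the JSON tags above,
// a manifest with one file entry and one submanifest entry looks roughly like
// this (hashes shortened):
//
//	{
//	  "entries": [
//	    {"hash": "3b25...", "path": "index.html", "contentType": "text/html", "size": 512},
//	    {"hash": "9fc1...", "path": "img/", "contentType": "application/bzz-manifest+json"}
//	  ]
//	}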
// NewManifest creates and stores a new, empty manifest
func (a *API) NewManifest(ctx context.Context, toEncrypt bool) (storage.Address, error) {
var manifest Manifest
data, err := json.Marshal(&manifest)
if err != nil {
return nil, err
}
addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), toEncrypt)
if err != nil {
return nil, err
}
err = wait(ctx)
return addr, err
}
// Manifest hack for supporting Swarm feeds from the bzz: scheme
// see swarm/api/api.go:API.Get() for more information
func (a *API) NewFeedManifest(ctx context.Context, feed *feed.Feed) (storage.Address, error) {
var manifest Manifest
entry := ManifestEntry{
Feed: feed,
ContentType: FeedContentType,
}
manifest.Entries = append(manifest.Entries, entry)
data, err := json.Marshal(&manifest)
if err != nil {
return nil, err
}
addr, wait, err := a.Store(ctx, bytes.NewReader(data), int64(len(data)), false)
if err != nil {
return nil, err
}
err = wait(ctx)
return addr, err
}
// ManifestWriter is used to add and remove entries from an underlying manifest
type ManifestWriter struct {
api *API
trie *manifestTrie
quitC chan bool
}
func (a *API) NewManifestWriter(ctx context.Context, addr storage.Address, quitC chan bool) (*ManifestWriter, error) {
trie, err := loadManifest(ctx, a.fileStore, addr, quitC, NOOPDecrypt)
if err != nil {
return nil, fmt.Errorf("error loading manifest %s: %s", addr, err)
}
return &ManifestWriter{a, trie, quitC}, nil
}
// AddEntry stores the given data and adds the resulting address to the manifest
func (m *ManifestWriter) AddEntry(ctx context.Context, data io.Reader, e *ManifestEntry) (addr storage.Address, err error) {
entry := newManifestTrieEntry(e, nil)
if data != nil {
var wait func(context.Context) error
addr, wait, err = m.api.Store(ctx, data, e.Size, m.trie.encrypted)
if err != nil {
return nil, err
}
err = wait(ctx)
if err != nil {
return nil, err
}
entry.Hash = addr.Hex()
}
if entry.Hash == "" {
return addr, errors.New("missing entry hash")
}
m.trie.addEntry(entry, m.quitC)
return addr, nil
}
// RemoveEntry removes the given path from the manifest
func (m *ManifestWriter) RemoveEntry(path string) error {
m.trie.deleteEntry(path, m.quitC)
return nil
}
// Store stores the manifest, returning the resulting storage address
func (m *ManifestWriter) Store() (storage.Address, error) {
return m.trie.ref, m.trie.recalcAndStore()
}
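// [Editor's sketch, not part of this file] Typical use of ManifestWriter, as the
// HTTP server does when handling uploads: open a writer on an existing manifest
// address, add or remove entries, then store the modified trie (ctx, addr and
// the entry values are placeholders):
//
//	mw, err := a.NewManifestWriter(ctx, addr, nil)
//	if err != nil { /* handle error */ }
//	_, err = mw.AddEntry(ctx, strings.NewReader("hello"), &ManifestEntry{
//		Path:        "greeting.txt",
//		ContentType: "text/plain",
//		Size:        5,
//	})
//	if err != nil { /* handle error */ }
//	_ = mw.RemoveEntry("old/path")
//	newAddr, err := mw.Store() // address of the updated manifest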
// ManifestWalker is used to recursively walk the entries in the manifest and
// all of its submanifests
type ManifestWalker struct {
api *API
trie *manifestTrie
quitC chan bool
}
func (a *API) NewManifestWalker(ctx context.Context, addr storage.Address, decrypt DecryptFunc, quitC chan bool) (*ManifestWalker, error) {
trie, err := loadManifest(ctx, a.fileStore, addr, quitC, decrypt)
if err != nil {
return nil, fmt.Errorf("error loading manifest %s: %s", addr, err)
}
return &ManifestWalker{a, trie, quitC}, nil
}
// ErrSkipManifest is used as a return value from WalkFn to indicate that the
// manifest should be skipped
var ErrSkipManifest = errors.New("skip this manifest")
// WalkFn is the type of function called for each entry visited by a recursive
// manifest walk
type WalkFn func(entry *ManifestEntry) error
// Walk recursively walks the manifest calling walkFn for each entry in the
// manifest, including submanifests
func (m *ManifestWalker) Walk(walkFn WalkFn) error {
return m.walk(m.trie, "", walkFn)
}
func (m *ManifestWalker) walk(trie *manifestTrie, prefix string, walkFn WalkFn) error {
for _, entry := range &trie.entries {
if entry == nil {
continue
}
entry.Path = prefix + entry.Path
err := walkFn(&entry.ManifestEntry)
if err != nil {
if entry.ContentType == ManifestType && err == ErrSkipManifest {
continue
}
return err
}
if entry.ContentType != ManifestType {
continue
}
if err := trie.loadSubTrie(entry, nil); err != nil {
return err
}
if err := m.walk(entry.subtrie, entry.Path, walkFn); err != nil {
return err
}
}
return nil
}
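// [Editor's sketch, not part of this file] Walking a manifest recursively, e.g.
// to print every file path it and its submanifests contain (ctx and addr are
// placeholders):
//
//	walker, err := a.NewManifestWalker(ctx, addr, NOOPDecrypt, nil)
//	if err != nil { /* handle error */ }
//	err = walker.Walk(func(entry *ManifestEntry) error {
//		if entry.ContentType == ManifestType {
//			return nil // descend into the submanifest
//		}
//		fmt.Println(entry.Path)
//		return nil
//	})
//
// Returning ErrSkipManifest from the callback for a manifest entry skips that
// subtree instead of descending into it.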
type manifestTrie struct {
fileStore *storage.FileStore
entries [257]*manifestTrieEntry // indexed by first character of basePath, entries[256] is the empty basePath entry
ref storage.Address // if ref != nil, it is stored
encrypted bool
decrypt DecryptFunc
}
func newManifestTrieEntry(entry *ManifestEntry, subtrie *manifestTrie) *manifestTrieEntry {
return &manifestTrieEntry{
ManifestEntry: *entry,
subtrie: subtrie,
}
}
type manifestTrieEntry struct {
ManifestEntry
subtrie *manifestTrie
}
func loadManifest(ctx context.Context, fileStore *storage.FileStore, addr storage.Address, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
log.Trace("manifest lookup", "addr", addr)
// retrieve manifest via FileStore
manifestReader, isEncrypted := fileStore.Retrieve(ctx, addr)
log.Trace("reader retrieved", "addr", addr)
return readManifest(manifestReader, addr, fileStore, isEncrypted, quitC, decrypt)
}
func readManifest(mr storage.LazySectionReader, addr storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool, decrypt DecryptFunc) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand
// TODO check size for oversized manifests
size, err := mr.Size(mr.Context(), quitC)
if err != nil { // size == 0
// can't determine size means we don't have the root chunk
log.Trace("manifest not found", "addr", addr)
err = fmt.Errorf("Manifest not Found")
return
}
if size > manifestSizeLimit {
log.Warn("manifest exceeds size limit", "addr", addr, "size", size, "limit", manifestSizeLimit)
err = fmt.Errorf("Manifest size of %v bytes exceeds the %v byte limit", size, manifestSizeLimit)
return
}
manifestData := make([]byte, size)
read, err := mr.Read(manifestData)
if int64(read) < size {
log.Trace("manifest not found", "addr", addr)
if err == nil {
err = fmt.Errorf("Manifest retrieval cut short: read %v, expect %v", read, size)
}
return
}
log.Debug("manifest retrieved", "addr", addr)
var man struct {
Entries []*manifestTrieEntry `json:"entries"`
}
err = json.Unmarshal(manifestData, &man)
if err != nil {
err = fmt.Errorf("Manifest %v is malformed: %v", addr.Log(), err)
log.Trace("malformed manifest", "addr", addr)
return
}
log.Trace("manifest entries", "addr", addr, "len", len(man.Entries))
trie = &manifestTrie{
fileStore: fileStore,
encrypted: isEncrypted,
decrypt: decrypt,
}
for _, entry := range man.Entries {
err = trie.addEntry(entry, quitC)
if err != nil {
return
}
}
return
}
func (mt *manifestTrie) addEntry(entry *manifestTrieEntry, quitC chan bool) error {
mt.ref = nil // trie modified, hash needs to be re-calculated on demand
if entry.ManifestEntry.Access != nil {
if mt.decrypt == nil {
return errors.New("dont have decryptor")
}
err := mt.decrypt(&entry.ManifestEntry)
if err != nil {
return err
}
}
if len(entry.Path) == 0 {
mt.entries[256] = entry
return nil
}
b := entry.Path[0]
oldentry := mt.entries[b]
if (oldentry == nil) || (oldentry.Path == entry.Path && oldentry.ContentType != ManifestType) {
mt.entries[b] = entry
return nil
}
cpl := 0
for (len(entry.Path) > cpl) && (len(oldentry.Path) > cpl) && (entry.Path[cpl] == oldentry.Path[cpl]) {
cpl++
}
if (oldentry.ContentType == ManifestType) && (cpl == len(oldentry.Path)) {
if mt.loadSubTrie(oldentry, quitC) != nil {
return nil
}
entry.Path = entry.Path[cpl:]
oldentry.subtrie.addEntry(entry, quitC)
oldentry.Hash = ""
return nil
}
commonPrefix := entry.Path[:cpl]
subtrie := &manifestTrie{
fileStore: mt.fileStore,
encrypted: mt.encrypted,
}
entry.Path = entry.Path[cpl:]
oldentry.Path = oldentry.Path[cpl:]
subtrie.addEntry(entry, quitC)
subtrie.addEntry(oldentry, quitC)
mt.entries[b] = newManifestTrieEntry(&ManifestEntry{
Path: commonPrefix,
ContentType: ManifestType,
}, subtrie)
return nil
}
func (mt *manifestTrie) getCountLast() (cnt int, entry *manifestTrieEntry) {
for _, e := range &mt.entries {
if e != nil {
cnt++
entry = e
}
}
return
}
func (mt *manifestTrie) deleteEntry(path string, quitC chan bool) {
mt.ref = nil // trie modified, hash needs to be re-calculated on demand
if len(path) == 0 {
mt.entries[256] = nil
return
}
b := path[0]
entry := mt.entries[b]
if entry == nil {
return
}
if entry.Path == path {
mt.entries[b] = nil
return
}
epl := len(entry.Path)
if (entry.ContentType == ManifestType) && (len(path) >= epl) && (path[:epl] == entry.Path) {
if mt.loadSubTrie(entry, quitC) != nil {
return
}
entry.subtrie.deleteEntry(path[epl:], quitC)
entry.Hash = ""
// remove subtree if it has less than 2 elements
cnt, lastentry := entry.subtrie.getCountLast()
if cnt < 2 {
if lastentry != nil {
lastentry.Path = entry.Path + lastentry.Path
}
mt.entries[b] = lastentry
}
}
}
func (mt *manifestTrie) recalcAndStore() error {
if mt.ref != nil {
return nil
}
var buffer bytes.Buffer
buffer.WriteString(`{"entries":[`)
list := &Manifest{}
for _, entry := range &mt.entries {
if entry != nil {
if entry.Hash == "" { // TODO: paralellize
err := entry.subtrie.recalcAndStore()
if err != nil {
return err
}
entry.Hash = entry.subtrie.ref.Hex()
}
list.Entries = append(list.Entries, entry.ManifestEntry)
}
}
manifest, err := json.Marshal(list)
if err != nil {
return err
}
sr := bytes.NewReader(manifest)
ctx := context.TODO()
addr, wait, err2 := mt.fileStore.Store(ctx, sr, int64(len(manifest)), mt.encrypted)
if err2 != nil {
return err2
}
err2 = wait(ctx)
mt.ref = addr
return err2
}
func (mt *manifestTrie) loadSubTrie(entry *manifestTrieEntry, quitC chan bool) (err error) {
if entry.ManifestEntry.Access != nil {
if mt.decrypt == nil {
return errors.New("dont have decryptor")
}
err := mt.decrypt(&entry.ManifestEntry)
if err != nil {
return err
}
}
if entry.subtrie == nil {
hash := common.Hex2Bytes(entry.Hash)
entry.subtrie, err = loadManifest(context.TODO(), mt.fileStore, hash, quitC, mt.decrypt)
entry.Hash = "" // might not match, should be recalculated
}
return
}
func (mt *manifestTrie) listWithPrefixInt(prefix, rp string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) error {
plen := len(prefix)
var start, stop int
if plen == 0 {
start = 0
stop = 256
} else {
start = int(prefix[0])
stop = start
}
for i := start; i <= stop; i++ {
select {
case <-quitC:
return fmt.Errorf("aborted")
default:
}
entry := mt.entries[i]
if entry != nil {
epl := len(entry.Path)
if entry.ContentType == ManifestType {
l := plen
if epl < l {
l = epl
}
if prefix[:l] == entry.Path[:l] {
err := mt.loadSubTrie(entry, quitC)
if err != nil {
return err
}
err = entry.subtrie.listWithPrefixInt(prefix[l:], rp+entry.Path[l:], quitC, cb)
if err != nil {
return err
}
}
} else {
if (epl >= plen) && (prefix == entry.Path[:plen]) {
cb(entry, rp+entry.Path[plen:])
}
}
}
}
return nil
}
func (mt *manifestTrie) listWithPrefix(prefix string, quitC chan bool, cb func(entry *manifestTrieEntry, suffix string)) (err error) {
return mt.listWithPrefixInt(prefix, "", quitC, cb)
}
func (mt *manifestTrie) findPrefixOf(path string, quitC chan bool) (entry *manifestTrieEntry, pos int) {
log.Trace(fmt.Sprintf("findPrefixOf(%s)", path))
if len(path) == 0 {
return mt.entries[256], 0
}
//see if first char is in manifest entries
b := path[0]
entry = mt.entries[b]
if entry == nil {
return mt.entries[256], 0
}
epl := len(entry.Path)
log.Trace(fmt.Sprintf("path = %v entry.Path = %v epl = %v", path, entry.Path, epl))
if len(path) <= epl {
if entry.Path[:len(path)] == path {
if entry.ContentType == ManifestType {
err := mt.loadSubTrie(entry, quitC)
if err == nil && entry.subtrie != nil {
subentries := entry.subtrie.entries
for i := 0; i < len(subentries); i++ {
sub := subentries[i]
if sub != nil && sub.Path == "" {
return sub, len(path)
}
}
}
entry.Status = http.StatusMultipleChoices
}
pos = len(path)
return
}
return nil, 0
}
if path[:epl] == entry.Path {
log.Trace(fmt.Sprintf("entry.ContentType = %v", entry.ContentType))
//the subentry is a manifest, load subtrie
if entry.ContentType == ManifestType && (strings.Contains(entry.Path, path) || strings.Contains(path, entry.Path)) {
err := mt.loadSubTrie(entry, quitC)
if err != nil {
return nil, 0
}
sub, pos := entry.subtrie.findPrefixOf(path[epl:], quitC)
if sub != nil {
entry = sub
pos += epl
return sub, pos
} else if path == entry.Path {
entry.Status = http.StatusMultipleChoices
}
} else {
//entry is not a manifest, return it
if path != entry.Path {
return nil, 0
}
}
}
return nil, 0
}
// RegularSlashes regularizes a path as stored in a file system manifest:
// no leading or trailing slashes, only single slashes inside
func RegularSlashes(path string) (res string) {
for i := 0; i < len(path); i++ {
if (path[i] != '/') || ((i > 0) && (path[i-1] != '/')) {
res = res + path[i:i+1]
}
}
if (len(res) > 0) && (res[len(res)-1] == '/') {
res = res[:len(res)-1]
}
return
}
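// [Editor's note] Examples of the regularization performed above:
//
//	RegularSlashes("/a//b/c/") == "a/b/c"
//	RegularSlashes("images/")  == "images"
//	RegularSlashes("")         == ""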
func (mt *manifestTrie) getEntry(spath string) (entry *manifestTrieEntry, fullpath string) {
path := RegularSlashes(spath)
var pos int
quitC := make(chan bool)
entry, pos = mt.findPrefixOf(path, quitC)
return entry, path[:pos]
}

View File

@ -25,8 +25,8 @@ import (
"strings"
"testing"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethersphere/swarm/chunk"
"github.com/ethersphere/swarm/storage"
)
func manifest(paths ...string) (manifestReader storage.LazySectionReader) {

View File

(binary image file changed; 4.0 KiB before and after)

View File

@ -23,7 +23,7 @@ import (
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethersphere/swarm/storage"
)
//matches hex swarm hashes

View File

@ -21,7 +21,7 @@ import (
"reflect"
"testing"
"github.com/ethereum/go-ethereum/swarm/storage"
"github.com/ethersphere/swarm/storage"
)
func TestParseURI(t *testing.T) {

View File

@ -1,7 +1,11 @@
os: Visual Studio 2015
branches:
only:
- master
# Clone directly into GOPATH.
clone_folder: C:\gopath\src\github.com\ethereum\go-ethereum
clone_folder: C:\gopath\src\github.com\ethersphere\swarm
clone_depth: 5
version: "{branch}.{build}"
environment:
@ -9,12 +13,12 @@ environment:
GOPATH: C:\gopath
CC: gcc.exe
matrix:
- GETH_ARCH: amd64
- APP_ARCH: amd64
MSYS2_ARCH: x86_64
MSYS2_BITS: 64
MSYSTEM: MINGW64
PATH: C:\msys64\mingw64\bin\;C:\Program Files (x86)\NSIS\;%PATH%
- GETH_ARCH: 386
- APP_ARCH: 386
MSYS2_ARCH: i686
MSYS2_BITS: 32
MSYSTEM: MINGW32
@ -23,8 +27,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://dl.google.com/go/go1.12.5.windows-%GETH_ARCH%.zip
- 7z x go1.12.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://dl.google.com/go/go1.12.windows-%APP_ARCH%.zip
- 7z x go1.12.windows-%APP_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
@ -32,8 +36,7 @@ build_script:
- go run build\ci.go install
after_build:
- go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build\ci.go nsis -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build\ci.go archive -type zip -signer WINDOWS_SIGNING_KEY -upload ethswarm/builds
test_script:
- set CGO_ENABLED=1

View File

@ -26,7 +26,7 @@ import (
"testing"
"time"
"github.com/ethereum/go-ethereum/swarm/testutil"
"github.com/ethersphere/swarm/testutil"
"golang.org/x/crypto/sha3"
)

View File

@ -29,9 +29,6 @@ Available commands are:
archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts
importkeys -- imports signing keys from env
debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package
nsis -- creates a Windows NSIS installer
aar [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an Android archive
xcode [ -local ] [ -sign key-id ] [-deploy repo] [ -upload dest ] -- creates an iOS XCode framework
xgo [ -alltools ] [ options ] -- cross builds according to options
purge [ -store blobstore ] [ -days threshold ] -- purges old archives from the blobstore
@ -41,7 +38,6 @@ For all commands, -n prevents execution of external programs (dry run mode).
package main
import (
"bufio"
"bytes"
"encoding/base64"
"flag"
@ -58,85 +54,46 @@ import (
"strings"
"time"
"github.com/ethereum/go-ethereum/internal/build"
"github.com/ethereum/go-ethereum/params"
"github.com/ethersphere/swarm/internal/build"
sv "github.com/ethersphere/swarm/version"
)
var (
// Files that end up in the geth*.zip archive.
gethArchiveFiles = []string{
// Files that end up in the swarm*.zip archive.
swarmArchiveFiles = []string{
"COPYING",
executablePath("geth"),
}
// Files that end up in the geth-alltools*.zip archive.
allToolsArchiveFiles = []string{
"COPYING",
executablePath("abigen"),
executablePath("bootnode"),
executablePath("evm"),
executablePath("geth"),
executablePath("puppeth"),
executablePath("rlpdump"),
executablePath("wnode"),
executablePath("clef"),
executablePath("swarm"),
}
// A debian package is created for all executables listed here.
debExecutables = []debExecutable{
debSwarmExecutables = []debExecutable{
{
BinaryName: "abigen",
Description: "Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages.",
},
{
BinaryName: "bootnode",
Description: "Ethereum bootnode.",
},
{
BinaryName: "evm",
Description: "Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode.",
},
{
BinaryName: "geth",
Description: "Ethereum CLI client.",
},
{
BinaryName: "puppeth",
Description: "Ethereum private network manager.",
},
{
BinaryName: "rlpdump",
Description: "Developer utility tool that prints RLP structures.",
},
{
BinaryName: "wnode",
Description: "Ethereum Whisper diagnostic tool",
},
{
BinaryName: "clef",
Description: "Ethereum account management tool.",
BinaryName: "swarm",
PackageName: "ethereum-swarm",
Description: "Ethereum Swarm daemon and tools",
},
}
// A debian package is created for all executables listed here.
debEthereum = debPackage{
Name: "ethereum",
Version: params.Version,
Executables: debExecutables,
debSwarm = debPackage{
Name: "ethereum-swarm",
Version: sv.Version,
Executables: debSwarmExecutables,
}
// Debian meta packages to build and push to Ubuntu PPA
debPackages = []debPackage{
debEthereum,
debSwarm,
}
// Packages to be cross-compiled by the xgo command
allCrossCompiledArchiveFiles = swarmArchiveFiles
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: wily is unsupported because it was officially deprecated on Launchpad.
// Note: yakkety is unsupported because it was officially deprecated on Launchpad.
// Note: zesty is unsupported because it was officially deprecated on Launchpad.
// Note: artful is unsupported because it was officially deprecated on Launchpad.
// Note: wily is unsupported because it was officially deprecated on lanchpad.
// Note: yakkety is unsupported because it was officially deprecated on lanchpad.
// Note: zesty is unsupported because it was officially deprecated on lanchpad.
// Note: artful is unsupported because it was officially deprecated on lanchpad.
debDistros = []string{"trusty", "xenial", "bionic", "cosmic", "disco"}
)
@ -169,12 +126,6 @@ func main() {
doArchive(os.Args[2:])
case "debsrc":
doDebianSource(os.Args[2:])
case "nsis":
doWindowsInstaller(os.Args[2:])
case "aar":
doAndroidArchive(os.Args[2:])
case "xcode":
doXCodeFramework(os.Args[2:])
case "xgo":
doXgo(os.Args[2:])
case "purge":
@ -260,7 +211,6 @@ func buildFlags(env build.Environment) (flags []string) {
var ld []string
if env.Commit != "" {
ld = append(ld, "-X", "main.gitCommit="+env.Commit)
ld = append(ld, "-X", "main.gitDate="+env.Date)
}
if runtime.GOOS == "darwin" {
ld = append(ld, "-s")
@ -366,7 +316,7 @@ func doArchive(cmdline []string) {
arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging")
atype = flag.String("type", "zip", "Type of archive to write (zip|tar)")
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`)
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
upload = flag.String("upload", "", `Destination to upload the archives (usually "ethswarm/builds")`)
ext string
)
flag.CommandLine.Parse(cmdline)
@ -382,18 +332,15 @@ func doArchive(cmdline []string) {
var (
env = build.Env()
basegeth = archiveBasename(*arch, params.ArchiveVersion(env.Commit))
geth = "geth-" + basegeth + ext
alltools = "geth-alltools-" + basegeth + ext
baseswarm = archiveBasename(*arch, sv.ArchiveVersion(env.Commit))
swarm = "swarm-" + baseswarm + ext
)
maybeSkipArchive(env)
if err := build.WriteArchive(geth, gethArchiveFiles); err != nil {
if err := build.WriteArchive(swarm, swarmArchiveFiles); err != nil {
log.Fatal(err)
}
if err := build.WriteArchive(alltools, allToolsArchiveFiles); err != nil {
log.Fatal(err)
}
for _, archive := range []string{geth, alltools} {
for _, archive := range []string{swarm} {
if err := archiveUpload(archive, *upload, *signer); err != nil {
log.Fatal(err)
}
@ -405,12 +352,6 @@ func archiveBasename(arch string, archiveVersion string) string {
if arch == "arm" {
platform += os.Getenv("GOARM")
}
if arch == "android" {
platform = "android-all"
}
if arch == "ios" {
platform = "ios-all"
}
return platform + "-" + archiveVersion
}
@ -451,7 +392,9 @@ func maybeSkipArchive(env build.Environment) {
log.Printf("skipping because this is a cron job")
os.Exit(0)
}
if env.Branch != "master" && !strings.HasPrefix(env.Tag, "v1.") {
validArchiveTag, _ := regexp.MatchString("v([0-9]+).([0-9]+).([0-9]+)", "v10.2.3")
if env.Branch != "master" && !validArchiveTag {
log.Printf("skipping because branch %q, tag %q is not on the whitelist", env.Branch, env.Tag)
os.Exit(0)
}
@ -556,8 +499,8 @@ func isUnstableBuild(env build.Environment) bool {
}
type debPackage struct {
Name string // the name of the Debian package to produce, e.g. "ethereum"
Version string // the clean version of the debPackage, e.g. 1.8.12, without any metadata
Name string // the name of the Debian package to produce, e.g. "ethereum", or "ethereum-swarm"
Version string // the clean version of the debPackage, e.g. 1.8.12 or 0.3.0, without any metadata
Executables []debExecutable // executables to be included in the package
}
@ -690,291 +633,6 @@ func stageDebianSource(tmpdir string, meta debMetadata) (pkgdir string) {
return pkgdir
}
// Windows installer
func doWindowsInstaller(cmdline []string) {
// Parse the flags and make skip installer generation on PRs
var (
arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging")
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`)
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`)
)
flag.CommandLine.Parse(cmdline)
*workdir = makeWorkdir(*workdir)
env := build.Env()
maybeSkipArchive(env)
// Aggregate binaries that are included in the installer
var (
devTools []string
allTools []string
gethTool string
)
for _, file := range allToolsArchiveFiles {
if file == "COPYING" { // license, copied later
continue
}
allTools = append(allTools, filepath.Base(file))
if filepath.Base(file) == "geth.exe" {
gethTool = file
} else {
devTools = append(devTools, file)
}
}
// Render NSIS scripts: Installer NSIS contains two installer sections,
// first section contains the geth binary, second section holds the dev tools.
templateData := map[string]interface{}{
"License": "COPYING",
"Geth": gethTool,
"DevTools": devTools,
}
build.Render("build/nsis.geth.nsi", filepath.Join(*workdir, "geth.nsi"), 0644, nil)
build.Render("build/nsis.install.nsh", filepath.Join(*workdir, "install.nsh"), 0644, templateData)
build.Render("build/nsis.uninstall.nsh", filepath.Join(*workdir, "uninstall.nsh"), 0644, allTools)
build.Render("build/nsis.pathupdate.nsh", filepath.Join(*workdir, "PathUpdate.nsh"), 0644, nil)
build.Render("build/nsis.envvarupdate.nsh", filepath.Join(*workdir, "EnvVarUpdate.nsh"), 0644, nil)
build.CopyFile(filepath.Join(*workdir, "SimpleFC.dll"), "build/nsis.simplefc.dll", 0755)
build.CopyFile(filepath.Join(*workdir, "COPYING"), "COPYING", 0755)
// Build the installer. This assumes that all the needed files have been previously
// built (don't mix building and packaging to keep cross compilation complexity to a
// minimum).
version := strings.Split(params.Version, ".")
if env.Commit != "" {
version[2] += "-" + env.Commit[:8]
}
installer, _ := filepath.Abs("geth-" + archiveBasename(*arch, params.ArchiveVersion(env.Commit)) + ".exe")
build.MustRunCommand("makensis.exe",
"/DOUTPUTFILE="+installer,
"/DMAJORVERSION="+version[0],
"/DMINORVERSION="+version[1],
"/DBUILDVERSION="+version[2],
"/DARCH="+*arch,
filepath.Join(*workdir, "geth.nsi"),
)
// Sign and publish installer.
if err := archiveUpload(installer, *upload, *signer); err != nil {
log.Fatal(err)
}
}
// Android archives
func doAndroidArchive(cmdline []string) {
var (
local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`)
deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`)
upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`)
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
// Sanity check that the SDK and NDK are installed and set
if os.Getenv("ANDROID_HOME") == "" {
log.Fatal("Please ensure ANDROID_HOME points to your Android SDK")
}
// Build the Android archive and Maven resources
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
build.MustRun(gomobileTool("bind", "-ldflags", "-s -w", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
if *local {
// If we're building locally, copy bundle to build dir and skip Maven
os.Rename("geth.aar", filepath.Join(GOBIN, "geth.aar"))
return
}
meta := newMavenMetadata(env)
build.Render("build/mvn.pom", meta.Package+".pom", 0755, meta)
// Skip Maven deploy and Azure upload for PR builds
maybeSkipArchive(env)
// Sign and upload the archive to Azure
archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar"
os.Rename("geth.aar", archive)
if err := archiveUpload(archive, *upload, *signer); err != nil {
log.Fatal(err)
}
// Sign and upload all the artifacts to Maven Central
os.Rename(archive, meta.Package+".aar")
if *signer != "" && *deploy != "" {
// Import the signing key into the local GPG instance
key := getenvBase64(*signer)
gpg := exec.Command("gpg", "--import")
gpg.Stdin = bytes.NewReader(key)
build.MustRun(gpg)
keyID, err := build.PGPKeyID(string(key))
if err != nil {
log.Fatal(err)
}
// Upload the artifacts to Sonatype and/or Maven Central
repo := *deploy + "/service/local/staging/deploy/maven2"
if meta.Develop {
repo = *deploy + "/content/repositories/snapshots"
}
build.MustRunCommand("mvn", "gpg:sign-and-deploy-file", "-e", "-X",
"-settings=build/mvn.settings", "-Durl="+repo, "-DrepositoryId=ossrh",
"-Dgpg.keyname="+keyID,
"-DpomFile="+meta.Package+".pom", "-Dfile="+meta.Package+".aar")
}
}
func gomobileTool(subcmd string, args ...string) *exec.Cmd {
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
cmd.Args = append(cmd.Args, args...)
cmd.Env = []string{
"GOPATH=" + build.GOPATH(),
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
}
for _, e := range os.Environ() {
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") {
continue
}
cmd.Env = append(cmd.Env, e)
}
return cmd
}
type mavenMetadata struct {
Version string
Package string
Develop bool
Contributors []mavenContributor
}
type mavenContributor struct {
Name string
Email string
}
func newMavenMetadata(env build.Environment) mavenMetadata {
// Collect the list of authors from the repo root
contribs := []mavenContributor{}
if authors, err := os.Open("AUTHORS"); err == nil {
defer authors.Close()
scanner := bufio.NewScanner(authors)
for scanner.Scan() {
// Skip any whitespace from the authors list
line := strings.TrimSpace(scanner.Text())
if line == "" || line[0] == '#' {
continue
}
// Split the author and insert as a contributor
re := regexp.MustCompile("([^<]+) <(.+)>")
parts := re.FindStringSubmatch(line)
if len(parts) == 3 {
contribs = append(contribs, mavenContributor{Name: parts[1], Email: parts[2]})
}
}
}
// Render the version and package strings
version := params.Version
if isUnstableBuild(env) {
version += "-SNAPSHOT"
}
return mavenMetadata{
Version: version,
Package: "geth-" + version,
Develop: isUnstableBuild(env),
Contributors: contribs,
}
}
// XCode frameworks
func doXCodeFramework(cmdline []string) {
var (
local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`)
signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`)
deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`)
upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`)
)
flag.CommandLine.Parse(cmdline)
env := build.Env()
// Build the iOS XCode framework
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
build.MustRun(gomobileTool("init"))
bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
if *local {
// If we're building locally, use the build folder and stop afterwards
bind.Dir, _ = filepath.Abs(GOBIN)
build.MustRun(bind)
return
}
archive := "geth-" + archiveBasename("ios", params.ArchiveVersion(env.Commit))
if err := os.Mkdir(archive, os.ModePerm); err != nil {
log.Fatal(err)
}
bind.Dir, _ = filepath.Abs(archive)
build.MustRun(bind)
build.MustRunCommand("tar", "-zcvf", archive+".tar.gz", archive)
// Skip CocoaPods deploy and Azure upload for PR builds
maybeSkipArchive(env)
// Sign and upload the framework to Azure
if err := archiveUpload(archive+".tar.gz", *upload, *signer); err != nil {
log.Fatal(err)
}
// Prepare and upload a PodSpec to CocoaPods
if *deploy != "" {
meta := newPodMetadata(env, archive)
build.Render("build/pod.podspec", "Geth.podspec", 0755, meta)
build.MustRunCommand("pod", *deploy, "push", "Geth.podspec", "--allow-warnings", "--verbose")
}
}
type podMetadata struct {
Version string
Commit string
Archive string
Contributors []podContributor
}
type podContributor struct {
Name string
Email string
}
func newPodMetadata(env build.Environment, archive string) podMetadata {
// Collect the list of authors from the repo root
contribs := []podContributor{}
if authors, err := os.Open("AUTHORS"); err == nil {
defer authors.Close()
scanner := bufio.NewScanner(authors)
for scanner.Scan() {
// Skip any whitespace from the authors list
line := strings.TrimSpace(scanner.Text())
if line == "" || line[0] == '#' {
continue
}
// Split the author and insert as a contributor
re := regexp.MustCompile("([^<]+) <(.+)>")
parts := re.FindStringSubmatch(line)
if len(parts) == 3 {
contribs = append(contribs, podContributor{Name: parts[1], Email: parts[2]})
}
}
}
version := params.Version
if isUnstableBuild(env) {
version += "-unstable." + env.Buildnum
}
return podMetadata{
Archive: archive,
Version: version,
Commit: env.Commit,
Contributors: contribs,
}
}
// Cross compilation
func doXgo(cmdline []string) {
@ -993,7 +651,7 @@ func doXgo(cmdline []string) {
if *alltools {
args = append(args, []string{"--dest", GOBIN}...)
for _, res := range allToolsArchiveFiles {
for _, res := range allCrossCompiledArchiveFiles {
if strings.HasPrefix(res, GOBIN) {
// Binary tool found, cross build it explicitly
args = append(args, "./"+filepath.Join("cmd", filepath.Base(res)))
@ -1031,7 +689,7 @@ func xgoTool(args []string) *exec.Cmd {
func doPurge(cmdline []string) {
var (
store = flag.String("store", "", `Destination from where to purge archives (usually "gethstore/builds")`)
store = flag.String("store", "", `Destination from where to purge archives (usually "ethswarm/builds")`)
limit = flag.Int("days", 30, `Age threshold above which to delete unstable archives`)
)
flag.CommandLine.Parse(cmdline)
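To make the new archive logic in build/ci.go above easier to follow, here is a hedged, standalone Go sketch. buildArchiveName and shouldArchive are hypothetical helpers that mirror the swarm naming in doArchive/archiveBasename and the whitelist in maybeSkipArchive; the version string is made up, and the sketch checks the release regexp against the tag itself, which seems to be the intent, whereas the hunk above passes the literal string "v10.2.3" to MatchString:

package main

import (
	"fmt"
	"regexp"
	"runtime"
)

// buildArchiveName mirrors how doArchive names the artifact after this change:
// "swarm-" + GOOS + "-" + arch + "-" + archiveVersion + ext.
func buildArchiveName(arch, archiveVersion, ext string) string {
	platform := runtime.GOOS + "-" + arch
	return "swarm-" + platform + "-" + archiveVersion + ext
}

// shouldArchive mirrors the whitelist in maybeSkipArchive: archives are built
// for the master branch or for tags that look like a release (vX.Y.Z).
// The pattern here is anchored and checked against the tag itself.
func shouldArchive(branch, tag string) bool {
	validArchiveTag, _ := regexp.MatchString(`^v[0-9]+\.[0-9]+\.[0-9]+$`, tag)
	return branch == "master" || validArchiveTag
}

func main() {
	// "0.4.0" is a made-up archive version for illustration only.
	fmt.Println(buildArchiveName("amd64", "0.4.0", ".zip")) // e.g. swarm-linux-amd64-0.4.0.zip
	fmt.Println(shouldArchive("master", ""))                // true
	fmt.Println(shouldArchive("feature-x", "v0.4.0"))       // true
	fmt.Println(shouldArchive("feature-x", ""))             // false
}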

View File

@ -4,9 +4,9 @@ Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.11
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum
Homepage: https://swarm.ethereum.org
Vcs-Git: git://github.com/ethersphere/swarm.git
Vcs-Browser: https://github.com/ethersphere/swarm
{{range .Executables}}
Package: {{$.ExeName .}}

View File

@ -1,5 +0,0 @@
{{.Name}} ({{.VersionString}}) {{.Distro}}; urgency=low
* git build of {{.Env.Commit}}
-- {{.Author}} {{.Time}}

View File

@ -1,25 +0,0 @@
Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.11
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git
Vcs-Browser: https://github.com/ethereum/go-ethereum
Package: {{.Name}}
Architecture: any
Depends: ${misc:Depends}, {{.ExeList}}
Description: Meta-package to install geth, swarm, and other tools
Meta-package to install geth, swarm and other tools
{{range .Executables}}
Package: {{$.ExeName .}}
Conflicts: {{$.ExeConflicts .}}
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Built-Using: ${misc:Built-Using}
Description: {{.Description}}
{{.Description}}
{{end}}

View File

@ -1,14 +0,0 @@
Copyright 2018 The go-ethereum Authors
go-ethereum is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
go-ethereum is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

View File

@ -1 +0,0 @@
AUTHORS

View File

@ -1 +0,0 @@
build/bin/{{.BinaryName}} usr/bin

View File

@ -1,16 +0,0 @@
#!/usr/bin/make -f
# -*- makefile -*-
# Uncomment this to turn on verbose mode.
#export DH_VERBOSE=1
# Launchpad rejects Go's access to $HOME/.cache, use custom folder
export GOCACHE=/tmp/go-build
override_dh_auto_build:
build/env.sh /usr/lib/go-1.11/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:
%:
dh $@

View File

@ -10,11 +10,11 @@ fi
# Create fake Go workspace if it doesn't exist yet.
workspace="$PWD/build/_workspace"
root="$PWD"
ethdir="$workspace/src/github.com/ethereum"
if [ ! -L "$ethdir/go-ethereum" ]; then
ethdir="$workspace/src/github.com/ethersphere"
if [ ! -L "$ethdir/swarm" ]; then
mkdir -p "$ethdir"
cd "$ethdir"
ln -s ../../../../../. go-ethereum
ln -s ../../../../../. swarm
cd "$root"
fi
@ -23,8 +23,8 @@ GOPATH="$workspace"
export GOPATH
# Run the command inside the workspace.
cd "$ethdir/go-ethereum"
PWD="$ethdir/go-ethereum"
cd "$ethdir/swarm"
PWD="$ethdir/swarm"
# Launch the arguments with the configured environment.
exec "$@"

View File

@ -23,7 +23,7 @@ import (
"sync"
"time"
"github.com/ethereum/go-ethereum/swarm/sctx"
"github.com/ethersphere/swarm/sctx"
)
// Tags hold tag information indexed by a unique random uint32

client/bzz.go (new file, 81 lines)
View File

@ -0,0 +1,81 @@
package client
import (
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/swarm"
"github.com/ethersphere/swarm/log"
"github.com/ethersphere/swarm/storage"
)
type Bzz struct {
client *rpc.Client
}
// NewBzz is a constructor for a Bzz API
func NewBzz(client *rpc.Client) *Bzz {
return &Bzz{
client: client,
}
}
// GetChunksBitVector returns a bit vector of presence for a given slice of chunks
func (b *Bzz) GetChunksBitVector(addrs []storage.Address) (string, error) {
var hostChunks string
const trackChunksPageSize = 7500
for len(addrs) > 0 {
var pageChunks string
// get current page size, so that we avoid a slice out of bounds on the last page
pagesize := trackChunksPageSize
if len(addrs) < trackChunksPageSize {
pagesize = len(addrs)
}
err := b.client.Call(&pageChunks, "bzz_has", addrs[:pagesize])
if err != nil {
return "", err
}
hostChunks += pageChunks
addrs = addrs[pagesize:]
}
return hostChunks, nil
}
// GetBzzAddr returns the bzzAddr of the node
func (b *Bzz) GetBzzAddr() (string, error) {
var info swarm.Info
err := b.client.Call(&info, "bzz_info")
if err != nil {
return "", err
}
return info.BzzKey[2:], nil
}
// IsPullSyncing checks whether the node is still receiving chunk deliveries due to pull syncing
func (b *Bzz) IsPullSyncing() (bool, error) {
var isSyncing bool
err := b.client.Call(&isSyncing, "bzz_isPullSyncing")
if err != nil {
log.Error("error calling host for isPullSyncing", "err", err)
return false, err
}
return isSyncing, nil
}
// IsPushSynced checks if the given `tag` is done syncing, i.e. we've received receipts for all chunks
func (b *Bzz) IsPushSynced(tagname string) (bool, error) {
var isSynced bool
err := b.client.Call(&isSynced, "bzz_isPushSynced", tagname)
if err != nil {
log.Error("error calling host for isPushSynced", "err", err)
return false, err
}
return isSynced, nil
}
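Finally, a hedged usage sketch for the new Bzz client: it assumes the package is importable as github.com/ethersphere/swarm/client and that a swarm node exposes its JSON-RPC endpoint at the placeholder path used below; both are assumptions to adapt to your setup, not part of this changeset:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethersphere/swarm/client"
)

func main() {
	// Placeholder endpoint: point this at your node's IPC file or ws/http RPC URL.
	rpcClient, err := rpc.Dial("/path/to/bzzd.ipc")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer rpcClient.Close()

	bzz := client.NewBzz(rpcClient)

	// Overlay address of the node (bzz_info BzzKey without the 0x prefix).
	addr, err := bzz.GetBzzAddr()
	if err != nil {
		log.Fatalf("bzz_info: %v", err)
	}
	fmt.Println("bzz address:", addr)

	// Check whether pull syncing is still delivering chunks to this node.
	syncing, err := bzz.IsPullSyncing()
	if err != nil {
		log.Fatalf("bzz_isPullSyncing: %v", err)
	}
	fmt.Println("still pull syncing:", syncing)
}

GetChunksBitVector and IsPushSynced follow the same call pattern; note that GetChunksBitVector pages its bzz_has calls in batches of 7500 addresses, as implemented above.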

Some files were not shown because too many files have changed in this diff.