Compare commits
36 Commits
SHA1
0cdc7647aa
c5d7faefe9
d7211dec79
cea2c0eedf
b738172dcc
abdc3d3c77
80901e8288
465796c3a8
9c05284bd1
56edaa1653
8865fda872
b0df9b164c
72188234aa
8c0619d29c
16a3a4303f
eaed7584f1
0262ba58cb
e088998867
db5ec711e8
9d49c80783
b1fdb9f38e
a606dc274b
c9d6fba07d
228fc5a83a
c28dc03f6d
dcb276a0dd
53864a73db
cf65a127e1
fd64dce6a5
d60b07249c
7b99278eb0
aaf8ae1d0b
a83fdd0046
b1a219b0ec
487b3b0f7b
4ca3d49307
2  .gitattributes (vendored)
@@ -1,2 +0,0 @@
- # Auto detect text files and perform LF normalization
- * text=auto
18  .github/CONTRIBUTING.md (vendored)
@@ -1,18 +0,0 @@
- ## Can I have feature X
-
- Before you do a feature request please check and make sure that it isn't possible
- through some other means. The JavaScript enabled console is a powerful feature
- in the right hands. Please check our [Bitchin' tricks](https://github.com/ethereum/go-ethereum/wiki/bitchin-tricks) wiki page for more info
- and help.
-
- ## Contributing
-
- If you'd like to contribute to go-ethereum please fork, fix, commit and
- send a pull request. Commits who do not comply with the coding standards
- are ignored (use gofmt!). If you send pull requests make absolute sure that you
- commit on the `develop` branch and that you do not merge to master.
- Commits that are directly based on master are simply ignored.
-
- See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
- for more details on configuring your environment, testing, and
- dependency management.
20  .github/ISSUE_TEMPLATE.md (vendored)
@@ -1,20 +0,0 @@
- #### System information
-
- Geth version: `geth version`
- OS & Version: Windows/Linux/OSX
- Commit hash : (if `develop`)
-
- #### Expected behaviour
-
-
- #### Actual behaviour
-
-
- #### Steps to reproduce the behaviour
-
-
- #### Backtrace
-
- ````
- [backtrace]
- ````
2  .gitignore (vendored)
@@ -35,5 +35,3 @@ cmd/mist/assets/ext/ethereum.js/
profile.tmp
profile.cov

# vagrant
.vagrant
4  .mailmap
@@ -59,7 +59,3 @@ Jason Carver <jacarver@linkedin.com> <ut96caarrs@snkmail.com>

Joseph Chow <ethereum@outlook.com>
Joseph Chow <ethereum@outlook.com> ethers <TODO>

Enrique Fynn <enriquefynn@gmail.com>

Vincent G <caktux@gmail.com>
.travis.yml
@@ -1,11 +1,14 @@
language: go
go:
- 1.4.2
before_install:
- sudo apt-get update -qq
- sudo apt-get install -yqq libgmp3-dev
install:
# - go get code.google.com/p/go.tools/cmd/goimports
# - go get github.com/golang/lint/golint
# - go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/cover github.com/mattn/goveralls
before_script:
# - gofmt -l -w .
# - goimports -l -w .
@@ -15,11 +18,11 @@ before_script:
script:
- make travis-test-with-coverage
after_success:
- bash <(curl -s https://codecov.io/bash)
- if [ "$COVERALLS_TOKEN" ]; then goveralls -coverprofile=profile.cov -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
env:
global:
- secure: "U2U1AmkU4NJBgKR/uUAebQY87cNL0+1JHjnLOmmXwxYYyj5ralWb1aSuSH3qSXiT93qLBmtaUkuv9fberHVqrbAeVlztVdUsKAq7JMQH+M99iFkC9UiRMqHmtjWJ0ok4COD1sRYixxi21wb/JrMe3M1iL4QJVS61iltjHhVdM64="
sudo: false

notifications:
  webhooks:
    urls:
12  AUTHORS
@@ -3,34 +3,22 @@
Alex Leverington <alex@ethdev.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com>
Bas van Kervel <bas@ethdev.com>
Christoph Jentzsch <jentzsch.software@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com>
Drake Burroughs <wildfyre@hotmail.com>
Enrique Fynn <enriquefynn@gmail.com>
Ethan Buchman <ethan@coinculture.info>
Fabian Vogelsteller <fabian@frozeman.de>
Felix Lange <fjl@twurst.com>
Gustav Simonsson <gustav.simonsson@gmail.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Jae Kwon <jkwon.work@gmail.com>
Jason Carver <jacarver@linkedin.com>
Jeff R. Allen <jra@nella.org>
Jeffrey Wilcke <jeffrey@ethereum.org>
Joseph Chow <ethereum@outlook.com>
Kobi Gurkan <kobigurk@gmail.com>
Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com>
Maran Hidskes <maran.hidskes@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Nick Dodson <silentcicero@outlook.com>
Peter Pratscher <pratscher@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Ramesh Nair <ram@hiddentao.com>
Ricardo Catalinas Jiménez <r@untroubled.be>
Rémy Roy <remyroy@remyroy.com>
Taylor Gerring <taylor.gerring@gmail.com>
Viktor Trón <viktor.tron@gmail.com>
Vincent G <caktux@gmail.com>
Vitalik Buterin <v@buterin.com>
Zsolt Felföldi <zsfelfoldi@gmail.com>
273  Godeps/Godeps.json (generated)
@@ -1,315 +1,118 @@
|
||||
{
|
||||
"ImportPath": "github.com/ethereum/go-ethereum",
|
||||
"GoVersion": "go1.5.2",
|
||||
"GoVersion": "go1.4",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "github.com/Gustav-Simonsson/go-opencl/cl",
|
||||
"Rev": "593e01cfc4f3353585015321e01951d4a907d3ef"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cespare/cp",
|
||||
"Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c"
|
||||
"ImportPath": "code.google.com/p/go-uuid/uuid",
|
||||
"Comment": "null-12",
|
||||
"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codegangsta/cli",
|
||||
"Comment": "1.2.0-215-g0ab42fd",
|
||||
"Rev": "0ab42fd482c27cf2c95e7794ad3bb2082c2ab2d7"
|
||||
"Comment": "1.2.0-95-g9b2bd2b",
|
||||
"Rev": "9b2bd2b3489748d4d0a204fa4eb2ee9e89e0ebc6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
|
||||
"Rev": "3e6e67c4dcea3ac2f25fd4731abc0e1deaf36216"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ethereum/ethash",
|
||||
"Comment": "v23.1-245-g25b32de",
|
||||
"Rev": "25b32de0c0271065c28c3719c2bfe86959d72f0c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fatih/color",
|
||||
"Comment": "v0.1-12-g9aae6aa",
|
||||
"Rev": "9aae6aaa22315390f03959adca2c4d395b02fcef"
|
||||
"Comment": "v23.1-227-g8f6ccaa",
|
||||
"Rev": "8f6ccaaef9b418553807a73a95cb5f49cd3ea39f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/gizak/termui",
|
||||
"Rev": "08a5d3f67b7d9ec87830ea39c48e570a1f18531f"
|
||||
"Rev": "bab8dce01c193d82bc04888a0a9a7814d505f532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/snappy",
|
||||
"Rev": "799c780093d646c1b79d30894e22512c319fa137"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru",
|
||||
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/golang-lru/simplelru",
|
||||
"Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
|
||||
"ImportPath": "github.com/howeyc/fsnotify",
|
||||
"Comment": "v0.9.0-11-g6b1ef89",
|
||||
"Rev": "6b1ef893dc11e0447abda6da20a5203481878dda"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp/dcps/internetgateway1",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp/dcps/internetgateway2",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp/httpu",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp/scpd",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp/soap",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/huin/goupnp/ssdp",
|
||||
"Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jackpal/gateway",
|
||||
"Rev": "192609c58b8985e645cbe82ddcb28a4362ca0fdc"
|
||||
"Rev": "5cff77a69fb22f5f1774c4451ea2aab63d4d2f20"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jackpal/go-nat-pmp",
|
||||
"Rev": "46523a463303c6ede3ddfe45bde1c7ed52ebaacd"
|
||||
"Rev": "a45aa3d54aef73b504e15eb71bea0e5565b5e6e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kardianos/osext",
|
||||
"Rev": "ccfcd0245381f0c94c68f50626665eed3c6b726a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-colorable",
|
||||
"Rev": "9fdad7c47650b7d2e1da50644c1f4ba7f172f252"
|
||||
"Rev": "043ae16291351db8465272edf465c9f388161627"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-isatty",
|
||||
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
|
||||
"Rev": "fdbe02a1b44e75977b2690062b83cf507d70c013"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mattn/go-runewidth",
|
||||
"Comment": "travisish-44-ge882a96",
|
||||
"Rev": "e882a96ec18dd43fa283187b66af74497c9101c0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/microsoft/go-winio",
|
||||
"Comment": "v0.2.0",
|
||||
"Rev": "9e2895e5f6c3f16473b91d37fae6e89990a4520c"
|
||||
"Comment": "travisish-33-g5890272",
|
||||
"Rev": "5890272cd41c5103531cd7b79e428d99c9e97f76"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/nsf/termbox-go",
|
||||
"Rev": "362329b0aa6447eadd52edd8d660ec1dff470295"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pborman/uuid",
|
||||
"Comment": "v1.0-6-g0f1a469",
|
||||
"Rev": "0f1a46960a86dcdf5dd30d3e6568a497a997909f"
|
||||
"Rev": "675ffd907b7401b8a709a5ef2249978af5616bb2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/peterh/liner",
|
||||
"Rev": "ad1edfd30321d8f006ccf05f1e0524adeb943060"
|
||||
"Rev": "29f6a646557d83e2b6e9ba05c45fbea9c006dbe8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rcrowley/go-metrics",
|
||||
"Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rjeczalik/notify",
|
||||
"Rev": "5dd6205716539662f8f14ab513552b41eab69d5d"
|
||||
"Rev": "a5cfc242a56ba7fa70b785f678d6214837bf93b9"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto/ast",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto/dbg",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto/file",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto/parser",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto/registry",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/robertkrimen/otto/token",
|
||||
"Rev": "53221230c215611a90762720c9042ac782ef74ee"
|
||||
"Rev": "dea31a3d392779af358ec41f77a07fcc7e9d04ba"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rs/cors",
|
||||
"Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379"
|
||||
"Rev": "6e0c3cb65fc0fdb064c743d176a620e3ca446dfb"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
"Rev": "4875955338b0a434238a31165cb87255ab6e9e4a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
|
||||
"Rev": "917f41c560270110ceb73c5b38be2a9127387071"
|
||||
"ImportPath": "github.com/syndtr/gosnappy/snappy",
|
||||
"Rev": "156a073208e131d7d2e212cb749feae7c339e846"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/pbkdf2",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ripemd160",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/scrypt",
|
||||
"Rev": "1f22c0103821b9390939b6776727195525381532"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
|
||||
"Rev": "4ed45ec682102c643324fae5dff8dab085b6c300"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/html",
|
||||
"Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/html/atom",
|
||||
"Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/html/charset",
|
||||
"Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/websocket",
|
||||
"Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "50c6bc5e4292a1d4e65c6e9be5f53be28bcbe28e"
|
||||
"Rev": "e0403b4e005737430c05a57aac078479844f919c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/charmap",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/htmlindex",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/internal",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/internal/identifier",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/japanese",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/korean",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/simplifiedchinese",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/traditionalchinese",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/encoding/unicode",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/internal/tag",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/internal/utf8internal",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/language",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/runes",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
"Rev": "c93e7c9fff19fb9139b5ab04ce041833add0134e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/transform",
|
||||
"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/tools/go/ast/astutil",
|
||||
"Rev": "758728c4b28cfbac299730969ef8f655c4761283"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/tools/imports",
|
||||
"Rev": "758728c4b28cfbac299730969ef8f655c4761283"
|
||||
"Rev": "c93e7c9fff19fb9139b5ab04ce041833add0134e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/check.v1",
|
||||
"Rev": "4f90aeace3a26ad7021961c297b22c42160c7b25"
|
||||
"Rev": "64131543e7896d5bcc6bd5a76287eb75ea96c673"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/fatih/set.v0",
|
||||
|
@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright (c) 2009 Google Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@ -19,7 +19,7 @@ var (
|
||||
NIL = Parse("00000000-0000-0000-0000-000000000000")
|
||||
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
||||
// NewHash returns a new UUID dervied from the hash of space concatenated with
|
||||
// data generated by h. The hash should be at least 16 byte in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
@ -4,13 +4,9 @@
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
import "net"
|
||||
|
||||
var (
|
||||
nodeMu sync.Mutex
|
||||
interfaces []net.Interface // cached list of interfaces
|
||||
ifname string // name of interface being used
|
||||
nodeID []byte // hardware for version 1 UUIDs
|
||||
@ -20,8 +16,6 @@ var (
|
||||
// derived. The interface "user" is returned if the NodeID was set by
|
||||
// SetNodeID.
|
||||
func NodeInterface() string {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return ifname
|
||||
}
|
||||
|
||||
@ -32,12 +26,6 @@ func NodeInterface() string {
|
||||
//
|
||||
// SetNodeInterface never fails when name is "".
|
||||
func SetNodeInterface(name string) bool {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return setNodeInterface(name)
|
||||
}
|
||||
|
||||
func setNodeInterface(name string) bool {
|
||||
if interfaces == nil {
|
||||
var err error
|
||||
interfaces, err = net.Interfaces()
|
||||
@ -71,10 +59,8 @@ func setNodeInterface(name string) bool {
|
||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
||||
// if not already set.
|
||||
func NodeID() []byte {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
if nodeID == nil {
|
||||
setNodeInterface("")
|
||||
SetNodeInterface("")
|
||||
}
|
||||
nid := make([]byte, 6)
|
||||
copy(nid, nodeID)
|
||||
@ -85,8 +71,6 @@ func NodeID() []byte {
|
||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
||||
// Node ID is not set.
|
||||
func SetNodeID(id []byte) bool {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
if setNodeID(id) {
|
||||
ifname = "user"
|
||||
return true
|
@ -23,7 +23,7 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
timeMu sync.Mutex
|
||||
mu sync.Mutex
|
||||
lasttime uint64 // last time we returned
|
||||
clock_seq uint16 // clock sequence for this run
|
||||
|
||||
@ -40,15 +40,15 @@ func (t Time) UnixTime() (sec, nsec int64) {
|
||||
}
|
||||
|
||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
||||
// clock sequence as well as adjusting the clock sequence as needed. An error
|
||||
// is returned if the current time cannot be determined.
|
||||
func GetTime() (Time, uint16, error) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
// adjusts the clock sequence as needed. An error is returned if the current
|
||||
// time cannot be determined.
|
||||
func GetTime() (Time, error) {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
return getTime()
|
||||
}
|
||||
|
||||
func getTime() (Time, uint16, error) {
|
||||
func getTime() (Time, error) {
|
||||
t := timeNow()
|
||||
|
||||
// If we don't have a clock sequence already, set one.
|
||||
@ -63,7 +63,7 @@ func getTime() (Time, uint16, error) {
|
||||
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
|
||||
}
|
||||
lasttime = now
|
||||
return Time(now), clock_seq, nil
|
||||
return Time(now), nil
|
||||
}
|
||||
|
||||
// ClockSequence returns the current clock sequence, generating one if not
|
||||
@ -75,8 +75,8 @@ func getTime() (Time, uint16, error) {
|
||||
// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated
|
||||
// for
|
||||
func ClockSequence() int {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
return clockSequence()
|
||||
}
|
||||
|
||||
@ -90,8 +90,8 @@ func clockSequence() int {
|
||||
// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||
// -1 causes a new sequence to be generated.
|
||||
func SetClockSequence(seq int) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
setClockSequence(seq)
|
||||
}
|
||||
|
@ -16,7 +16,7 @@ func randomBits(b []byte) {
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = [256]byte{
|
||||
var xvalues = []byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
@ -7,7 +7,6 @@ package uuid
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
@ -55,8 +54,8 @@ func Parse(s string) UUID {
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return nil
|
||||
}
|
||||
var uuid [16]byte
|
||||
for i, x := range [16]int{
|
||||
uuid := make([]byte, 16)
|
||||
for i, x := range []int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
@ -68,7 +67,7 @@ func Parse(s string) UUID {
|
||||
uuid[i] = v
|
||||
}
|
||||
}
|
||||
return uuid[:]
|
||||
return uuid
|
||||
}
|
||||
|
||||
// Equal returns true if uuid1 and uuid2 are equal.
|
||||
@ -79,36 +78,23 @@ func Equal(uuid1, uuid2 UUID) bool {
|
||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
// , or "" if uuid is invalid.
|
||||
func (uuid UUID) String() string {
|
||||
if len(uuid) != 16 {
|
||||
if uuid == nil || len(uuid) != 16 {
|
||||
return ""
|
||||
}
|
||||
var buf [36]byte
|
||||
encodeHex(buf[:], uuid)
|
||||
return string(buf[:])
|
||||
b := []byte(uuid)
|
||||
return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
|
||||
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
}
|
||||
|
||||
// URN returns the RFC 2141 URN form of uuid,
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
||||
func (uuid UUID) URN() string {
|
||||
if len(uuid) != 16 {
|
||||
if uuid == nil || len(uuid) != 16 {
|
||||
return ""
|
||||
}
|
||||
var buf [36 + 9]byte
|
||||
copy(buf[:], "urn:uuid:")
|
||||
encodeHex(buf[9:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
func encodeHex(dst []byte, uuid UUID) {
|
||||
hex.Encode(dst[:], uuid[:4])
|
||||
dst[8] = '-'
|
||||
hex.Encode(dst[9:13], uuid[4:6])
|
||||
dst[13] = '-'
|
||||
hex.Encode(dst[14:18], uuid[6:8])
|
||||
dst[18] = '-'
|
||||
hex.Encode(dst[19:23], uuid[8:10])
|
||||
dst[23] = '-'
|
||||
hex.Encode(dst[24:], uuid[10:])
|
||||
b := []byte(uuid)
|
||||
return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
|
||||
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
}
|
||||
|
||||
// Variant returns the variant encoded in uuid. It returns Invalid if
|
||||
@ -127,9 +113,10 @@ func (uuid UUID) Variant() Variant {
|
||||
default:
|
||||
return Reserved
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Version returns the version of uuid. It returns false if uuid is not
|
||||
// Version returns the verison of uuid. It returns false if uuid is not
|
||||
// valid.
|
||||
func (uuid UUID) Version() (Version, bool) {
|
||||
if len(uuid) != 16 {
|
390  Godeps/_workspace/src/code.google.com/p/go-uuid/uuid/uuid_test.go (generated, vendored, new file)
@@ -0,0 +1,390 @@
|
||||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type test struct {
|
||||
in string
|
||||
version Version
|
||||
variant Variant
|
||||
isuuid bool
|
||||
}
|
||||
|
||||
var tests = []test{
|
||||
{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
|
||||
{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
|
||||
{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
|
||||
{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
|
||||
{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
|
||||
{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
|
||||
{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
|
||||
{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
|
||||
{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
|
||||
{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
|
||||
{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
|
||||
{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
|
||||
{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
|
||||
{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
|
||||
|
||||
{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
|
||||
{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
|
||||
{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
|
||||
{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
|
||||
|
||||
{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
|
||||
{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
|
||||
}
|
||||
|
||||
var constants = []struct {
|
||||
c interface{}
|
||||
name string
|
||||
}{
|
||||
{Person, "Person"},
|
||||
{Group, "Group"},
|
||||
{Org, "Org"},
|
||||
{Invalid, "Invalid"},
|
||||
{RFC4122, "RFC4122"},
|
||||
{Reserved, "Reserved"},
|
||||
{Microsoft, "Microsoft"},
|
||||
{Future, "Future"},
|
||||
{Domain(17), "Domain17"},
|
||||
{Variant(42), "BadVariant42"},
|
||||
}
|
||||
|
||||
func testTest(t *testing.T, in string, tt test) {
|
||||
uuid := Parse(in)
|
||||
if ok := (uuid != nil); ok != tt.isuuid {
|
||||
t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid)
|
||||
}
|
||||
if uuid == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if v := uuid.Variant(); v != tt.variant {
|
||||
t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant)
|
||||
}
|
||||
if v, _ := uuid.Version(); v != tt.version {
|
||||
t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUUID(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
testTest(t, tt.in, tt)
|
||||
testTest(t, strings.ToUpper(tt.in), tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConstants(t *testing.T) {
|
||||
for x, tt := range constants {
|
||||
v, ok := tt.c.(fmt.Stringer)
|
||||
if !ok {
|
||||
t.Errorf("%x: %v: not a stringer", x, v)
|
||||
} else if s := v.String(); s != tt.name {
|
||||
v, _ := tt.c.(int)
|
||||
t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomUUID(t *testing.T) {
|
||||
m := make(map[string]bool)
|
||||
for x := 1; x < 32; x++ {
|
||||
uuid := NewRandom()
|
||||
s := uuid.String()
|
||||
if m[s] {
|
||||
t.Errorf("NewRandom returned duplicated UUID %s\n", s)
|
||||
}
|
||||
m[s] = true
|
||||
if v, _ := uuid.Version(); v != 4 {
|
||||
t.Errorf("Random UUID of version %s\n", v)
|
||||
}
|
||||
if uuid.Variant() != RFC4122 {
|
||||
t.Errorf("Random UUID is variant %d\n", uuid.Variant())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
m := make(map[string]bool)
|
||||
for x := 1; x < 32; x++ {
|
||||
s := New()
|
||||
if m[s] {
|
||||
t.Errorf("New returned duplicated UUID %s\n", s)
|
||||
}
|
||||
m[s] = true
|
||||
uuid := Parse(s)
|
||||
if uuid == nil {
|
||||
t.Errorf("New returned %q which does not decode\n", s)
|
||||
continue
|
||||
}
|
||||
if v, _ := uuid.Version(); v != 4 {
|
||||
t.Errorf("Random UUID of version %s\n", v)
|
||||
}
|
||||
if uuid.Variant() != RFC4122 {
|
||||
t.Errorf("Random UUID is variant %d\n", uuid.Variant())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func clockSeq(t *testing.T, uuid UUID) int {
|
||||
seq, ok := uuid.ClockSequence()
|
||||
if !ok {
|
||||
t.Fatalf("%s: invalid clock sequence\n", uuid)
|
||||
}
|
||||
return seq
|
||||
}
|
||||
|
||||
func TestClockSeq(t *testing.T) {
|
||||
// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
|
||||
defer func(orig func() time.Time) { timeNow = orig }(timeNow)
|
||||
monTime := time.Now()
|
||||
timeNow = func() time.Time {
|
||||
monTime = monTime.Add(1 * time.Second)
|
||||
return monTime
|
||||
}
|
||||
|
||||
SetClockSequence(-1)
|
||||
uuid1 := NewUUID()
|
||||
uuid2 := NewUUID()
|
||||
|
||||
if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
|
||||
t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
|
||||
}
|
||||
|
||||
SetClockSequence(-1)
|
||||
uuid2 = NewUUID()
|
||||
|
||||
// Just on the very off chance we generated the same sequence
|
||||
// two times we try again.
|
||||
if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
|
||||
SetClockSequence(-1)
|
||||
uuid2 = NewUUID()
|
||||
}
|
||||
if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
|
||||
t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
|
||||
}
|
||||
|
||||
SetClockSequence(0x1234)
|
||||
uuid1 = NewUUID()
|
||||
if seq := clockSeq(t, uuid1); seq != 0x1234 {
|
||||
t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoding(t *testing.T) {
|
||||
text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
|
||||
urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
|
||||
data := UUID{
|
||||
0x7d, 0x44, 0x48, 0x40,
|
||||
0x9d, 0xc0,
|
||||
0x11, 0xd1,
|
||||
0xb2, 0x45,
|
||||
0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
|
||||
}
|
||||
if v := data.String(); v != text {
|
||||
t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
|
||||
}
|
||||
if v := data.URN(); v != urn {
|
||||
t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
|
||||
}
|
||||
|
||||
uuid := Parse(text)
|
||||
if !Equal(uuid, data) {
|
||||
t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVersion1(t *testing.T) {
|
||||
uuid1 := NewUUID()
|
||||
uuid2 := NewUUID()
|
||||
|
||||
if Equal(uuid1, uuid2) {
|
||||
t.Errorf("%s:duplicate uuid\n", uuid1)
|
||||
}
|
||||
if v, _ := uuid1.Version(); v != 1 {
|
||||
t.Errorf("%s: version %s expected 1\n", uuid1, v)
|
||||
}
|
||||
if v, _ := uuid2.Version(); v != 1 {
|
||||
t.Errorf("%s: version %s expected 1\n", uuid2, v)
|
||||
}
|
||||
n1 := uuid1.NodeID()
|
||||
n2 := uuid2.NodeID()
|
||||
if !bytes.Equal(n1, n2) {
|
||||
t.Errorf("Different nodes %x != %x\n", n1, n2)
|
||||
}
|
||||
t1, ok := uuid1.Time()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid time\n", uuid1)
|
||||
}
|
||||
t2, ok := uuid2.Time()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid time\n", uuid2)
|
||||
}
|
||||
q1, ok := uuid1.ClockSequence()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid clock sequence\n", uuid1)
|
||||
}
|
||||
q2, ok := uuid2.ClockSequence()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid clock sequence", uuid2)
|
||||
}
|
||||
|
||||
switch {
|
||||
case t1 == t2 && q1 == q2:
|
||||
t.Errorf("time stopped\n")
|
||||
case t1 > t2 && q1 == q2:
|
||||
t.Errorf("time reversed\n")
|
||||
case t1 < t2 && q1 != q2:
|
||||
t.Errorf("clock sequence chaned unexpectedly\n")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeAndTime(t *testing.T) {
|
||||
// Time is February 5, 1998 12:30:23.136364800 AM GMT
|
||||
|
||||
uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
|
||||
node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
|
||||
|
||||
ts, ok := uuid.Time()
|
||||
if ok {
|
||||
c := time.Unix(ts.UnixTime())
|
||||
want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
|
||||
if !c.Equal(want) {
|
||||
t.Errorf("Got time %v, want %v", c, want)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("%s: bad time\n", uuid)
|
||||
}
|
||||
if !bytes.Equal(node, uuid.NodeID()) {
|
||||
t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMD5(t *testing.T) {
|
||||
uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
|
||||
want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
|
||||
if uuid != want {
|
||||
t.Errorf("MD5: got %q expected %q\n", uuid, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSHA1(t *testing.T) {
|
||||
uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
|
||||
want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
|
||||
if uuid != want {
|
||||
t.Errorf("SHA1: got %q expected %q\n", uuid, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeID(t *testing.T) {
|
||||
nid := []byte{1, 2, 3, 4, 5, 6}
|
||||
SetNodeInterface("")
|
||||
s := NodeInterface()
|
||||
if s == "" || s == "user" {
|
||||
t.Errorf("NodeInterface %q after SetInteface\n", s)
|
||||
}
|
||||
node1 := NodeID()
|
||||
if node1 == nil {
|
||||
t.Errorf("NodeID nil after SetNodeInterface\n", s)
|
||||
}
|
||||
SetNodeID(nid)
|
||||
s = NodeInterface()
|
||||
if s != "user" {
|
||||
t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
|
||||
}
|
||||
node2 := NodeID()
|
||||
if node2 == nil {
|
||||
t.Errorf("NodeID nil after SetNodeID\n", s)
|
||||
}
|
||||
if bytes.Equal(node1, node2) {
|
||||
t.Errorf("NodeID not changed after SetNodeID\n", s)
|
||||
} else if !bytes.Equal(nid, node2) {
|
||||
t.Errorf("NodeID is %x, expected %x\n", node2, nid)
|
||||
}
|
||||
}
|
||||
|
||||
func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
|
||||
if uuid == nil {
|
||||
t.Errorf("%s failed\n", name)
|
||||
return
|
||||
}
|
||||
if v, _ := uuid.Version(); v != 2 {
|
||||
t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
|
||||
return
|
||||
}
|
||||
if v, ok := uuid.Domain(); !ok || v != domain {
|
||||
if !ok {
|
||||
t.Errorf("%s: %d: Domain failed\n", name, uuid)
|
||||
} else {
|
||||
t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
|
||||
}
|
||||
}
|
||||
if v, ok := uuid.Id(); !ok || v != id {
|
||||
if !ok {
|
||||
t.Errorf("%s: %d: Id failed\n", name, uuid)
|
||||
} else {
|
||||
t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDCE(t *testing.T) {
|
||||
testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
|
||||
testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
|
||||
testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
|
||||
}
|
||||
|
||||
type badRand struct{}
|
||||
|
||||
func (r badRand) Read(buf []byte) (int, error) {
|
||||
for i, _ := range buf {
|
||||
buf[i] = byte(i)
|
||||
}
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
func TestBadRand(t *testing.T) {
|
||||
SetRand(badRand{})
|
||||
uuid1 := New()
|
||||
uuid2 := New()
|
||||
if uuid1 != uuid2 {
|
||||
t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2)
|
||||
}
|
||||
SetRand(nil)
|
||||
uuid1 = New()
|
||||
uuid2 = New()
|
||||
if uuid1 == uuid2 {
|
||||
t.Errorf("unexecpted duplicates, got %q\n", uuid1)
|
||||
}
|
||||
}
|
@ -19,7 +19,7 @@ func NewUUID() UUID {
|
||||
SetNodeInterface("")
|
||||
}
|
||||
|
||||
now, seq, err := GetTime()
|
||||
now, err := GetTime()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
@ -34,7 +34,7 @@ func NewUUID() UUID {
|
||||
binary.BigEndian.PutUint32(uuid[0:], time_low)
|
||||
binary.BigEndian.PutUint16(uuid[4:], time_mid)
|
||||
binary.BigEndian.PutUint16(uuid[6:], time_hi)
|
||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
||||
binary.BigEndian.PutUint16(uuid[8:], clock_seq)
|
||||
copy(uuid[10:], nodeID)
|
||||
|
||||
return uuid
|
26  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/cl.go (generated, vendored)
@@ -1,26 +0,0 @@
- /*
- Package cl provides a binding to the OpenCL api. It's mostly a low-level
- wrapper that avoids adding functionality while still making the interface
- a little more friendly and easy to use.
-
- Resource life-cycle management:
-
- For any CL object that gets created (buffer, queue, kernel, etc..) you should
- call object.Release() when finished with it to free the CL resources. This
- explicitely calls the related clXXXRelease method for the type. However,
- as a fallback there is a finalizer set for every resource item that takes
- care of it (eventually) if Release isn't called. In this way you can have
- better control over the life cycle of resources while having a fall back
- to avoid leaks. This is similar to how file handles and such are handled
- in the Go standard packages.
- */
- package cl
-
- // #include "headers/1.2/opencl.h"
- // #cgo CFLAGS: -Iheaders/1.2
- // #cgo darwin LDFLAGS: -framework OpenCL
- // #cgo linux LDFLAGS: -lOpenCL
- import "C"
- import "errors"
-
- var ErrUnsupported = errors.New("cl: unsupported")
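The doc comment in the deleted cl.go above describes a Release-plus-finalizer pattern for C-side resources: callers free objects explicitly with Release(), and a runtime finalizer eventually frees anything they forget. As a rough illustration only (not part of this diff; the type and function names below are hypothetical), a minimal Go sketch of that pattern:

```go
package main

import (
	"fmt"
	"runtime"
)

// resource stands in for a wrapped C handle such as cl_mem.
type resource struct {
	handle int
	freed  bool
}

// releaseResource frees the underlying handle once; in the real package the
// matching clXXXRelease call would happen here.
func releaseResource(r *resource) {
	if !r.freed {
		r.freed = true
		fmt.Println("released handle", r.handle)
	}
}

// newResource registers a finalizer as a fallback so the handle is released
// eventually even if the caller never calls Release.
func newResource(handle int) *resource {
	r := &resource{handle: handle}
	runtime.SetFinalizer(r, releaseResource)
	return r
}

// Release frees the resource immediately; calling it more than once is safe.
func (r *resource) Release() { releaseResource(r) }

func main() {
	r := newResource(42)
	r.Release() // preferred: explicit, deterministic cleanup
}
```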
161  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/context.go (generated, vendored)
@@ -1,161 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #include <stdlib.h>
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const maxImageFormats = 256
|
||||
|
||||
type Context struct {
|
||||
clContext C.cl_context
|
||||
devices []*Device
|
||||
}
|
||||
|
||||
type MemObject struct {
|
||||
clMem C.cl_mem
|
||||
size int
|
||||
}
|
||||
|
||||
func releaseContext(c *Context) {
|
||||
if c.clContext != nil {
|
||||
C.clReleaseContext(c.clContext)
|
||||
c.clContext = nil
|
||||
}
|
||||
}
|
||||
|
||||
func releaseMemObject(b *MemObject) {
|
||||
if b.clMem != nil {
|
||||
C.clReleaseMemObject(b.clMem)
|
||||
b.clMem = nil
|
||||
}
|
||||
}
|
||||
|
||||
func newMemObject(mo C.cl_mem, size int) *MemObject {
|
||||
memObject := &MemObject{clMem: mo, size: size}
|
||||
runtime.SetFinalizer(memObject, releaseMemObject)
|
||||
return memObject
|
||||
}
|
||||
|
||||
func (b *MemObject) Release() {
|
||||
releaseMemObject(b)
|
||||
}
|
||||
|
||||
// TODO: properties
|
||||
func CreateContext(devices []*Device) (*Context, error) {
|
||||
deviceIds := buildDeviceIdList(devices)
|
||||
var err C.cl_int
|
||||
clContext := C.clCreateContext(nil, C.cl_uint(len(devices)), &deviceIds[0], nil, nil, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if clContext == nil {
|
||||
return nil, ErrUnknown
|
||||
}
|
||||
context := &Context{clContext: clContext, devices: devices}
|
||||
runtime.SetFinalizer(context, releaseContext)
|
||||
return context, nil
|
||||
}
|
||||
|
||||
func (ctx *Context) GetSupportedImageFormats(flags MemFlag, imageType MemObjectType) ([]ImageFormat, error) {
|
||||
var formats [maxImageFormats]C.cl_image_format
|
||||
var nFormats C.cl_uint
|
||||
if err := C.clGetSupportedImageFormats(ctx.clContext, C.cl_mem_flags(flags), C.cl_mem_object_type(imageType), maxImageFormats, &formats[0], &nFormats); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
fmts := make([]ImageFormat, nFormats)
|
||||
for i, f := range formats[:nFormats] {
|
||||
fmts[i] = ImageFormat{
|
||||
ChannelOrder: ChannelOrder(f.image_channel_order),
|
||||
ChannelDataType: ChannelDataType(f.image_channel_data_type),
|
||||
}
|
||||
}
|
||||
return fmts, nil
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateCommandQueue(device *Device, properties CommandQueueProperty) (*CommandQueue, error) {
|
||||
var err C.cl_int
|
||||
clQueue := C.clCreateCommandQueue(ctx.clContext, device.id, C.cl_command_queue_properties(properties), &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if clQueue == nil {
|
||||
return nil, ErrUnknown
|
||||
}
|
||||
commandQueue := &CommandQueue{clQueue: clQueue, device: device}
|
||||
runtime.SetFinalizer(commandQueue, releaseCommandQueue)
|
||||
return commandQueue, nil
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateProgramWithSource(sources []string) (*Program, error) {
|
||||
cSources := make([]*C.char, len(sources))
|
||||
for i, s := range sources {
|
||||
cs := C.CString(s)
|
||||
cSources[i] = cs
|
||||
defer C.free(unsafe.Pointer(cs))
|
||||
}
|
||||
var err C.cl_int
|
||||
clProgram := C.clCreateProgramWithSource(ctx.clContext, C.cl_uint(len(sources)), &cSources[0], nil, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if clProgram == nil {
|
||||
return nil, ErrUnknown
|
||||
}
|
||||
program := &Program{clProgram: clProgram, devices: ctx.devices}
|
||||
runtime.SetFinalizer(program, releaseProgram)
|
||||
return program, nil
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateBufferUnsafe(flags MemFlag, size int, dataPtr unsafe.Pointer) (*MemObject, error) {
|
||||
var err C.cl_int
|
||||
clBuffer := C.clCreateBuffer(ctx.clContext, C.cl_mem_flags(flags), C.size_t(size), dataPtr, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if clBuffer == nil {
|
||||
return nil, ErrUnknown
|
||||
}
|
||||
return newMemObject(clBuffer, size), nil
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateEmptyBuffer(flags MemFlag, size int) (*MemObject, error) {
|
||||
return ctx.CreateBufferUnsafe(flags, size, nil)
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateEmptyBufferFloat32(flags MemFlag, size int) (*MemObject, error) {
|
||||
return ctx.CreateBufferUnsafe(flags, 4*size, nil)
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateBuffer(flags MemFlag, data []byte) (*MemObject, error) {
|
||||
return ctx.CreateBufferUnsafe(flags, len(data), unsafe.Pointer(&data[0]))
|
||||
}
|
||||
|
||||
//float64
|
||||
func (ctx *Context) CreateBufferFloat32(flags MemFlag, data []float32) (*MemObject, error) {
|
||||
return ctx.CreateBufferUnsafe(flags, 4*len(data), unsafe.Pointer(&data[0]))
|
||||
}
|
||||
|
||||
func (ctx *Context) CreateUserEvent() (*Event, error) {
|
||||
var err C.cl_int
|
||||
clEvent := C.clCreateUserEvent(ctx.clContext, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
return newEvent(clEvent), nil
|
||||
}
|
||||
|
||||
func (ctx *Context) Release() {
|
||||
releaseContext(ctx)
|
||||
}
|
||||
|
||||
// http://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/clCreateSubBuffer.html
|
||||
// func (memObject *MemObject) CreateSubBuffer(flags MemFlag, bufferCreateType BufferCreateType, )
|
510  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/device.go (generated, vendored)
@@ -1,510 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #include "cl_ext.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const maxDeviceCount = 64
|
||||
|
||||
type DeviceType uint
|
||||
|
||||
const (
|
||||
DeviceTypeCPU DeviceType = C.CL_DEVICE_TYPE_CPU
|
||||
DeviceTypeGPU DeviceType = C.CL_DEVICE_TYPE_GPU
|
||||
DeviceTypeAccelerator DeviceType = C.CL_DEVICE_TYPE_ACCELERATOR
|
||||
DeviceTypeDefault DeviceType = C.CL_DEVICE_TYPE_DEFAULT
|
||||
DeviceTypeAll DeviceType = C.CL_DEVICE_TYPE_ALL
|
||||
)
|
||||
|
||||
type FPConfig int
|
||||
|
||||
const (
|
||||
FPConfigDenorm FPConfig = C.CL_FP_DENORM // denorms are supported
|
||||
FPConfigInfNaN FPConfig = C.CL_FP_INF_NAN // INF and NaNs are supported
|
||||
FPConfigRoundToNearest FPConfig = C.CL_FP_ROUND_TO_NEAREST // round to nearest even rounding mode supported
|
||||
FPConfigRoundToZero FPConfig = C.CL_FP_ROUND_TO_ZERO // round to zero rounding mode supported
|
||||
FPConfigRoundToInf FPConfig = C.CL_FP_ROUND_TO_INF // round to positive and negative infinity rounding modes supported
|
||||
FPConfigFMA FPConfig = C.CL_FP_FMA // IEEE754-2008 fused multiply-add is supported
|
||||
FPConfigSoftFloat FPConfig = C.CL_FP_SOFT_FLOAT // Basic floating-point operations (such as addition, subtraction, multiplication) are implemented in software
|
||||
)
|
||||
|
||||
var fpConfigNameMap = map[FPConfig]string{
|
||||
FPConfigDenorm: "Denorm",
|
||||
FPConfigInfNaN: "InfNaN",
|
||||
FPConfigRoundToNearest: "RoundToNearest",
|
||||
FPConfigRoundToZero: "RoundToZero",
|
||||
FPConfigRoundToInf: "RoundToInf",
|
||||
FPConfigFMA: "FMA",
|
||||
FPConfigSoftFloat: "SoftFloat",
|
||||
}
|
||||
|
||||
func (c FPConfig) String() string {
|
||||
var parts []string
|
||||
for bit, name := range fpConfigNameMap {
|
||||
if c&bit != 0 {
|
||||
parts = append(parts, name)
|
||||
}
|
||||
}
|
||||
if parts == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
func (dt DeviceType) String() string {
|
||||
var parts []string
|
||||
if dt&DeviceTypeCPU != 0 {
|
||||
parts = append(parts, "CPU")
|
||||
}
|
||||
if dt&DeviceTypeGPU != 0 {
|
||||
parts = append(parts, "GPU")
|
||||
}
|
||||
if dt&DeviceTypeAccelerator != 0 {
|
||||
parts = append(parts, "Accelerator")
|
||||
}
|
||||
if dt&DeviceTypeDefault != 0 {
|
||||
parts = append(parts, "Default")
|
||||
}
|
||||
if parts == nil {
|
||||
parts = append(parts, "None")
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
id C.cl_device_id
|
||||
}
|
||||
|
||||
func buildDeviceIdList(devices []*Device) []C.cl_device_id {
|
||||
deviceIds := make([]C.cl_device_id, len(devices))
|
||||
for i, d := range devices {
|
||||
deviceIds[i] = d.id
|
||||
}
|
||||
return deviceIds
|
||||
}
|
||||
|
||||
// Obtain the list of devices available on a platform. 'platform' refers
|
||||
// to the platform returned by GetPlatforms or can be nil. If platform
|
||||
// is nil, the behavior is implementation-defined.
|
||||
func GetDevices(platform *Platform, deviceType DeviceType) ([]*Device, error) {
|
||||
var deviceIds [maxDeviceCount]C.cl_device_id
|
||||
var numDevices C.cl_uint
|
||||
var platformId C.cl_platform_id
|
||||
if platform != nil {
|
||||
platformId = platform.id
|
||||
}
|
||||
if err := C.clGetDeviceIDs(platformId, C.cl_device_type(deviceType), C.cl_uint(maxDeviceCount), &deviceIds[0], &numDevices); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if numDevices > maxDeviceCount {
|
||||
numDevices = maxDeviceCount
|
||||
}
|
||||
devices := make([]*Device, numDevices)
|
||||
for i := 0; i < int(numDevices); i++ {
|
||||
devices[i] = &Device{id: deviceIds[i]}
|
||||
}
|
||||
return devices, nil
|
||||
}
|
||||
|
||||
func (d *Device) nullableId() C.cl_device_id {
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
return d.id
|
||||
}
|
||||
|
||||
func (d *Device) GetInfoString(param C.cl_device_info, panicOnError bool) (string, error) {
|
||||
var strC [1024]C.char
|
||||
var strN C.size_t
|
||||
if err := C.clGetDeviceInfo(d.id, param, 1024, unsafe.Pointer(&strC), &strN); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return "", toError(err)
|
||||
}
|
||||
|
||||
// OpenCL strings are NUL-terminated, and the terminator is included in strN
|
||||
// Go strings aren't NUL-terminated, so subtract 1 from the length
|
||||
return C.GoStringN((*C.char)(unsafe.Pointer(&strC)), C.int(strN-1)), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoUint(param C.cl_device_info, panicOnError bool) (uint, error) {
|
||||
var val C.cl_uint
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return uint(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoSize(param C.cl_device_info, panicOnError bool) (int, error) {
|
||||
var val C.size_t
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoUlong(param C.cl_device_info, panicOnError bool) (int64, error) {
|
||||
var val C.cl_ulong
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int64(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoBool(param C.cl_device_info, panicOnError bool) (bool, error) {
|
||||
var val C.cl_bool
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return false, toError(err)
|
||||
}
|
||||
return val == C.CL_TRUE, nil
|
||||
}
|
||||
|
||||
func (d *Device) Name() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_NAME, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Vendor() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_VENDOR, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Extensions() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_EXTENSIONS, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) OpenCLCVersion() string {
|
||||
	str, _ := d.GetInfoString(C.CL_DEVICE_OPENCL_C_VERSION, true)
	return str
}

func (d *Device) Profile() string {
	str, _ := d.GetInfoString(C.CL_DEVICE_PROFILE, true)
	return str
}

func (d *Device) Version() string {
	str, _ := d.GetInfoString(C.CL_DEVICE_VERSION, true)
	return str
}

func (d *Device) DriverVersion() string {
	str, _ := d.GetInfoString(C.CL_DRIVER_VERSION, true)
	return str
}

// The default compute device address space size specified as an
// unsigned integer value in bits. Currently supported values are 32 or 64 bits.
func (d *Device) AddressBits() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_ADDRESS_BITS, true)
	return int(val)
}

// Size of global memory cache line in bytes.
func (d *Device) GlobalMemCachelineSize() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, true)
	return int(val)
}

// Maximum configured clock frequency of the device in MHz.
func (d *Device) MaxClockFrequency() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_CLOCK_FREQUENCY, true)
	return int(val)
}

// The number of parallel compute units on the OpenCL device.
// A work-group executes on a single compute unit. The minimum value is 1.
func (d *Device) MaxComputeUnits() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_COMPUTE_UNITS, true)
	return int(val)
}

// Max number of arguments declared with the __constant qualifier in a kernel.
// The minimum value is 8 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) MaxConstantArgs() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_CONSTANT_ARGS, true)
	return int(val)
}

// Max number of simultaneous image objects that can be read by a kernel.
// The minimum value is 128 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) MaxReadImageArgs() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_READ_IMAGE_ARGS, true)
	return int(val)
}

// Maximum number of samplers that can be used in a kernel. The minimum
// value is 16 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE. (Also see sampler_t.)
func (d *Device) MaxSamplers() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_SAMPLERS, true)
	return int(val)
}

// Maximum dimensions that specify the global and local work-item IDs used
// by the data parallel execution model. (Refer to clEnqueueNDRangeKernel).
// The minimum value is 3 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) MaxWorkItemDimensions() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, true)
	return int(val)
}

// Max number of simultaneous image objects that can be written to by a
// kernel. The minimum value is 8 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) MaxWriteImageArgs() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MAX_WRITE_IMAGE_ARGS, true)
	return int(val)
}

// The minimum value is the size (in bits) of the largest OpenCL built-in
// data type supported by the device (long16 in FULL profile, long16 or
// int16 in EMBEDDED profile) for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) MemBaseAddrAlign() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_MEM_BASE_ADDR_ALIGN, true)
	return int(val)
}

func (d *Device) NativeVectorWidthChar() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, true)
	return int(val)
}

func (d *Device) NativeVectorWidthShort() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, true)
	return int(val)
}

func (d *Device) NativeVectorWidthInt() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, true)
	return int(val)
}

func (d *Device) NativeVectorWidthLong() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, true)
	return int(val)
}

func (d *Device) NativeVectorWidthFloat() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, true)
	return int(val)
}

func (d *Device) NativeVectorWidthDouble() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, true)
	return int(val)
}

func (d *Device) NativeVectorWidthHalf() int {
	val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, true)
	return int(val)
}

// Max height of 2D image in pixels. The minimum value is 8192
// if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) Image2DMaxHeight() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE2D_MAX_HEIGHT, true)
	return int(val)
}

// Max width of 2D image or 1D image not created from a buffer object in
// pixels. The minimum value is 8192 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) Image2DMaxWidth() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE2D_MAX_WIDTH, true)
	return int(val)
}

// Max depth of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) Image3DMaxDepth() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_DEPTH, true)
	return int(val)
}

// Max height of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) Image3DMaxHeight() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_HEIGHT, true)
	return int(val)
}

// Max width of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) Image3DMaxWidth() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_WIDTH, true)
	return int(val)
}

// Max size in bytes of the arguments that can be passed to a kernel. The
// minimum value is 1024 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
// For this minimum value, only a maximum of 128 arguments can be passed to a kernel.
func (d *Device) MaxParameterSize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_MAX_PARAMETER_SIZE, true)
	return int(val)
}

// Maximum number of work-items in a work-group executing a kernel on a
// single compute unit, using the data parallel execution model. (Refer
// to clEnqueueNDRangeKernel). The minimum value is 1.
func (d *Device) MaxWorkGroupSize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_MAX_WORK_GROUP_SIZE, true)
	return int(val)
}

// Describes the resolution of device timer. This is measured in nanoseconds.
func (d *Device) ProfilingTimerResolution() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_PROFILING_TIMER_RESOLUTION, true)
	return int(val)
}

// Size of local memory arena in bytes. The minimum value is 32 KB for
// devices that are not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) LocalMemSize() int64 {
	val, _ := d.getInfoUlong(C.CL_DEVICE_LOCAL_MEM_SIZE, true)
	return val
}

// Max size in bytes of a constant buffer allocation. The minimum value is
// 64 KB for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) MaxConstantBufferSize() int64 {
	val, _ := d.getInfoUlong(C.CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, true)
	return val
}

// Max size of memory object allocation in bytes. The minimum value is max
// (1/4th of CL_DEVICE_GLOBAL_MEM_SIZE, 128*1024*1024) for devices that are
// not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) MaxMemAllocSize() int64 {
	val, _ := d.getInfoUlong(C.CL_DEVICE_MAX_MEM_ALLOC_SIZE, true)
	return val
}

// Size of global device memory in bytes.
func (d *Device) GlobalMemSize() int64 {
	val, _ := d.getInfoUlong(C.CL_DEVICE_GLOBAL_MEM_SIZE, true)
	return val
}

func (d *Device) Available() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_AVAILABLE, true)
	return val
}

func (d *Device) CompilerAvailable() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_COMPILER_AVAILABLE, true)
	return val
}

func (d *Device) EndianLittle() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_ENDIAN_LITTLE, true)
	return val
}

// Is CL_TRUE if the device implements error correction for all
// accesses to compute device memory (global and constant). Is
// CL_FALSE if the device does not implement such error correction.
func (d *Device) ErrorCorrectionSupport() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_ERROR_CORRECTION_SUPPORT, true)
	return val
}

func (d *Device) HostUnifiedMemory() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_HOST_UNIFIED_MEMORY, true)
	return val
}

func (d *Device) ImageSupport() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_IMAGE_SUPPORT, true)
	return val
}

func (d *Device) Type() DeviceType {
	var deviceType C.cl_device_type
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_TYPE, C.size_t(unsafe.Sizeof(deviceType)), unsafe.Pointer(&deviceType), nil); err != C.CL_SUCCESS {
		panic("Failed to get device type")
	}
	return DeviceType(deviceType)
}

// Describes double precision floating-point capability of the OpenCL device
func (d *Device) DoubleFPConfig() FPConfig {
	var fpConfig C.cl_device_fp_config
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_DOUBLE_FP_CONFIG, C.size_t(unsafe.Sizeof(fpConfig)), unsafe.Pointer(&fpConfig), nil); err != C.CL_SUCCESS {
		panic("Failed to get double FP config")
	}
	return FPConfig(fpConfig)
}

// Describes the OPTIONAL half precision floating-point capability of the OpenCL device
func (d *Device) HalfFPConfig() FPConfig {
	var fpConfig C.cl_device_fp_config
	err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_HALF_FP_CONFIG, C.size_t(unsafe.Sizeof(fpConfig)), unsafe.Pointer(&fpConfig), nil)
	if err != C.CL_SUCCESS {
		return FPConfig(0)
	}
	return FPConfig(fpConfig)
}

// Type of local memory supported. This can be set to CL_LOCAL implying dedicated
// local memory storage such as SRAM, or CL_GLOBAL. For custom devices, CL_NONE
// can also be returned indicating no local memory support.
func (d *Device) LocalMemType() LocalMemType {
	var memType C.cl_device_local_mem_type
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_LOCAL_MEM_TYPE, C.size_t(unsafe.Sizeof(memType)), unsafe.Pointer(&memType), nil); err != C.CL_SUCCESS {
		return LocalMemType(C.CL_NONE)
	}
	return LocalMemType(memType)
}

// Describes the execution capabilities of the device. The mandated minimum capability is CL_EXEC_KERNEL.
func (d *Device) ExecutionCapabilities() ExecCapability {
	var execCap C.cl_device_exec_capabilities
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_EXECUTION_CAPABILITIES, C.size_t(unsafe.Sizeof(execCap)), unsafe.Pointer(&execCap), nil); err != C.CL_SUCCESS {
		panic("Failed to get execution capabilities")
	}
	return ExecCapability(execCap)
}

func (d *Device) GlobalMemCacheType() MemCacheType {
	var memType C.cl_device_mem_cache_type
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, C.size_t(unsafe.Sizeof(memType)), unsafe.Pointer(&memType), nil); err != C.CL_SUCCESS {
		return MemCacheType(C.CL_NONE)
	}
	return MemCacheType(memType)
}

// Maximum number of work-items that can be specified in each dimension of the work-group to clEnqueueNDRangeKernel.
//
// Returns n size_t entries, where n is the value returned by the query for CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS.
//
// The minimum value is (1, 1, 1) for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
func (d *Device) MaxWorkItemSizes() []int {
	dims := d.MaxWorkItemDimensions()
	sizes := make([]C.size_t, dims)
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_MAX_WORK_ITEM_SIZES, C.size_t(int(unsafe.Sizeof(sizes[0]))*dims), unsafe.Pointer(&sizes[0]), nil); err != C.CL_SUCCESS {
		panic("Failed to get max work item sizes")
	}
	intSizes := make([]int, dims)
	for i, s := range sizes {
		intSizes[i] = int(s)
	}
	return intSizes
}
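The getters above map one-to-one onto clGetDeviceInfo queries. A minimal usage sketch of dumping a device's capabilities follows; it assumes a *Device obtained from Platform.GetDevices (shown further down in this diff), and the package name clsketch and helper name DumpDeviceInfo are illustrative only:

package clsketch

import (
	"fmt"

	"github.com/Gustav-Simonsson/go-opencl/cl"
)

// DumpDeviceInfo prints a handful of the capability queries defined in device.go.
func DumpDeviceInfo(d *cl.Device) {
	fmt.Printf("device     : %s (driver %s)\n", d.Name(), d.DriverVersion())
	fmt.Printf("compute    : %d units @ %d MHz\n", d.MaxComputeUnits(), d.MaxClockFrequency())
	fmt.Printf("global mem : %d bytes\n", d.GlobalMemSize())
	fmt.Printf("local mem  : %d bytes\n", d.LocalMemSize())
	fmt.Printf("work-group : max size %d, per-dimension limits %v\n", d.MaxWorkGroupSize(), d.MaxWorkItemSizes())
}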
51
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/device12.go
generated
vendored
@ -1,51 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import "unsafe"

const FPConfigCorrectlyRoundedDivideSqrt FPConfig = C.CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT

func init() {
	fpConfigNameMap[FPConfigCorrectlyRoundedDivideSqrt] = "CorrectlyRoundedDivideSqrt"
}

func (d *Device) BuiltInKernels() string {
	str, _ := d.getInfoString(C.CL_DEVICE_BUILT_IN_KERNELS, true)
	return str
}

// Is CL_FALSE if the implementation does not have a linker available. Is CL_TRUE if the linker is available. This can be CL_FALSE for the embedded platform profile only. This must be CL_TRUE if CL_DEVICE_COMPILER_AVAILABLE is CL_TRUE
func (d *Device) LinkerAvailable() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_LINKER_AVAILABLE, true)
	return val
}

func (d *Device) ParentDevice() *Device {
	var deviceId C.cl_device_id
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_PARENT_DEVICE, C.size_t(unsafe.Sizeof(deviceId)), unsafe.Pointer(&deviceId), nil); err != C.CL_SUCCESS {
		panic("ParentDevice failed")
	}
	if deviceId == nil {
		return nil
	}
	return &Device{id: deviceId}
}

// Max number of pixels for a 1D image created from a buffer object. The minimum value is 65536 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) ImageMaxBufferSize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE_MAX_BUFFER_SIZE, true)
	return int(val)
}

// Max number of images in a 1D or 2D image array. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE
func (d *Device) ImageMaxArraySize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE_MAX_ARRAY_SIZE, true)
	return int(val)
}
1210
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl.h
generated
vendored
File diff suppressed because it is too large
315
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_ext.h
generated
vendored
315
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_ext.h
generated
vendored
@ -1,315 +0,0 @@
|
||||
/*******************************************************************************
|
||||
* Copyright (c) 2008-2013 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
******************************************************************************/
|
||||
|
||||
/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */
|
||||
|
||||
/* cl_ext.h contains OpenCL extensions which don't have external */
|
||||
/* (OpenGL, D3D) dependencies. */
|
||||
|
||||
#ifndef __CL_EXT_H
|
||||
#define __CL_EXT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include <AvailabilityMacros.h>
|
||||
#endif
|
||||
|
||||
#include <cl.h>
|
||||
|
||||
/* cl_khr_fp16 extension - no extension #define since it has no functions */
|
||||
#define CL_DEVICE_HALF_FP_CONFIG 0x1033
|
||||
|
||||
/* Memory object destruction
|
||||
*
|
||||
* Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR
|
||||
*
|
||||
* Registers a user callback function that will be called when the memory object is deleted and its resources
|
||||
* freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback
|
||||
* stack associated with memobj. The registered user callback functions are called in the reverse order in
|
||||
* which they were registered. The user callback functions are called and then the memory object is deleted
|
||||
* and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be
|
||||
* notified when the memory referenced by host_ptr, specified when the memory object is created and used as
|
||||
* the storage bits for the memory object, can be reused or freed.
|
||||
*
|
||||
* The application may not call CL api's with the cl_mem object passed to the pfn_notify.
|
||||
*
|
||||
* Please check for the "cl_APPLE_SetMemObjectDestructor" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
|
||||
* before using.
|
||||
*/
|
||||
#define cl_APPLE_SetMemObjectDestructor 1
|
||||
cl_int CL_API_ENTRY clSetMemObjectDestructorAPPLE( cl_mem /* memobj */,
|
||||
void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
|
||||
void * /*user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/* Context Logging Functions
|
||||
*
|
||||
* The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().
|
||||
* Please check for the "cl_APPLE_ContextLoggingFunctions" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
|
||||
* before using.
|
||||
*
|
||||
* clLogMessagesToSystemLog fowards on all log messages to the Apple System Logger
|
||||
*/
|
||||
#define cl_APPLE_ContextLoggingFunctions 1
|
||||
extern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */
|
||||
extern void CL_API_ENTRY clLogMessagesToStdoutAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */
|
||||
extern void CL_API_ENTRY clLogMessagesToStderrAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/************************
|
||||
* cl_khr_icd extension *
|
||||
************************/
|
||||
#define cl_khr_icd 1
|
||||
|
||||
/* cl_platform_info */
|
||||
#define CL_PLATFORM_ICD_SUFFIX_KHR 0x0920
|
||||
|
||||
/* Additional Error Codes */
|
||||
#define CL_PLATFORM_NOT_FOUND_KHR -1001
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clIcdGetPlatformIDsKHR(cl_uint /* num_entries */,
|
||||
cl_platform_id * /* platforms */,
|
||||
cl_uint * /* num_platforms */);
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
|
||||
cl_uint /* num_entries */,
|
||||
cl_platform_id * /* platforms */,
|
||||
cl_uint * /* num_platforms */);
|
||||
|
||||
|
||||
/* Extension: cl_khr_image2D_buffer
|
||||
*
|
||||
* This extension allows a 2D image to be created from a cl_mem buffer without a copy.
|
||||
* The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.
|
||||
* Both the sampler and sampler-less read_image built-in functions are supported for 2D images
|
||||
* and 2D images created from a buffer. Similarly, the write_image built-ins are also supported
|
||||
* for 2D images created from a buffer.
|
||||
*
|
||||
* When the 2D image from buffer is created, the client must specify the width,
|
||||
* height, image format (i.e. channel order and channel data type) and optionally the row pitch
|
||||
*
|
||||
* The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.
|
||||
* The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.
|
||||
*/
|
||||
|
||||
/*************************************
|
||||
* cl_khr_initalize_memory extension *
|
||||
*************************************/
|
||||
|
||||
#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x200E
|
||||
|
||||
|
||||
/**************************************
|
||||
* cl_khr_terminate_context extension *
|
||||
**************************************/
|
||||
|
||||
#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x200F
|
||||
#define CL_CONTEXT_TERMINATE_KHR 0x2010
|
||||
|
||||
#define cl_khr_terminate_context 1
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
|
||||
|
||||
|
||||
/*
|
||||
* Extension: cl_khr_spir
|
||||
*
|
||||
* This extension adds support to create an OpenCL program object from a
|
||||
* Standard Portable Intermediate Representation (SPIR) instance
|
||||
*/
|
||||
|
||||
#define CL_DEVICE_SPIR_VERSIONS 0x40E0
|
||||
#define CL_PROGRAM_BINARY_TYPE_INTERMEDIATE 0x40E1
|
||||
|
||||
|
||||
/******************************************
|
||||
* cl_nv_device_attribute_query extension *
|
||||
******************************************/
|
||||
/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */
|
||||
#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
|
||||
#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
|
||||
#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002
|
||||
#define CL_DEVICE_WARP_SIZE_NV 0x4003
|
||||
#define CL_DEVICE_GPU_OVERLAP_NV 0x4004
|
||||
#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
|
||||
#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006
|
||||
|
||||
/*********************************
|
||||
* cl_amd_device_attribute_query *
|
||||
*********************************/
|
||||
#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036
|
||||
|
||||
/*********************************
|
||||
* cl_arm_printf extension
|
||||
*********************************/
|
||||
#define CL_PRINTF_CALLBACK_ARM 0x40B0
|
||||
#define CL_PRINTF_BUFFERSIZE_ARM 0x40B1
|
||||
|
||||
#ifdef CL_VERSION_1_1
|
||||
/***********************************
|
||||
* cl_ext_device_fission extension *
|
||||
***********************************/
|
||||
#define cl_ext_device_fission 1
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
(CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
(CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef cl_ulong cl_device_partition_property_ext;
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clCreateSubDevicesEXT( cl_device_id /*in_device*/,
|
||||
const cl_device_partition_property_ext * /* properties */,
|
||||
cl_uint /*num_entries*/,
|
||||
cl_device_id * /*out_devices*/,
|
||||
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
( CL_API_CALL * clCreateSubDevicesEXT_fn)( cl_device_id /*in_device*/,
|
||||
const cl_device_partition_property_ext * /* properties */,
|
||||
cl_uint /*num_entries*/,
|
||||
cl_device_id * /*out_devices*/,
|
||||
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
/* cl_device_partition_property_ext */
|
||||
#define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050
|
||||
#define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051
|
||||
#define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052
|
||||
#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053
|
||||
|
||||
/* clDeviceGetInfo selectors */
|
||||
#define CL_DEVICE_PARENT_DEVICE_EXT 0x4054
|
||||
#define CL_DEVICE_PARTITION_TYPES_EXT 0x4055
|
||||
#define CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056
|
||||
#define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057
|
||||
#define CL_DEVICE_PARTITION_STYLE_EXT 0x4058
|
||||
|
||||
/* error codes */
|
||||
#define CL_DEVICE_PARTITION_FAILED_EXT -1057
|
||||
#define CL_INVALID_PARTITION_COUNT_EXT -1058
|
||||
#define CL_INVALID_PARTITION_NAME_EXT -1059
|
||||
|
||||
/* CL_AFFINITY_DOMAINs */
|
||||
#define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1
|
||||
#define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2
|
||||
#define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3
|
||||
#define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4
|
||||
#define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10
|
||||
#define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100
|
||||
|
||||
/* cl_device_partition_property_ext list terminators */
|
||||
#define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext) 0)
|
||||
#define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext) 0)
|
||||
#define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext) 0 - 1)
|
||||
|
||||
/*********************************
|
||||
* cl_qcom_ext_host_ptr extension
|
||||
*********************************/
|
||||
|
||||
#define CL_MEM_EXT_HOST_PTR_QCOM (1 << 29)
|
||||
|
||||
#define CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM 0x40A0
|
||||
#define CL_DEVICE_PAGE_SIZE_QCOM 0x40A1
|
||||
#define CL_IMAGE_ROW_ALIGNMENT_QCOM 0x40A2
|
||||
#define CL_IMAGE_SLICE_ALIGNMENT_QCOM 0x40A3
|
||||
#define CL_MEM_HOST_UNCACHED_QCOM 0x40A4
|
||||
#define CL_MEM_HOST_WRITEBACK_QCOM 0x40A5
|
||||
#define CL_MEM_HOST_WRITETHROUGH_QCOM 0x40A6
|
||||
#define CL_MEM_HOST_WRITE_COMBINING_QCOM 0x40A7
|
||||
|
||||
typedef cl_uint cl_image_pitch_info_qcom;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetDeviceImageInfoQCOM(cl_device_id device,
|
||||
size_t image_width,
|
||||
size_t image_height,
|
||||
const cl_image_format *image_format,
|
||||
cl_image_pitch_info_qcom param_name,
|
||||
size_t param_value_size,
|
||||
void *param_value,
|
||||
size_t *param_value_size_ret);
|
||||
|
||||
typedef struct _cl_mem_ext_host_ptr
|
||||
{
|
||||
/* Type of external memory allocation. */
|
||||
/* Legal values will be defined in layered extensions. */
|
||||
cl_uint allocation_type;
|
||||
|
||||
/* Host cache policy for this external memory allocation. */
|
||||
cl_uint host_cache_policy;
|
||||
|
||||
} cl_mem_ext_host_ptr;
|
||||
|
||||
/*********************************
|
||||
* cl_qcom_ion_host_ptr extension
|
||||
*********************************/
|
||||
|
||||
#define CL_MEM_ION_HOST_PTR_QCOM 0x40A8
|
||||
|
||||
typedef struct _cl_mem_ion_host_ptr
|
||||
{
|
||||
/* Type of external memory allocation. */
|
||||
/* Must be CL_MEM_ION_HOST_PTR_QCOM for ION allocations. */
|
||||
cl_mem_ext_host_ptr ext_host_ptr;
|
||||
|
||||
/* ION file descriptor */
|
||||
int ion_filedesc;
|
||||
|
||||
/* Host pointer to the ION allocated memory */
|
||||
void* ion_hostptr;
|
||||
|
||||
} cl_mem_ion_host_ptr;
|
||||
|
||||
#endif /* CL_VERSION_1_1 */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* __CL_EXT_H */
|
158
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl.h
generated
vendored
158
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl.h
generated
vendored
@ -1,158 +0,0 @@
|
||||
/**********************************************************************************
|
||||
* Copyright (c) 2008 - 2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
**********************************************************************************/
|
||||
|
||||
#ifndef __OPENCL_CL_GL_H
|
||||
#define __OPENCL_CL_GL_H
|
||||
|
||||
#include <cl.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef cl_uint cl_gl_object_type;
|
||||
typedef cl_uint cl_gl_texture_info;
|
||||
typedef cl_uint cl_gl_platform_info;
|
||||
typedef struct __GLsync *cl_GLsync;
|
||||
|
||||
/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */
|
||||
#define CL_GL_OBJECT_BUFFER 0x2000
|
||||
#define CL_GL_OBJECT_TEXTURE2D 0x2001
|
||||
#define CL_GL_OBJECT_TEXTURE3D 0x2002
|
||||
#define CL_GL_OBJECT_RENDERBUFFER 0x2003
|
||||
#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
|
||||
#define CL_GL_OBJECT_TEXTURE1D 0x200F
|
||||
#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
|
||||
#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
|
||||
|
||||
/* cl_gl_texture_info */
|
||||
#define CL_GL_TEXTURE_TARGET 0x2004
|
||||
#define CL_GL_MIPMAP_LEVEL 0x2005
|
||||
#define CL_GL_NUM_SAMPLES 0x2012
|
||||
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLBuffer(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLuint /* bufobj */,
|
||||
int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLRenderbuffer(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLuint /* renderbuffer */,
|
||||
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLObjectInfo(cl_mem /* memobj */,
|
||||
cl_gl_object_type * /* gl_object_type */,
|
||||
cl_GLuint * /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLTextureInfo(cl_mem /* memobj */,
|
||||
cl_gl_texture_info /* param_name */,
|
||||
size_t /* param_value_size */,
|
||||
void * /* param_value */,
|
||||
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clEnqueueAcquireGLObjects(cl_command_queue /* command_queue */,
|
||||
cl_uint /* num_objects */,
|
||||
const cl_mem * /* mem_objects */,
|
||||
cl_uint /* num_events_in_wait_list */,
|
||||
const cl_event * /* event_wait_list */,
|
||||
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clEnqueueReleaseGLObjects(cl_command_queue /* command_queue */,
|
||||
cl_uint /* num_objects */,
|
||||
const cl_mem * /* mem_objects */,
|
||||
cl_uint /* num_events_in_wait_list */,
|
||||
const cl_event * /* event_wait_list */,
|
||||
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/* Deprecated OpenCL 1.1 APIs */
|
||||
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture2D(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
|
||||
|
||||
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture3D(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
|
||||
|
||||
/* cl_khr_gl_sharing extension */
|
||||
|
||||
#define cl_khr_gl_sharing 1
|
||||
|
||||
typedef cl_uint cl_gl_context_info;
|
||||
|
||||
/* Additional Error Codes */
|
||||
#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
|
||||
|
||||
/* cl_gl_context_info */
|
||||
#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
|
||||
#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
|
||||
|
||||
/* Additional cl_context_properties */
|
||||
#define CL_GL_CONTEXT_KHR 0x2008
|
||||
#define CL_EGL_DISPLAY_KHR 0x2009
|
||||
#define CL_GLX_DISPLAY_KHR 0x200A
|
||||
#define CL_WGL_HDC_KHR 0x200B
|
||||
#define CL_CGL_SHAREGROUP_KHR 0x200C
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLContextInfoKHR(const cl_context_properties * /* properties */,
|
||||
cl_gl_context_info /* param_name */,
|
||||
size_t /* param_value_size */,
|
||||
void * /* param_value */,
|
||||
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)(
|
||||
const cl_context_properties * properties,
|
||||
cl_gl_context_info param_name,
|
||||
size_t param_value_size,
|
||||
void * param_value,
|
||||
size_t * param_value_size_ret);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_CL_GL_H */
|
65
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl_ext.h
generated
vendored
65
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl_ext.h
generated
vendored
@ -1,65 +0,0 @@
|
||||
/**********************************************************************************
|
||||
* Copyright (c) 2008-2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
**********************************************************************************/
|
||||
|
||||
/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
|
||||
|
||||
/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have */
|
||||
/* OpenGL dependencies. */
|
||||
|
||||
#ifndef __OPENCL_CL_GL_EXT_H
|
||||
#define __OPENCL_CL_GL_EXT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <cl_gl.h>
|
||||
|
||||
/*
|
||||
* For each extension, follow this template
|
||||
* cl_VEN_extname extension */
|
||||
/* #define cl_VEN_extname 1
|
||||
* ... define new types, if any
|
||||
* ... define new tokens, if any
|
||||
* ... define new APIs, if any
|
||||
*
|
||||
* If you need GLtypes here, mirror them with a cl_GLtype, rather than including a GL header
|
||||
* This allows us to avoid having to decide whether to include GL headers or GLES here.
|
||||
*/
|
||||
|
||||
/*
|
||||
* cl_khr_gl_event extension
|
||||
* See section 9.9 in the OpenCL 1.1 spec for more information
|
||||
*/
|
||||
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
|
||||
|
||||
extern CL_API_ENTRY cl_event CL_API_CALL
|
||||
clCreateEventFromGLsyncKHR(cl_context /* context */,
|
||||
cl_GLsync /* cl_GLsync */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_CL_GL_EXT_H */
|
1278
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_platform.h
generated
vendored
File diff suppressed because it is too large
43
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/opencl.h
generated
vendored
43
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/opencl.h
generated
vendored
@ -1,43 +0,0 @@
|
||||
/*******************************************************************************
|
||||
* Copyright (c) 2008-2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
******************************************************************************/
|
||||
|
||||
/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
|
||||
|
||||
#ifndef __OPENCL_H
|
||||
#define __OPENCL_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <cl.h>
|
||||
#include <cl_gl.h>
|
||||
#include <cl_gl_ext.h>
|
||||
#include <cl_ext.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_H */
|
||||
|
83
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/image.go
generated
vendored
@ -1,83 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import (
	"image"
	"unsafe"
)

func (ctx *Context) CreateImage(flags MemFlag, imageFormat ImageFormat, imageDesc ImageDescription, data []byte) (*MemObject, error) {
	format := imageFormat.toCl()
	desc := imageDesc.toCl()
	var dataPtr unsafe.Pointer
	if data != nil {
		dataPtr = unsafe.Pointer(&data[0])
	}
	var err C.cl_int
	clBuffer := C.clCreateImage(ctx.clContext, C.cl_mem_flags(flags), &format, &desc, dataPtr, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clBuffer == nil {
		return nil, ErrUnknown
	}
	return newMemObject(clBuffer, len(data)), nil
}

func (ctx *Context) CreateImageSimple(flags MemFlag, width, height int, channelOrder ChannelOrder, channelDataType ChannelDataType, data []byte) (*MemObject, error) {
	format := ImageFormat{channelOrder, channelDataType}
	desc := ImageDescription{
		Type:   MemObjectTypeImage2D,
		Width:  width,
		Height: height,
	}
	return ctx.CreateImage(flags, format, desc, data)
}

func (ctx *Context) CreateImageFromImage(flags MemFlag, img image.Image) (*MemObject, error) {
	switch m := img.(type) {
	case *image.Gray:
		format := ImageFormat{ChannelOrderIntensity, ChannelDataTypeUNormInt8}
		desc := ImageDescription{
			Type:     MemObjectTypeImage2D,
			Width:    m.Bounds().Dx(),
			Height:   m.Bounds().Dy(),
			RowPitch: m.Stride,
		}
		return ctx.CreateImage(flags, format, desc, m.Pix)
	case *image.RGBA:
		format := ImageFormat{ChannelOrderRGBA, ChannelDataTypeUNormInt8}
		desc := ImageDescription{
			Type:     MemObjectTypeImage2D,
			Width:    m.Bounds().Dx(),
			Height:   m.Bounds().Dy(),
			RowPitch: m.Stride,
		}
		return ctx.CreateImage(flags, format, desc, m.Pix)
	}

	b := img.Bounds()
	w := b.Dx()
	h := b.Dy()
	data := make([]byte, w*h*4)
	dataOffset := 0
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			c := img.At(x+b.Min.X, y+b.Min.Y)
			r, g, b, a := c.RGBA()
			data[dataOffset] = uint8(r >> 8)
			data[dataOffset+1] = uint8(g >> 8)
			data[dataOffset+2] = uint8(b >> 8)
			data[dataOffset+3] = uint8(a >> 8)
			dataOffset += 4
		}
	}
	return ctx.CreateImageSimple(flags, w, h, ChannelOrderRGBA, ChannelDataTypeUNormInt8, data)
}
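CreateImageFromImage handles *image.Gray and *image.RGBA directly and converts any other image.Image to RGBA on the host before upload. A minimal usage sketch follows; the MemReadOnly and MemCopyHostPtr flag names are assumed to be the MemFlag constants defined elsewhere in this package (they are not part of this file's diff):

package clsketch

import (
	"image"

	"github.com/Gustav-Simonsson/go-opencl/cl"
)

// UploadImage copies an image.Image into a read-only OpenCL image object,
// letting the runtime copy the pixel data from host memory at creation time.
func UploadImage(ctx *cl.Context, img image.Image) (*cl.MemObject, error) {
	return ctx.CreateImageFromImage(cl.MemReadOnly|cl.MemCopyHostPtr, img)
}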
127
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/kernel.go
generated
vendored
@ -1,127 +0,0 @@
package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import (
	"fmt"
	"unsafe"
)

type ErrUnsupportedArgumentType struct {
	Index int
	Value interface{}
}

func (e ErrUnsupportedArgumentType) Error() string {
	return fmt.Sprintf("cl: unsupported argument type for index %d: %+v", e.Index, e.Value)
}

type Kernel struct {
	clKernel C.cl_kernel
	name     string
}

type LocalBuffer int

func releaseKernel(k *Kernel) {
	if k.clKernel != nil {
		C.clReleaseKernel(k.clKernel)
		k.clKernel = nil
	}
}

func (k *Kernel) Release() {
	releaseKernel(k)
}

func (k *Kernel) SetArgs(args ...interface{}) error {
	for index, arg := range args {
		if err := k.SetArg(index, arg); err != nil {
			return err
		}
	}
	return nil
}

func (k *Kernel) SetArg(index int, arg interface{}) error {
	switch val := arg.(type) {
	case uint8:
		return k.SetArgUint8(index, val)
	case int8:
		return k.SetArgInt8(index, val)
	case uint32:
		return k.SetArgUint32(index, val)
	case uint64:
		return k.SetArgUint64(index, val)
	case int32:
		return k.SetArgInt32(index, val)
	case float32:
		return k.SetArgFloat32(index, val)
	case *MemObject:
		return k.SetArgBuffer(index, val)
	case LocalBuffer:
		return k.SetArgLocal(index, int(val))
	default:
		return ErrUnsupportedArgumentType{Index: index, Value: arg}
	}
}

func (k *Kernel) SetArgBuffer(index int, buffer *MemObject) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(buffer.clMem)), unsafe.Pointer(&buffer.clMem))
}

func (k *Kernel) SetArgFloat32(index int, val float32) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
}

func (k *Kernel) SetArgInt8(index int, val int8) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
}

func (k *Kernel) SetArgUint8(index int, val uint8) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
}

func (k *Kernel) SetArgInt32(index int, val int32) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
}

func (k *Kernel) SetArgUint32(index int, val uint32) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
}

func (k *Kernel) SetArgUint64(index int, val uint64) error {
	return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
}

func (k *Kernel) SetArgLocal(index int, size int) error {
	return k.SetArgUnsafe(index, size, nil)
}

func (k *Kernel) SetArgUnsafe(index, argSize int, arg unsafe.Pointer) error {
	//fmt.Println("FUNKY: ", index, argSize)
	return toError(C.clSetKernelArg(k.clKernel, C.cl_uint(index), C.size_t(argSize), arg))
}

func (k *Kernel) PreferredWorkGroupSizeMultiple(device *Device) (int, error) {
	var size C.size_t
	err := C.clGetKernelWorkGroupInfo(k.clKernel, device.nullableId(), C.CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, C.size_t(unsafe.Sizeof(size)), unsafe.Pointer(&size), nil)
	return int(size), toError(err)
}

func (k *Kernel) WorkGroupSize(device *Device) (int, error) {
	var size C.size_t
	err := C.clGetKernelWorkGroupInfo(k.clKernel, device.nullableId(), C.CL_KERNEL_WORK_GROUP_SIZE, C.size_t(unsafe.Sizeof(size)), unsafe.Pointer(&size), nil)
	return int(size), toError(err)
}

func (k *Kernel) NumArgs() (int, error) {
	var num C.cl_uint
	err := C.clGetKernelInfo(k.clKernel, C.CL_KERNEL_NUM_ARGS, C.size_t(unsafe.Sizeof(num)), unsafe.Pointer(&num), nil)
	return int(num), toError(err)
}
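SetArg dispatches on the dynamic type of each argument, so an entire argument list can be bound with one SetArgs call, and unsupported types surface as ErrUnsupportedArgumentType. A sketch of binding arguments for a kernel that takes two buffers, an element count and a scratch area in __local memory; the kernel and buffers are assumed to have been created elsewhere, and the helper name is illustrative:

package clsketch

import "github.com/Gustav-Simonsson/go-opencl/cl"

// bindArgs shows how SetArgs maps Go values onto kernel arguments:
// *MemObject maps to a cl_mem handle, uint32 to a 4-byte scalar, and
// LocalBuffer(n) to n bytes of __local memory (a nil pointer of size n).
func bindArgs(k *cl.Kernel, in, out *cl.MemObject, n uint32) error {
	return k.SetArgs(in, out, n, cl.LocalBuffer(256))
}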
7
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/kernel10.go
generated
vendored
@ -1,7 +0,0 @@
// +build !cl12

package cl

func (k *Kernel) ArgName(index int) (string, error) {
	return "", ErrUnsupported
}
20
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/kernel12.go
generated
vendored
@ -1,20 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import "unsafe"

func (k *Kernel) ArgName(index int) (string, error) {
	var strC [1024]byte
	var strN C.size_t
	if err := C.clGetKernelArgInfo(k.clKernel, C.cl_uint(index), C.CL_KERNEL_ARG_NAME, 1024, unsafe.Pointer(&strC[0]), &strN); err != C.CL_SUCCESS {
		return "", toError(err)
	}
	return string(strC[:strN]), nil
}
83
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/platform.go
generated
vendored
@ -1,83 +0,0 @@
package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import "unsafe"

const maxPlatforms = 32

type Platform struct {
	id C.cl_platform_id
}

// Obtain the list of platforms available.
func GetPlatforms() ([]*Platform, error) {
	var platformIds [maxPlatforms]C.cl_platform_id
	var nPlatforms C.cl_uint
	if err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIds[0], &nPlatforms); err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	platforms := make([]*Platform, nPlatforms)
	for i := 0; i < int(nPlatforms); i++ {
		platforms[i] = &Platform{id: platformIds[i]}
	}
	return platforms, nil
}

func (p *Platform) GetDevices(deviceType DeviceType) ([]*Device, error) {
	return GetDevices(p, deviceType)
}

func (p *Platform) getInfoString(param C.cl_platform_info) (string, error) {
	var strC [2048]byte
	var strN C.size_t
	if err := C.clGetPlatformInfo(p.id, param, 2048, unsafe.Pointer(&strC[0]), &strN); err != C.CL_SUCCESS {
		return "", toError(err)
	}
	return string(strC[:(strN - 1)]), nil
}

func (p *Platform) Name() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_NAME); err != nil {
		panic("Platform.Name() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Vendor() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_VENDOR); err != nil {
		panic("Platform.Vendor() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Profile() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_PROFILE); err != nil {
		panic("Platform.Profile() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Version() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_VERSION); err != nil {
		panic("Platform.Version() should never fail")
	} else {
		return str
	}
}

func (p *Platform) Extensions() string {
	if str, err := p.getInfoString(C.CL_PLATFORM_EXTENSIONS); err != nil {
		panic("Platform.Extensions() should never fail")
	} else {
		return str
	}
}
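GetPlatforms and Platform.GetDevices are the entry points into the binding; everything else hangs off the *Device values they return. A runnable sketch that lists every platform and its devices follows; cl.DeviceTypeAll is assumed to be the DeviceType constant mirroring CL_DEVICE_TYPE_ALL defined elsewhere in the package:

package main

import (
	"fmt"
	"log"

	"github.com/Gustav-Simonsson/go-opencl/cl"
)

func main() {
	platforms, err := cl.GetPlatforms()
	if err != nil {
		log.Fatalf("GetPlatforms: %v", err)
	}
	for _, p := range platforms {
		fmt.Printf("platform %s (%s, %s)\n", p.Name(), p.Vendor(), p.Version())
		// DeviceTypeAll is assumed; it mirrors CL_DEVICE_TYPE_ALL.
		devices, err := p.GetDevices(cl.DeviceTypeAll)
		if err != nil {
			log.Fatalf("GetDevices: %v", err)
		}
		for _, d := range devices {
			fmt.Printf("  device %s: %d compute units, %d bytes global memory\n",
				d.Name(), d.MaxComputeUnits(), d.GlobalMemSize())
		}
	}
}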
105
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/program.go
generated
vendored
@ -1,105 +0,0 @@
package cl

// #include <stdlib.h>
// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import (
	"fmt"
	"runtime"
	"unsafe"
)

type BuildError struct {
	Message string
	Device  *Device
}

func (e BuildError) Error() string {
	if e.Device != nil {
		return fmt.Sprintf("cl: build error on %q: %s", e.Device.Name(), e.Message)
	} else {
		return fmt.Sprintf("cl: build error: %s", e.Message)
	}
}

type Program struct {
	clProgram C.cl_program
	devices   []*Device
}

func releaseProgram(p *Program) {
	if p.clProgram != nil {
		C.clReleaseProgram(p.clProgram)
		p.clProgram = nil
	}
}

func (p *Program) Release() {
	releaseProgram(p)
}

func (p *Program) BuildProgram(devices []*Device, options string) error {
	var cOptions *C.char
	if options != "" {
		cOptions = C.CString(options)
		defer C.free(unsafe.Pointer(cOptions))
	}
	var deviceList []C.cl_device_id
	var deviceListPtr *C.cl_device_id
	numDevices := C.cl_uint(len(devices))
	if devices != nil && len(devices) > 0 {
		deviceList = buildDeviceIdList(devices)
		deviceListPtr = &deviceList[0]
	}
	if err := C.clBuildProgram(p.clProgram, numDevices, deviceListPtr, cOptions, nil, nil); err != C.CL_SUCCESS {
		buffer := make([]byte, 4096)
		var bLen C.size_t
		var err C.cl_int

		for _, dev := range p.devices {
			for i := 2; i >= 0; i-- {
				err = C.clGetProgramBuildInfo(p.clProgram, dev.id, C.CL_PROGRAM_BUILD_LOG, C.size_t(len(buffer)), unsafe.Pointer(&buffer[0]), &bLen)
				if err == C.CL_INVALID_VALUE && i > 0 && bLen < 1024*1024 {
					// INVALID_VALUE probably means our buffer isn't large enough
					buffer = make([]byte, bLen)
				} else {
					break
				}
			}
			if err != C.CL_SUCCESS {
				return toError(err)
			}

			if bLen > 1 {
				return BuildError{
					Device:  dev,
					Message: string(buffer[:bLen-1]),
				}
			}
		}

		return BuildError{
			Device:  nil,
			Message: "build failed and produced no log entries",
		}
	}
	return nil
}

func (p *Program) CreateKernel(name string) (*Kernel, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	var err C.cl_int
	clKernel := C.clCreateKernel(p.clProgram, cName, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	kernel := &Kernel{clKernel: clKernel, name: name}
	runtime.SetFinalizer(kernel, releaseKernel)
	return kernel, nil
}
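When clBuildProgram fails, BuildProgram re-queries clGetProgramBuildInfo with a grown buffer if the first 4 KB is too small and returns the compiler output as a BuildError tied to the failing device. A sketch of the resulting error-handling pattern on the caller side; the *Program is assumed to have been created from source elsewhere, and the kernel name "square" is illustrative only:

package clsketch

import (
	"log"

	"github.com/Gustav-Simonsson/go-opencl/cl"
)

// buildAndGetKernel builds prog for the given devices and extracts one kernel,
// logging the OpenCL compiler output when the build fails.
func buildAndGetKernel(prog *cl.Program, devices []*cl.Device) (*cl.Kernel, error) {
	if err := prog.BuildProgram(devices, ""); err != nil {
		// BuildError carries the per-device build log in Message.
		if be, ok := err.(cl.BuildError); ok {
			log.Printf("OpenCL build log: %s", be.Message)
		}
		return nil, err
	}
	return prog.CreateKernel("square") // "square" is an illustrative kernel name
}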
193
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/queue.go
generated
vendored
193
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/queue.go
generated
vendored
@ -1,193 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type CommandQueueProperty int
|
||||
|
||||
const (
|
||||
CommandQueueOutOfOrderExecModeEnable CommandQueueProperty = C.CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE
|
||||
CommandQueueProfilingEnable CommandQueueProperty = C.CL_QUEUE_PROFILING_ENABLE
|
||||
)
|
||||
|
||||
type CommandQueue struct {
|
||||
clQueue C.cl_command_queue
|
||||
device *Device
|
||||
}
|
||||
|
||||
func releaseCommandQueue(q *CommandQueue) {
|
||||
if q.clQueue != nil {
|
||||
C.clReleaseCommandQueue(q.clQueue)
|
||||
q.clQueue = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Call clReleaseCommandQueue on the CommandQueue. Using the CommandQueue after Release will cause a panick.
|
||||
func (q *CommandQueue) Release() {
|
||||
releaseCommandQueue(q)
|
||||
}
|
||||
|
||||
// Blocks until all previously queued OpenCL commands in a command-queue are issued to the associated device and have completed.
|
||||
func (q *CommandQueue) Finish() error {
|
||||
return toError(C.clFinish(q.clQueue))
|
||||
}
|
||||
|
||||
// Issues all previously queued OpenCL commands in a command-queue to the device associated with the command-queue.
|
||||
func (q *CommandQueue) Flush() error {
|
||||
return toError(C.clFlush(q.clQueue))
|
||||
}
|
||||
|
||||
// Enqueues a command to map a region of the buffer object given by buffer into the host address space and returns a pointer to this mapped region.
|
||||
func (q *CommandQueue) EnqueueMapBuffer(buffer *MemObject, blocking bool, flags MapFlag, offset, size int, eventWaitList []*Event) (*MappedMemObject, *Event, error) {
|
||||
var event C.cl_event
|
||||
var err C.cl_int
|
||||
ptr := C.clEnqueueMapBuffer(q.clQueue, buffer.clMem, clBool(blocking), flags.toCl(), C.size_t(offset), C.size_t(size), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, nil, toError(err)
|
||||
}
|
||||
ev := newEvent(event)
|
||||
if ptr == nil {
|
||||
return nil, ev, ErrUnknown
|
||||
}
|
||||
return &MappedMemObject{ptr: ptr, size: size}, ev, nil
|
||||
}
|
||||
|
||||
// Enqueues a command to map a region of an image object into the host address space and returns a pointer to this mapped region.
|
||||
func (q *CommandQueue) EnqueueMapImage(buffer *MemObject, blocking bool, flags MapFlag, origin, region [3]int, eventWaitList []*Event) (*MappedMemObject, *Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
var err C.cl_int
|
||||
var rowPitch, slicePitch C.size_t
|
||||
ptr := C.clEnqueueMapImage(q.clQueue, buffer.clMem, clBool(blocking), flags.toCl(), &cOrigin[0], &cRegion[0], &rowPitch, &slicePitch, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, nil, toError(err)
|
||||
}
|
||||
ev := newEvent(event)
|
||||
if ptr == nil {
|
||||
return nil, ev, ErrUnknown
|
||||
}
|
||||
size := 0 // TODO: could calculate this
|
||||
return &MappedMemObject{ptr: ptr, size: size, rowPitch: int(rowPitch), slicePitch: int(slicePitch)}, ev, nil
|
||||
}
|
||||
|
||||
// Enqueues a command to unmap a previously mapped region of a memory object.
|
||||
func (q *CommandQueue) EnqueueUnmapMemObject(buffer *MemObject, mappedObj *MappedMemObject, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
if err := C.clEnqueueUnmapMemObject(q.clQueue, buffer.clMem, mappedObj.ptr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
return newEvent(event), nil
|
||||
}
|
||||
|
||||
// Enqueues a command to copy a buffer object to another buffer object.
|
||||
func (q *CommandQueue) EnqueueCopyBuffer(srcBuffer, dstBuffer *MemObject, srcOffset, dstOffset, byteCount int, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueCopyBuffer(q.clQueue, srcBuffer.clMem, dstBuffer.clMem, C.size_t(srcOffset), C.size_t(dstOffset), C.size_t(byteCount), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueue commands to write to a buffer object from host memory.
|
||||
func (q *CommandQueue) EnqueueWriteBuffer(buffer *MemObject, blocking bool, offset, dataSize int, dataPtr unsafe.Pointer, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueWriteBuffer(q.clQueue, buffer.clMem, clBool(blocking), C.size_t(offset), C.size_t(dataSize), dataPtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueWriteBufferFloat32(buffer *MemObject, blocking bool, offset int, data []float32, eventWaitList []*Event) (*Event, error) {
|
||||
dataPtr := unsafe.Pointer(&data[0])
|
||||
dataSize := int(unsafe.Sizeof(data[0])) * len(data)
|
||||
return q.EnqueueWriteBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)
|
||||
}
|
||||
|
||||
// Enqueue commands to read from a buffer object to host memory.
|
||||
func (q *CommandQueue) EnqueueReadBuffer(buffer *MemObject, blocking bool, offset, dataSize int, dataPtr unsafe.Pointer, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueReadBuffer(q.clQueue, buffer.clMem, clBool(blocking), C.size_t(offset), C.size_t(dataSize), dataPtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueReadBufferFloat32(buffer *MemObject, blocking bool, offset int, data []float32, eventWaitList []*Event) (*Event, error) {
|
||||
dataPtr := unsafe.Pointer(&data[0])
|
||||
dataSize := int(unsafe.Sizeof(data[0])) * len(data)
|
||||
return q.EnqueueReadBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)
|
||||
}
|
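The Float32 helpers above only compute the byte size and host pointer before delegating to the generic read/write calls. A hedged round-trip sketch using them (queue and buffer assumed to exist and be large enough):

```go
// Sketch only: queue and buf are assumed to be created elsewhere.
func roundTripFloat32(queue *cl.CommandQueue, buf *cl.MemObject, in []float32) ([]float32, error) {
	// Blocking write: returns once the host slice has been copied to the device.
	if _, err := queue.EnqueueWriteBufferFloat32(buf, true, 0, in, nil); err != nil {
		return nil, err
	}
	out := make([]float32, len(in))
	// Blocking read back into a host slice of the same length.
	if _, err := queue.EnqueueReadBufferFloat32(buf, true, 0, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}
```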
||||
|
||||
// Enqueues a command to execute a kernel on a device.
|
||||
func (q *CommandQueue) EnqueueNDRangeKernel(kernel *Kernel, globalWorkOffset, globalWorkSize, localWorkSize []int, eventWaitList []*Event) (*Event, error) {
|
||||
workDim := len(globalWorkSize)
|
||||
var globalWorkOffsetList []C.size_t
|
||||
var globalWorkOffsetPtr *C.size_t
|
||||
if globalWorkOffset != nil {
|
||||
globalWorkOffsetList = make([]C.size_t, len(globalWorkOffset))
|
||||
for i, off := range globalWorkOffset {
|
||||
globalWorkOffsetList[i] = C.size_t(off)
|
||||
}
|
||||
globalWorkOffsetPtr = &globalWorkOffsetList[0]
|
||||
}
|
||||
var globalWorkSizeList []C.size_t
|
||||
var globalWorkSizePtr *C.size_t
|
||||
if globalWorkSize != nil {
|
||||
globalWorkSizeList = make([]C.size_t, len(globalWorkSize))
|
||||
for i, off := range globalWorkSize {
|
||||
globalWorkSizeList[i] = C.size_t(off)
|
||||
}
|
||||
globalWorkSizePtr = &globalWorkSizeList[0]
|
||||
}
|
||||
var localWorkSizeList []C.size_t
|
||||
var localWorkSizePtr *C.size_t
|
||||
if localWorkSize != nil {
|
||||
localWorkSizeList = make([]C.size_t, len(localWorkSize))
|
||||
for i, off := range localWorkSize {
|
||||
localWorkSizeList[i] = C.size_t(off)
|
||||
}
|
||||
localWorkSizePtr = &localWorkSizeList[0]
|
||||
}
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueNDRangeKernel(q.clQueue, kernel.clKernel, C.cl_uint(workDim), globalWorkOffsetPtr, globalWorkSizePtr, localWorkSizePtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
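In EnqueueNDRangeKernel, nil slices are passed through as NULL pointers on the C side, so the offset and local work size can be omitted. A small one-dimensional launch sketch (not part of the diff; kernel arguments are assumed to have been set elsewhere):

```go
// Sketch only: kernel arguments are assumed to have been set already.
func runKernel1D(queue *cl.CommandQueue, kernel *cl.Kernel, globalSize int) error {
	// nil offset starts the range at 0; nil local size lets the
	// OpenCL implementation pick a work-group size.
	if _, err := queue.EnqueueNDRangeKernel(kernel, nil, []int{globalSize}, nil, nil); err != nil {
		return err
	}
	return queue.Finish()
}
```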
||||
|
||||
// Enqueues a command to read from a 2D or 3D image object to host memory.
|
||||
func (q *CommandQueue) EnqueueReadImage(image *MemObject, blocking bool, origin, region [3]int, rowPitch, slicePitch int, data []byte, eventWaitList []*Event) (*Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueReadImage(q.clQueue, image.clMem, clBool(blocking), &cOrigin[0], &cRegion[0], C.size_t(rowPitch), C.size_t(slicePitch), unsafe.Pointer(&data[0]), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a command to write to a 2D or 3D image object from host memory.
|
||||
func (q *CommandQueue) EnqueueWriteImage(image *MemObject, blocking bool, origin, region [3]int, rowPitch, slicePitch int, data []byte, eventWaitList []*Event) (*Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueWriteImage(q.clQueue, image.clMem, clBool(blocking), &cOrigin[0], &cRegion[0], C.size_t(rowPitch), C.size_t(slicePitch), unsafe.Pointer(&data[0]), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueFillBuffer(buffer *MemObject, pattern unsafe.Pointer, patternSize, offset, size int, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueFillBuffer(q.clQueue, buffer.clMem, pattern, C.size_t(patternSize), C.size_t(offset), C.size_t(size), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// A synchronization point that enqueues a barrier operation.
|
||||
func (q *CommandQueue) EnqueueBarrierWithWaitList(eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueBarrierWithWaitList(q.clQueue, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a marker command which waits for either a list of events to complete, or all previously enqueued commands to complete.
|
||||
func (q *CommandQueue) EnqueueMarkerWithWaitList(eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueMarkerWithWaitList(q.clQueue, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
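Barrier and marker commands are how progress on one queue is exposed as an event to other code. A hedged sketch (not part of the diff) that copies a buffer and hands back a marker event covering everything enqueued so far:

```go
// Sketch only: src and dst are assumed to be buffers of at least byteCount bytes.
func copyWithMarker(queue *cl.CommandQueue, src, dst *cl.MemObject, byteCount int) (*cl.Event, error) {
	if _, err := queue.EnqueueCopyBuffer(src, dst, 0, 0, byteCount, nil); err != nil {
		return nil, err
	}
	// The marker's event completes once all previously enqueued commands finish,
	// so it can be passed to WaitForEvents or used in another wait list.
	return queue.EnqueueMarkerWithWaitList(nil)
}
```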
487
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types.go
generated
vendored
@ -1,487 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnknown = errors.New("cl: unknown error") // Generally an unexpected result from an OpenCL function (e.g. CL_SUCCESS but null pointer)
|
||||
)
|
||||
|
||||
type ErrOther int
|
||||
|
||||
func (e ErrOther) Error() string {
|
||||
return fmt.Sprintf("cl: error %d", int(e))
|
||||
}
|
||||
|
||||
var (
|
||||
ErrDeviceNotFound = errors.New("cl: Device Not Found")
|
||||
ErrDeviceNotAvailable = errors.New("cl: Device Not Available")
|
||||
ErrCompilerNotAvailable = errors.New("cl: Compiler Not Available")
|
||||
ErrMemObjectAllocationFailure = errors.New("cl: Mem Object Allocation Failure")
|
||||
ErrOutOfResources = errors.New("cl: Out Of Resources")
|
||||
ErrOutOfHostMemory = errors.New("cl: Out Of Host Memory")
|
||||
ErrProfilingInfoNotAvailable = errors.New("cl: Profiling Info Not Available")
|
||||
ErrMemCopyOverlap = errors.New("cl: Mem Copy Overlap")
|
||||
ErrImageFormatMismatch = errors.New("cl: Image Format Mismatch")
|
||||
ErrImageFormatNotSupported = errors.New("cl: Image Format Not Supported")
|
||||
ErrBuildProgramFailure = errors.New("cl: Build Program Failure")
|
||||
ErrMapFailure = errors.New("cl: Map Failure")
|
||||
ErrMisalignedSubBufferOffset = errors.New("cl: Misaligned Sub Buffer Offset")
|
||||
ErrExecStatusErrorForEventsInWaitList = errors.New("cl: Exec Status Error For Events In Wait List")
|
||||
ErrCompileProgramFailure = errors.New("cl: Compile Program Failure")
|
||||
ErrLinkerNotAvailable = errors.New("cl: Linker Not Available")
|
||||
ErrLinkProgramFailure = errors.New("cl: Link Program Failure")
|
||||
ErrDevicePartitionFailed = errors.New("cl: Device Partition Failed")
|
||||
ErrKernelArgInfoNotAvailable = errors.New("cl: Kernel Arg Info Not Available")
|
||||
ErrInvalidValue = errors.New("cl: Invalid Value")
|
||||
ErrInvalidDeviceType = errors.New("cl: Invalid Device Type")
|
||||
ErrInvalidPlatform = errors.New("cl: Invalid Platform")
|
||||
ErrInvalidDevice = errors.New("cl: Invalid Device")
|
||||
ErrInvalidContext = errors.New("cl: Invalid Context")
|
||||
ErrInvalidQueueProperties = errors.New("cl: Invalid Queue Properties")
|
||||
ErrInvalidCommandQueue = errors.New("cl: Invalid Command Queue")
|
||||
ErrInvalidHostPtr = errors.New("cl: Invalid Host Ptr")
|
||||
ErrInvalidMemObject = errors.New("cl: Invalid Mem Object")
|
||||
ErrInvalidImageFormatDescriptor = errors.New("cl: Invalid Image Format Descriptor")
|
||||
ErrInvalidImageSize = errors.New("cl: Invalid Image Size")
|
||||
ErrInvalidSampler = errors.New("cl: Invalid Sampler")
|
||||
ErrInvalidBinary = errors.New("cl: Invalid Binary")
|
||||
ErrInvalidBuildOptions = errors.New("cl: Invalid Build Options")
|
||||
ErrInvalidProgram = errors.New("cl: Invalid Program")
|
||||
ErrInvalidProgramExecutable = errors.New("cl: Invalid Program Executable")
|
||||
ErrInvalidKernelName = errors.New("cl: Invalid Kernel Name")
|
||||
ErrInvalidKernelDefinition = errors.New("cl: Invalid Kernel Definition")
|
||||
ErrInvalidKernel = errors.New("cl: Invalid Kernel")
|
||||
ErrInvalidArgIndex = errors.New("cl: Invalid Arg Index")
|
||||
ErrInvalidArgValue = errors.New("cl: Invalid Arg Value")
|
||||
ErrInvalidArgSize = errors.New("cl: Invalid Arg Size")
|
||||
ErrInvalidKernelArgs = errors.New("cl: Invalid Kernel Args")
|
||||
ErrInvalidWorkDimension = errors.New("cl: Invalid Work Dimension")
|
||||
ErrInvalidWorkGroupSize = errors.New("cl: Invalid Work Group Size")
|
||||
ErrInvalidWorkItemSize = errors.New("cl: Invalid Work Item Size")
|
||||
ErrInvalidGlobalOffset = errors.New("cl: Invalid Global Offset")
|
||||
ErrInvalidEventWaitList = errors.New("cl: Invalid Event Wait List")
|
||||
ErrInvalidEvent = errors.New("cl: Invalid Event")
|
||||
ErrInvalidOperation = errors.New("cl: Invalid Operation")
|
||||
ErrInvalidGlObject = errors.New("cl: Invalid Gl Object")
|
||||
ErrInvalidBufferSize = errors.New("cl: Invalid Buffer Size")
|
||||
ErrInvalidMipLevel = errors.New("cl: Invalid Mip Level")
|
||||
ErrInvalidGlobalWorkSize = errors.New("cl: Invalid Global Work Size")
|
||||
ErrInvalidProperty = errors.New("cl: Invalid Property")
|
||||
ErrInvalidImageDescriptor = errors.New("cl: Invalid Image Descriptor")
|
||||
ErrInvalidCompilerOptions = errors.New("cl: Invalid Compiler Options")
|
||||
ErrInvalidLinkerOptions = errors.New("cl: Invalid Linker Options")
|
||||
ErrInvalidDevicePartitionCount = errors.New("cl: Invalid Device Partition Count")
|
||||
)
|
||||
var errorMap = map[C.cl_int]error{
|
||||
C.CL_SUCCESS: nil,
|
||||
C.CL_DEVICE_NOT_FOUND: ErrDeviceNotFound,
|
||||
C.CL_DEVICE_NOT_AVAILABLE: ErrDeviceNotAvailable,
|
||||
C.CL_COMPILER_NOT_AVAILABLE: ErrCompilerNotAvailable,
|
||||
C.CL_MEM_OBJECT_ALLOCATION_FAILURE: ErrMemObjectAllocationFailure,
|
||||
C.CL_OUT_OF_RESOURCES: ErrOutOfResources,
|
||||
C.CL_OUT_OF_HOST_MEMORY: ErrOutOfHostMemory,
|
||||
C.CL_PROFILING_INFO_NOT_AVAILABLE: ErrProfilingInfoNotAvailable,
|
||||
C.CL_MEM_COPY_OVERLAP: ErrMemCopyOverlap,
|
||||
C.CL_IMAGE_FORMAT_MISMATCH: ErrImageFormatMismatch,
|
||||
C.CL_IMAGE_FORMAT_NOT_SUPPORTED: ErrImageFormatNotSupported,
|
||||
C.CL_BUILD_PROGRAM_FAILURE: ErrBuildProgramFailure,
|
||||
C.CL_MAP_FAILURE: ErrMapFailure,
|
||||
C.CL_MISALIGNED_SUB_BUFFER_OFFSET: ErrMisalignedSubBufferOffset,
|
||||
C.CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST: ErrExecStatusErrorForEventsInWaitList,
|
||||
C.CL_INVALID_VALUE: ErrInvalidValue,
|
||||
C.CL_INVALID_DEVICE_TYPE: ErrInvalidDeviceType,
|
||||
C.CL_INVALID_PLATFORM: ErrInvalidPlatform,
|
||||
C.CL_INVALID_DEVICE: ErrInvalidDevice,
|
||||
C.CL_INVALID_CONTEXT: ErrInvalidContext,
|
||||
C.CL_INVALID_QUEUE_PROPERTIES: ErrInvalidQueueProperties,
|
||||
C.CL_INVALID_COMMAND_QUEUE: ErrInvalidCommandQueue,
|
||||
C.CL_INVALID_HOST_PTR: ErrInvalidHostPtr,
|
||||
C.CL_INVALID_MEM_OBJECT: ErrInvalidMemObject,
|
||||
C.CL_INVALID_IMAGE_FORMAT_DESCRIPTOR: ErrInvalidImageFormatDescriptor,
|
||||
C.CL_INVALID_IMAGE_SIZE: ErrInvalidImageSize,
|
||||
C.CL_INVALID_SAMPLER: ErrInvalidSampler,
|
||||
C.CL_INVALID_BINARY: ErrInvalidBinary,
|
||||
C.CL_INVALID_BUILD_OPTIONS: ErrInvalidBuildOptions,
|
||||
C.CL_INVALID_PROGRAM: ErrInvalidProgram,
|
||||
C.CL_INVALID_PROGRAM_EXECUTABLE: ErrInvalidProgramExecutable,
|
||||
C.CL_INVALID_KERNEL_NAME: ErrInvalidKernelName,
|
||||
C.CL_INVALID_KERNEL_DEFINITION: ErrInvalidKernelDefinition,
|
||||
C.CL_INVALID_KERNEL: ErrInvalidKernel,
|
||||
C.CL_INVALID_ARG_INDEX: ErrInvalidArgIndex,
|
||||
C.CL_INVALID_ARG_VALUE: ErrInvalidArgValue,
|
||||
C.CL_INVALID_ARG_SIZE: ErrInvalidArgSize,
|
||||
C.CL_INVALID_KERNEL_ARGS: ErrInvalidKernelArgs,
|
||||
C.CL_INVALID_WORK_DIMENSION: ErrInvalidWorkDimension,
|
||||
C.CL_INVALID_WORK_GROUP_SIZE: ErrInvalidWorkGroupSize,
|
||||
C.CL_INVALID_WORK_ITEM_SIZE: ErrInvalidWorkItemSize,
|
||||
C.CL_INVALID_GLOBAL_OFFSET: ErrInvalidGlobalOffset,
|
||||
C.CL_INVALID_EVENT_WAIT_LIST: ErrInvalidEventWaitList,
|
||||
C.CL_INVALID_EVENT: ErrInvalidEvent,
|
||||
C.CL_INVALID_OPERATION: ErrInvalidOperation,
|
||||
C.CL_INVALID_GL_OBJECT: ErrInvalidGlObject,
|
||||
C.CL_INVALID_BUFFER_SIZE: ErrInvalidBufferSize,
|
||||
C.CL_INVALID_MIP_LEVEL: ErrInvalidMipLevel,
|
||||
C.CL_INVALID_GLOBAL_WORK_SIZE: ErrInvalidGlobalWorkSize,
|
||||
C.CL_INVALID_PROPERTY: ErrInvalidProperty,
|
||||
}
|
||||
|
||||
func toError(code C.cl_int) error {
|
||||
if err, ok := errorMap[code]; ok {
|
||||
return err
|
||||
}
|
||||
return ErrOther(code)
|
||||
}
|
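Callers can compare returned errors against the sentinel values above; any status code without a named sentinel surfaces as `ErrOther`, which still carries the raw value. A small illustrative helper, not part of the package:

```go
// Sketch only: caller-side classification of the sentinel errors above.
func isDeviceOOM(err error) bool {
	switch err {
	case cl.ErrMemObjectAllocationFailure, cl.ErrOutOfResources, cl.ErrOutOfHostMemory:
		return true
	}
	return false
}

// Codes without a named sentinel come back as ErrOther and expose the raw value.
func rawCode(err error) (int, bool) {
	if other, ok := err.(cl.ErrOther); ok {
		return int(other), true
	}
	return 0, false
}
```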
||||
|
||||
type LocalMemType int
|
||||
|
||||
const (
|
||||
LocalMemTypeNone LocalMemType = C.CL_NONE
|
||||
LocalMemTypeGlobal LocalMemType = C.CL_GLOBAL
|
||||
LocalMemTypeLocal LocalMemType = C.CL_LOCAL
|
||||
)
|
||||
|
||||
var localMemTypeMap = map[LocalMemType]string{
|
||||
LocalMemTypeNone: "None",
|
||||
LocalMemTypeGlobal: "Global",
|
||||
LocalMemTypeLocal: "Local",
|
||||
}
|
||||
|
||||
func (t LocalMemType) String() string {
|
||||
name := localMemTypeMap[t]
|
||||
if name == "" {
|
||||
name = "Unknown"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ExecCapability int
|
||||
|
||||
const (
|
||||
ExecCapabilityKernel ExecCapability = C.CL_EXEC_KERNEL // The OpenCL device can execute OpenCL kernels.
|
||||
ExecCapabilityNativeKernel ExecCapability = C.CL_EXEC_NATIVE_KERNEL // The OpenCL device can execute native kernels.
|
||||
)
|
||||
|
||||
func (ec ExecCapability) String() string {
|
||||
var parts []string
|
||||
if ec&ExecCapabilityKernel != 0 {
|
||||
parts = append(parts, "Kernel")
|
||||
}
|
||||
if ec&ExecCapabilityNativeKernel != 0 {
|
||||
parts = append(parts, "NativeKernel")
|
||||
}
|
||||
if parts == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
type MemCacheType int
|
||||
|
||||
const (
|
||||
MemCacheTypeNone MemCacheType = C.CL_NONE
|
||||
MemCacheTypeReadOnlyCache MemCacheType = C.CL_READ_ONLY_CACHE
|
||||
MemCacheTypeReadWriteCache MemCacheType = C.CL_READ_WRITE_CACHE
|
||||
)
|
||||
|
||||
func (ct MemCacheType) String() string {
|
||||
switch ct {
|
||||
case MemCacheTypeNone:
|
||||
return "None"
|
||||
case MemCacheTypeReadOnlyCache:
|
||||
return "ReadOnly"
|
||||
case MemCacheTypeReadWriteCache:
|
||||
return "ReadWrite"
|
||||
}
|
||||
return fmt.Sprintf("Unknown(%x)", int(ct))
|
||||
}
|
||||
|
||||
type MemFlag int
|
||||
|
||||
const (
|
||||
MemReadWrite MemFlag = C.CL_MEM_READ_WRITE
|
||||
MemWriteOnly MemFlag = C.CL_MEM_WRITE_ONLY
|
||||
MemReadOnly MemFlag = C.CL_MEM_READ_ONLY
|
||||
MemUseHostPtr MemFlag = C.CL_MEM_USE_HOST_PTR
|
||||
MemAllocHostPtr MemFlag = C.CL_MEM_ALLOC_HOST_PTR
|
||||
MemCopyHostPtr MemFlag = C.CL_MEM_COPY_HOST_PTR
|
||||
|
||||
MemWriteOnlyHost MemFlag = C.CL_MEM_HOST_WRITE_ONLY
|
||||
MemReadOnlyHost MemFlag = C.CL_MEM_HOST_READ_ONLY
|
||||
MemNoAccessHost MemFlag = C.CL_MEM_HOST_NO_ACCESS
|
||||
)
|
||||
|
||||
type MemObjectType int
|
||||
|
||||
const (
|
||||
MemObjectTypeBuffer MemObjectType = C.CL_MEM_OBJECT_BUFFER
|
||||
MemObjectTypeImage2D MemObjectType = C.CL_MEM_OBJECT_IMAGE2D
|
||||
MemObjectTypeImage3D MemObjectType = C.CL_MEM_OBJECT_IMAGE3D
|
||||
)
|
||||
|
||||
type MapFlag int
|
||||
|
||||
const (
|
||||
// This flag specifies that the region being mapped in the memory object is being mapped for reading.
|
||||
MapFlagRead MapFlag = C.CL_MAP_READ
|
||||
MapFlagWrite MapFlag = C.CL_MAP_WRITE
|
||||
MapFlagWriteInvalidateRegion MapFlag = C.CL_MAP_WRITE_INVALIDATE_REGION
|
||||
)
|
||||
|
||||
func (mf MapFlag) toCl() C.cl_map_flags {
|
||||
return C.cl_map_flags(mf)
|
||||
}
|
||||
|
||||
type ChannelOrder int
|
||||
|
||||
const (
|
||||
ChannelOrderR ChannelOrder = C.CL_R
|
||||
ChannelOrderA ChannelOrder = C.CL_A
|
||||
ChannelOrderRG ChannelOrder = C.CL_RG
|
||||
ChannelOrderRA ChannelOrder = C.CL_RA
|
||||
ChannelOrderRGB ChannelOrder = C.CL_RGB
|
||||
ChannelOrderRGBA ChannelOrder = C.CL_RGBA
|
||||
ChannelOrderBGRA ChannelOrder = C.CL_BGRA
|
||||
ChannelOrderARGB ChannelOrder = C.CL_ARGB
|
||||
ChannelOrderIntensity ChannelOrder = C.CL_INTENSITY
|
||||
ChannelOrderLuminance ChannelOrder = C.CL_LUMINANCE
|
||||
ChannelOrderRx ChannelOrder = C.CL_Rx
|
||||
ChannelOrderRGx ChannelOrder = C.CL_RGx
|
||||
ChannelOrderRGBx ChannelOrder = C.CL_RGBx
|
||||
)
|
||||
|
||||
var channelOrderNameMap = map[ChannelOrder]string{
|
||||
ChannelOrderR: "R",
|
||||
ChannelOrderA: "A",
|
||||
ChannelOrderRG: "RG",
|
||||
ChannelOrderRA: "RA",
|
||||
ChannelOrderRGB: "RGB",
|
||||
ChannelOrderRGBA: "RGBA",
|
||||
ChannelOrderBGRA: "BGRA",
|
||||
ChannelOrderARGB: "ARGB",
|
||||
ChannelOrderIntensity: "Intensity",
|
||||
ChannelOrderLuminance: "Luminance",
|
||||
ChannelOrderRx: "Rx",
|
||||
ChannelOrderRGx: "RGx",
|
||||
ChannelOrderRGBx: "RGBx",
|
||||
}
|
||||
|
||||
func (co ChannelOrder) String() string {
|
||||
name := channelOrderNameMap[co]
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Unknown(%x)", int(co))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ChannelDataType int
|
||||
|
||||
const (
|
||||
ChannelDataTypeSNormInt8 ChannelDataType = C.CL_SNORM_INT8
|
||||
ChannelDataTypeSNormInt16 ChannelDataType = C.CL_SNORM_INT16
|
||||
ChannelDataTypeUNormInt8 ChannelDataType = C.CL_UNORM_INT8
|
||||
ChannelDataTypeUNormInt16 ChannelDataType = C.CL_UNORM_INT16
|
||||
ChannelDataTypeUNormShort565 ChannelDataType = C.CL_UNORM_SHORT_565
|
||||
ChannelDataTypeUNormShort555 ChannelDataType = C.CL_UNORM_SHORT_555
|
||||
ChannelDataTypeUNormInt101010 ChannelDataType = C.CL_UNORM_INT_101010
|
||||
ChannelDataTypeSignedInt8 ChannelDataType = C.CL_SIGNED_INT8
|
||||
ChannelDataTypeSignedInt16 ChannelDataType = C.CL_SIGNED_INT16
|
||||
ChannelDataTypeSignedInt32 ChannelDataType = C.CL_SIGNED_INT32
|
||||
ChannelDataTypeUnsignedInt8 ChannelDataType = C.CL_UNSIGNED_INT8
|
||||
ChannelDataTypeUnsignedInt16 ChannelDataType = C.CL_UNSIGNED_INT16
|
||||
ChannelDataTypeUnsignedInt32 ChannelDataType = C.CL_UNSIGNED_INT32
|
||||
ChannelDataTypeHalfFloat ChannelDataType = C.CL_HALF_FLOAT
|
||||
ChannelDataTypeFloat ChannelDataType = C.CL_FLOAT
|
||||
)
|
||||
|
||||
var channelDataTypeNameMap = map[ChannelDataType]string{
|
||||
ChannelDataTypeSNormInt8: "SNormInt8",
|
||||
ChannelDataTypeSNormInt16: "SNormInt16",
|
||||
ChannelDataTypeUNormInt8: "UNormInt8",
|
||||
ChannelDataTypeUNormInt16: "UNormInt16",
|
||||
ChannelDataTypeUNormShort565: "UNormShort565",
|
||||
ChannelDataTypeUNormShort555: "UNormShort555",
|
||||
ChannelDataTypeUNormInt101010: "UNormInt101010",
|
||||
ChannelDataTypeSignedInt8: "SignedInt8",
|
||||
ChannelDataTypeSignedInt16: "SignedInt16",
|
||||
ChannelDataTypeSignedInt32: "SignedInt32",
|
||||
ChannelDataTypeUnsignedInt8: "UnsignedInt8",
|
||||
ChannelDataTypeUnsignedInt16: "UnsignedInt16",
|
||||
ChannelDataTypeUnsignedInt32: "UnsignedInt32",
|
||||
ChannelDataTypeHalfFloat: "HalfFloat",
|
||||
ChannelDataTypeFloat: "Float",
|
||||
}
|
||||
|
||||
func (ct ChannelDataType) String() string {
|
||||
name := channelDataTypeNameMap[ct]
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Unknown(%x)", int(ct))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ImageFormat struct {
|
||||
ChannelOrder ChannelOrder
|
||||
ChannelDataType ChannelDataType
|
||||
}
|
||||
|
||||
func (f ImageFormat) toCl() C.cl_image_format {
|
||||
var format C.cl_image_format
|
||||
format.image_channel_order = C.cl_channel_order(f.ChannelOrder)
|
||||
format.image_channel_data_type = C.cl_channel_type(f.ChannelDataType)
|
||||
return format
|
||||
}
|
||||
|
||||
type ProfilingInfo int
|
||||
|
||||
const (
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event is enqueued in
|
||||
// a command-queue by the host.
|
||||
ProfilingInfoCommandQueued ProfilingInfo = C.CL_PROFILING_COMMAND_QUEUED
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event that has been
|
||||
// enqueued is submitted by the host to the device associated with the command-queue.
|
||||
ProfilingInfoCommandSubmit ProfilingInfo = C.CL_PROFILING_COMMAND_SUBMIT
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event starts execution on the device.
|
||||
ProfilingInfoCommandStart ProfilingInfo = C.CL_PROFILING_COMMAND_START
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event has finished
|
||||
// execution on the device.
|
||||
ProfilingInfoCommandEnd ProfilingInfo = C.CL_PROFILING_COMMAND_END
|
||||
)
|
||||
|
||||
type CommmandExecStatus int
|
||||
|
||||
const (
|
||||
CommmandExecStatusComplete CommmandExecStatus = C.CL_COMPLETE
|
||||
CommmandExecStatusRunning CommmandExecStatus = C.CL_RUNNING
|
||||
CommmandExecStatusSubmitted CommmandExecStatus = C.CL_SUBMITTED
|
||||
CommmandExecStatusQueued CommmandExecStatus = C.CL_QUEUED
|
||||
)
|
||||
|
||||
type Event struct {
|
||||
clEvent C.cl_event
|
||||
}
|
||||
|
||||
func releaseEvent(ev *Event) {
|
||||
if ev.clEvent != nil {
|
||||
C.clReleaseEvent(ev.clEvent)
|
||||
ev.clEvent = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Event) Release() {
|
||||
releaseEvent(e)
|
||||
}
|
||||
|
||||
func (e *Event) GetEventProfilingInfo(paramName ProfilingInfo) (int64, error) {
|
||||
var paramValue C.cl_ulong
|
||||
if err := C.clGetEventProfilingInfo(e.clEvent, C.cl_profiling_info(paramName), C.size_t(unsafe.Sizeof(paramValue)), unsafe.Pointer(¶mValue), nil); err != C.CL_SUCCESS {
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int64(paramValue), nil
|
||||
}
|
||||
|
||||
// Sets the execution status of a user event object.
|
||||
//
|
||||
// `status` specifies the new execution status to be set and
|
||||
// can be CL_COMPLETE or a negative integer value to indicate
|
||||
// an error. A negative integer value causes all enqueued commands
|
||||
// that wait on this user event to be terminated. clSetUserEventStatus
|
||||
// can only be called once to change the execution status of event.
|
||||
func (e *Event) SetUserEventStatus(status int) error {
|
||||
return toError(C.clSetUserEventStatus(e.clEvent, C.cl_int(status)))
|
||||
}
|
||||
|
||||
// Waits on the host thread for commands identified by event objects in
|
||||
// events to complete. A command is considered complete if its execution
|
||||
// status is CL_COMPLETE or a negative value. The events specified in
|
||||
// event_list act as synchronization points.
|
||||
//
|
||||
// If the cl_khr_gl_event extension is enabled, event objects can also be
|
||||
// used to reflect the status of an OpenGL sync object. The sync object
|
||||
// in turn refers to a fence command executing in an OpenGL command
|
||||
// stream. This provides another method of coordinating sharing of buffers
|
||||
// and images between OpenGL and OpenCL.
|
||||
func WaitForEvents(events []*Event) error {
|
||||
return toError(C.clWaitForEvents(C.cl_uint(len(events)), eventListPtr(events)))
|
||||
}
|
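Combining WaitForEvents with GetEventProfilingInfo gives per-command timing in device nanoseconds. A hedged sketch (not part of the diff; it assumes the command queue was created with profiling enabled, otherwise GetEventProfilingInfo returns ErrProfilingInfoNotAvailable):

```go
// Sketch only: kernel arguments are assumed to have been set elsewhere.
func kernelDurationNs(queue *cl.CommandQueue, kernel *cl.Kernel, global []int) (int64, error) {
	ev, err := queue.EnqueueNDRangeKernel(kernel, nil, global, nil, nil)
	if err != nil {
		return 0, err
	}
	// Wait for the kernel to finish before querying its timestamps.
	if err := cl.WaitForEvents([]*cl.Event{ev}); err != nil {
		return 0, err
	}
	start, err := ev.GetEventProfilingInfo(cl.ProfilingInfoCommandStart)
	if err != nil {
		return 0, err
	}
	end, err := ev.GetEventProfilingInfo(cl.ProfilingInfoCommandEnd)
	if err != nil {
		return 0, err
	}
	return end - start, nil
}
```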
||||
|
||||
func newEvent(clEvent C.cl_event) *Event {
|
||||
ev := &Event{clEvent: clEvent}
|
||||
runtime.SetFinalizer(ev, releaseEvent)
|
||||
return ev
|
||||
}
|
||||
|
||||
func eventListPtr(el []*Event) *C.cl_event {
|
||||
if el == nil {
|
||||
return nil
|
||||
}
|
||||
elist := make([]C.cl_event, len(el))
|
||||
for i, e := range el {
|
||||
elist[i] = e.clEvent
|
||||
}
|
||||
return (*C.cl_event)(&elist[0])
|
||||
}
|
||||
|
||||
func clBool(b bool) C.cl_bool {
|
||||
if b {
|
||||
return C.CL_TRUE
|
||||
}
|
||||
return C.CL_FALSE
|
||||
}
|
||||
|
||||
func sizeT3(i3 [3]int) [3]C.size_t {
|
||||
var val [3]C.size_t
|
||||
val[0] = C.size_t(i3[0])
|
||||
val[1] = C.size_t(i3[1])
|
||||
val[2] = C.size_t(i3[2])
|
||||
return val
|
||||
}
|
||||
|
||||
type MappedMemObject struct {
|
||||
ptr unsafe.Pointer
|
||||
size int
|
||||
rowPitch int
|
||||
slicePitch int
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) ByteSlice() []byte {
|
||||
var byteSlice []byte
|
||||
sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&byteSlice))
|
||||
sliceHeader.Cap = mb.size
|
||||
sliceHeader.Len = mb.size
|
||||
sliceHeader.Data = uintptr(mb.ptr)
|
||||
return byteSlice
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) Ptr() unsafe.Pointer {
|
||||
return mb.ptr
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) Size() int {
|
||||
return mb.size
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) RowPitch() int {
|
||||
return mb.rowPitch
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) SlicePitch() int {
|
||||
return mb.slicePitch
|
||||
}
|
71
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types12.go
generated
vendored
@ -1,71 +0,0 @@
|
||||
// +build cl12
|
||||
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
const (
|
||||
ChannelDataTypeUNormInt24 ChannelDataType = C.CL_UNORM_INT24
|
||||
ChannelOrderDepth ChannelOrder = C.CL_DEPTH
|
||||
ChannelOrderDepthStencil ChannelOrder = C.CL_DEPTH_STENCIL
|
||||
MemHostNoAccess MemFlag = C.CL_MEM_HOST_NO_ACCESS // OpenCL 1.2
|
||||
MemHostReadOnly MemFlag = C.CL_MEM_HOST_READ_ONLY // OpenCL 1.2
|
||||
MemHostWriteOnly MemFlag = C.CL_MEM_HOST_WRITE_ONLY // OpenCL 1.2
|
||||
MemObjectTypeImage1D MemObjectType = C.CL_MEM_OBJECT_IMAGE1D
|
||||
MemObjectTypeImage1DArray MemObjectType = C.CL_MEM_OBJECT_IMAGE1D_ARRAY
|
||||
MemObjectTypeImage1DBuffer MemObjectType = C.CL_MEM_OBJECT_IMAGE1D_BUFFER
|
||||
MemObjectTypeImage2DArray MemObjectType = C.CL_MEM_OBJECT_IMAGE2D_ARRAY
|
||||
// This flag specifies that the region being mapped in the memory object is being mapped for writing.
|
||||
//
|
||||
// The contents of the region being mapped are to be discarded. This is typically the case when the
|
||||
// region being mapped is overwritten by the host. This flag allows the implementation to no longer
|
||||
// guarantee that the pointer returned by clEnqueueMapBuffer or clEnqueueMapImage contains the
|
||||
// latest bits in the region being mapped which can be a significant performance enhancement.
|
||||
MapFlagWriteInvalidateRegion MapFlag = C.CL_MAP_WRITE_INVALIDATE_REGION
|
||||
)
|
||||
|
||||
func init() {
|
||||
errorMap[C.CL_COMPILE_PROGRAM_FAILURE] = ErrCompileProgramFailure
|
||||
errorMap[C.CL_DEVICE_PARTITION_FAILED] = ErrDevicePartitionFailed
|
||||
errorMap[C.CL_INVALID_COMPILER_OPTIONS] = ErrInvalidCompilerOptions
|
||||
errorMap[C.CL_INVALID_DEVICE_PARTITION_COUNT] = ErrInvalidDevicePartitionCount
|
||||
errorMap[C.CL_INVALID_IMAGE_DESCRIPTOR] = ErrInvalidImageDescriptor
|
||||
errorMap[C.CL_INVALID_LINKER_OPTIONS] = ErrInvalidLinkerOptions
|
||||
errorMap[C.CL_KERNEL_ARG_INFO_NOT_AVAILABLE] = ErrKernelArgInfoNotAvailable
|
||||
errorMap[C.CL_LINK_PROGRAM_FAILURE] = ErrLinkProgramFailure
|
||||
errorMap[C.CL_LINKER_NOT_AVAILABLE] = ErrLinkerNotAvailable
|
||||
channelOrderNameMap[ChannelOrderDepth] = "Depth"
|
||||
channelOrderNameMap[ChannelOrderDepthStencil] = "DepthStencil"
|
||||
channelDataTypeNameMap[ChannelDataTypeUNormInt24] = "UNormInt24"
|
||||
}
|
||||
|
||||
type ImageDescription struct {
|
||||
Type MemObjectType
|
||||
Width, Height, Depth int
|
||||
ArraySize, RowPitch, SlicePitch int
|
||||
NumMipLevels, NumSamples int
|
||||
Buffer *MemObject
|
||||
}
|
||||
|
||||
func (d ImageDescription) toCl() C.cl_image_desc {
|
||||
var desc C.cl_image_desc
|
||||
desc.image_type = C.cl_mem_object_type(d.Type)
|
||||
desc.image_width = C.size_t(d.Width)
|
||||
desc.image_height = C.size_t(d.Height)
|
||||
desc.image_depth = C.size_t(d.Depth)
|
||||
desc.image_array_size = C.size_t(d.ArraySize)
|
||||
desc.image_row_pitch = C.size_t(d.RowPitch)
|
||||
desc.image_slice_pitch = C.size_t(d.SlicePitch)
|
||||
desc.num_mip_levels = C.cl_uint(d.NumMipLevels)
|
||||
desc.num_samples = C.cl_uint(d.NumSamples)
|
||||
desc.buffer = nil
|
||||
if d.Buffer != nil {
|
||||
desc.buffer = d.Buffer.clMem
|
||||
}
|
||||
return desc
|
||||
}
|
45
Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types_darwin.go
generated
vendored
@ -1,45 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
// Extension: cl_APPLE_fixed_alpha_channel_orders
|
||||
//
|
||||
// These selectors may be passed to clCreateImage2D() in the cl_image_format.image_channel_order field.
|
||||
// They are like CL_BGRA and CL_ARGB except that the alpha channel is ignored. On calls to read_imagef,
|
||||
// the alpha will be 0xff (1.0f) if the sample falls in the image and 0 if it does not fall in the image.
|
||||
// On calls to write_imagef, the alpha value is ignored and 0xff (1.0f) is written. These formats are
|
||||
// currently only available for the CL_UNORM_INT8 cl_channel_type. They are intended to support legacy
|
||||
// image formats.
|
||||
const (
|
||||
ChannelOrder1RGBApple ChannelOrder = C.CL_1RGB_APPLE // Introduced in MacOS X.7.
|
||||
ChannelOrderBGR1Apple ChannelOrder = C.CL_BGR1_APPLE // Introduced in MacOS X.7.
|
||||
)
|
||||
|
||||
// Extension: cl_APPLE_biased_fixed_point_image_formats
|
||||
//
|
||||
// This selector may be passed to clCreateImage2D() in the cl_image_format.image_channel_data_type field.
|
||||
// It defines a biased signed 1.14 fixed point storage format, with range [-1, 3). The conversion from
|
||||
// float to this fixed point format is defined as follows:
|
||||
//
|
||||
// ushort float_to_sfixed14( float x ){
|
||||
// int i = convert_int_sat_rte( x * 0x1.0p14f ); // scale [-1, 3.0) to [-16384, 3*16384), round to nearest integer
|
||||
// i = add_sat( i, 0x4000 ); // apply bias, to convert to [0, 65535) range
|
||||
// return convert_ushort_sat(i); // clamp to destination size
|
||||
// }
|
||||
//
|
||||
// The inverse conversion is the reverse process. The formats are currently only available on the CPU with
|
||||
// the CL_RGBA channel layout.
|
||||
const (
|
||||
ChannelDataTypeSFixed14Apple ChannelDataType = C.CL_SFIXED14_APPLE // Introduced in MacOS X.7.
|
||||
)
|
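The biased fixed-point conversion quoted in the comment above can be mirrored in plain Go. A standalone sketch (not part of the diff; the round-to-nearest-even and saturating ops of the OpenCL C version are approximated by the explicit clamp):

```go
package main

import (
	"fmt"
	"math"
)

// floatToSFixed14 mirrors the conversion described above: scale [-1, 3) by 2^14,
// round to nearest (even), add the 0x4000 bias and clamp to the uint16 range.
func floatToSFixed14(x float32) uint16 {
	i := int(math.RoundToEven(float64(x) * 16384)) // x * 2^14
	i += 0x4000                                    // bias: -1.0 maps to 0
	if i < 0 {
		i = 0
	}
	if i > 0xFFFF {
		i = 0xFFFF
	}
	return uint16(i)
}

func main() {
	fmt.Println(floatToSFixed14(-1.0), floatToSFixed14(0.0), floatToSFixed14(2.9999))
}
```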
||||
|
||||
func init() {
|
||||
channelOrderNameMap[ChannelOrder1RGBApple] = "1RGBApple"
|
||||
channelOrderNameMap[ChannelOrderBGR1Apple] = "BGR1Apple"
|
||||
channelDataTypeNameMap[ChannelDataTypeSFixed14Apple] = "SFixed14Apple"
|
||||
}
|
19
Godeps/_workspace/src/github.com/cespare/cp/LICENSE.txt
generated
vendored
@ -1,19 +0,0 @@
|
||||
Copyright (c) 2015 Caleb Spare
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
9
Godeps/_workspace/src/github.com/cespare/cp/README.md
generated
vendored
@ -1,9 +0,0 @@
|
||||
# cp
|
||||
|
||||
[GoDoc](https://godoc.org/github.com/cespare/cp)
|
||||
|
||||
cp is a small Go package for copying files and directories.
|
||||
|
||||
The API may change because I want to add some options in the future (for merging with existing dirs).
|
||||
|
||||
It does not currently handle Windows specifically (I think it may require some special treatment).
|
58
Godeps/_workspace/src/github.com/cespare/cp/cp.go
generated
vendored
@ -1,58 +0,0 @@
|
||||
// Package cp offers simple file and directory copying for Go.
|
||||
package cp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var errCopyFileWithDir = errors.New("dir argument to CopyFile")
|
||||
|
||||
// CopyFile copies the file with path src to dst. The new file must not exist.
|
||||
// It is created with the same permissions as src.
|
||||
func CopyFile(dst, src string) error {
|
||||
rf, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rf.Close()
|
||||
rstat, err := rf.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rstat.IsDir() {
|
||||
return errCopyFileWithDir
|
||||
}
|
||||
|
||||
wf, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_EXCL, rstat.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := io.Copy(wf, rf); err != nil {
|
||||
wf.Close()
|
||||
return err
|
||||
}
|
||||
return wf.Close()
|
||||
}
|
||||
|
||||
// CopyAll copies the file or (recursively) the directory at src to dst.
|
||||
// Permissions are preserved. dst must not already exist.
|
||||
func CopyAll(dst, src string) error {
|
||||
return filepath.Walk(src, makeWalkFn(dst, src))
|
||||
}
|
||||
|
||||
func makeWalkFn(dst, src string) filepath.WalkFunc {
|
||||
return func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dstPath := filepath.Join(dst, strings.TrimPrefix(path, src))
|
||||
if info.IsDir() {
|
||||
return os.Mkdir(dstPath, info.Mode())
|
||||
}
|
||||
return CopyFile(dstPath, path)
|
||||
}
|
||||
}
|
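A hedged usage sketch for the cp package above (not part of the diff; the paths are hypothetical and only for illustration):

```go
package main

import (
	"log"

	"github.com/cespare/cp"
)

func main() {
	// CopyFile: the destination must not exist yet; permissions are preserved.
	if err := cp.CopyFile("backup/genesis.json", "data/genesis.json"); err != nil {
		log.Fatal(err)
	}
	// CopyAll: recursively copies a directory tree; dst must not exist either.
	if err := cp.CopyAll("backup/keystore", "data/keystore"); err != nil {
		log.Fatal(err)
	}
}
```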
15
Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml
generated
vendored
@ -1,18 +1,5 @@
|
||||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.0.3
|
||||
- 1.1.2
|
||||
- 1.2.2
|
||||
- 1.3.3
|
||||
- 1.4.2
|
||||
- 1.5.1
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
go: 1.1
|
||||
|
||||
script:
|
||||
- go vet ./...
|
||||
|
70
Godeps/_workspace/src/github.com/codegangsta/cli/README.md
generated
vendored
@ -1,20 +1,18 @@
|
||||
[Coverage](http://gocover.io/github.com/codegangsta/cli)
|
||||
[Build Status](https://travis-ci.org/codegangsta/cli)
|
||||
[GoDoc](https://godoc.org/github.com/codegangsta/cli)
|
||||
[Build Status](https://travis-ci.org/codegangsta/cli)
|
||||
|
||||
# cli.go
|
||||
cli.go is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
|
||||
|
||||
`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
|
||||
You can view the API docs here:
|
||||
http://godoc.org/github.com/codegangsta/cli
|
||||
|
||||
## Overview
|
||||
|
||||
Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
|
||||
|
||||
**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive!
|
||||
**This is where cli.go comes into play.** cli.go makes command line programming fun, organized, and expressive!
|
||||
|
||||
## Installation
|
||||
|
||||
Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html).
|
||||
Make sure you have a working Go environment (go 1.1 is *required*). [See the install instructions](http://golang.org/doc/install.html).
|
||||
|
||||
To install `cli.go`, simply run:
|
||||
```
|
||||
@ -27,8 +25,7 @@ export PATH=$PATH:$GOPATH/bin
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`.
|
||||
One of the philosophies behind cli.go is that an API should be playful and full of discovery. So a cli.go app can be as little as one line of code in `main()`.
|
||||
|
||||
``` go
|
||||
package main
|
||||
@ -106,8 +103,7 @@ $ greet
|
||||
Hello friend!
|
||||
```
|
||||
|
||||
`cli.go` also generates neat help text:
|
||||
|
||||
cli.go also generates some bitchass help text:
|
||||
```
|
||||
$ greet help
|
||||
NAME:
|
||||
@ -127,7 +123,6 @@ GLOBAL OPTIONS
|
||||
```
|
||||
|
||||
### Arguments
|
||||
|
||||
You can lookup arguments by calling the `Args` function on `cli.Context`.
|
||||
|
||||
``` go
|
||||
@ -139,9 +134,7 @@ app.Action = func(c *cli.Context) {
|
||||
```
|
||||
|
||||
### Flags
|
||||
|
||||
Setting and querying flags is simple.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Flags = []cli.Flag {
|
||||
@ -165,35 +158,6 @@ app.Action = func(c *cli.Context) {
|
||||
...
|
||||
```
|
||||
|
||||
You can also set a destination variable for a flag, to which the content will be scanned.
|
||||
|
||||
``` go
|
||||
...
|
||||
var language string
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
Destination: &language,
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) {
|
||||
name := "someone"
|
||||
if len(c.Args()) > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if language == "spanish" {
|
||||
println("Hola", name)
|
||||
} else {
|
||||
println("Hello", name)
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
See full list of flags at http://godoc.org/github.com/codegangsta/cli
|
||||
|
||||
#### Alternate Names
|
||||
|
||||
You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g.
|
||||
@ -241,7 +205,6 @@ app.Flags = []cli.Flag {
|
||||
### Subcommands
|
||||
|
||||
Subcommands can be defined for a more git-like command line app.
|
||||
|
||||
```go
|
||||
...
|
||||
app.Commands = []cli.Command{
|
||||
@ -292,7 +255,6 @@ You can enable completion commands by setting the `EnableBashCompletion`
|
||||
flag on the `App` object. By default, this setting will only auto-complete to
|
||||
show an app's subcommands, but you can write your own completion methods for
|
||||
the App or its subcommands.
|
||||
|
||||
```go
|
||||
...
|
||||
var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
|
||||
@ -327,24 +289,8 @@ setting the `PROG` variable to the name of your program:
|
||||
|
||||
`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
|
||||
|
||||
#### To Distribute
|
||||
|
||||
Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
|
||||
it to the name of the program you wish to add autocomplete support for (or
|
||||
automatically install it there if you are distributing a package). Don't forget
|
||||
to source the file to make it active in the current shell.
|
||||
|
||||
```
|
||||
sudo cp src/bash_autocomplete /etc/bash_completion.d/<myprogram>
|
||||
source /etc/bash_completion.d/<myprogram>
|
||||
```
|
||||
|
||||
Alternatively, you can just document that users should source the generic
|
||||
`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
|
||||
to the name of their program (as above).
|
||||
|
||||
## Contribution Guidelines
|
||||
|
||||
Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.
|
||||
|
||||
If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together.
|
||||
|
134
Godeps/_workspace/src/github.com/codegangsta/cli/app.go
generated
vendored
@ -5,23 +5,19 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
)
|
||||
|
||||
// App is the main structure of a cli application. It is recommended that
|
||||
// an app be created with the cli.NewApp() function
|
||||
// App is the main structure of a cli application. It is recomended that
|
||||
// and app be created with the cli.NewApp() function
|
||||
type App struct {
|
||||
// The name of the program. Defaults to path.Base(os.Args[0])
|
||||
// The name of the program. Defaults to os.Args[0]
|
||||
Name string
|
||||
// Full name of command for help, defaults to Name
|
||||
HelpName string
|
||||
// Description of the program.
|
||||
Usage string
|
||||
// Text to override the USAGE section of help
|
||||
UsageText string
|
||||
// Description of the program argument format.
|
||||
ArgsUsage string
|
||||
// Version of the program
|
||||
Version string
|
||||
// List of commands to execute
|
||||
@ -46,16 +42,10 @@ type App struct {
|
||||
Action func(context *Context)
|
||||
// Execute this function if the proper command cannot be found
|
||||
CommandNotFound func(context *Context, command string)
|
||||
// Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages.
|
||||
// This function is able to replace the original error messages.
|
||||
// If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted.
|
||||
OnUsageError func(context *Context, err error, isSubcommand bool) error
|
||||
// Compilation date
|
||||
Compiled time.Time
|
||||
// List of all authors who contributed
|
||||
Authors []Author
|
||||
// Copyright of the binary if any
|
||||
Copyright string
|
||||
// Name of Author (Note: Use App.Authors, this is deprecated)
|
||||
Author string
|
||||
// Email of Author (Note: Use App.Authors, this is deprecated)
|
||||
@ -77,10 +67,8 @@ func compileTime() time.Time {
|
||||
// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action.
|
||||
func NewApp() *App {
|
||||
return &App{
|
||||
Name: path.Base(os.Args[0]),
|
||||
HelpName: path.Base(os.Args[0]),
|
||||
Name: os.Args[0],
|
||||
Usage: "A new cli application",
|
||||
UsageText: "",
|
||||
Version: "0.0.0",
|
||||
BashComplete: DefaultAppComplete,
|
||||
Action: helpCommand.Action,
|
||||
@ -95,14 +83,25 @@ func (a *App) Run(arguments []string) (err error) {
|
||||
a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
|
||||
}
|
||||
|
||||
newCmds := []Command{}
|
||||
for _, c := range a.Commands {
|
||||
if c.HelpName == "" {
|
||||
c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
|
||||
if HelpPrinter == nil {
|
||||
defer func() {
|
||||
HelpPrinter = nil
|
||||
}()
|
||||
|
||||
HelpPrinter = func(templ string, data interface{}) {
|
||||
funcMap := template.FuncMap{
|
||||
"join": strings.Join,
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(a.Writer, 0, 8, 1, '\t', 0)
|
||||
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
|
||||
err := t.Execute(w, data)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
newCmds = append(newCmds, c)
|
||||
}
|
||||
a.Commands = newCmds
|
||||
|
||||
// append help to commands
|
||||
if a.Command(helpCommand.Name) == nil && !a.HideHelp {
|
||||
@ -126,55 +125,46 @@ func (a *App) Run(arguments []string) (err error) {
|
||||
set.SetOutput(ioutil.Discard)
|
||||
err = set.Parse(arguments[1:])
|
||||
nerr := normalizeFlags(a.Flags, set)
|
||||
context := NewContext(a, set, nil)
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(a.Writer, nerr)
|
||||
context := NewContext(a, set, set)
|
||||
ShowAppHelp(context)
|
||||
fmt.Fprintln(a.Writer)
|
||||
return nerr
|
||||
}
|
||||
context := NewContext(a, set, set)
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n")
|
||||
ShowAppHelp(context)
|
||||
fmt.Fprintln(a.Writer)
|
||||
return err
|
||||
}
|
||||
|
||||
if checkCompletions(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if a.OnUsageError != nil {
|
||||
err := a.OnUsageError(context, err, false)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
|
||||
ShowAppHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !a.HideHelp && checkHelp(context) {
|
||||
ShowAppHelp(context)
|
||||
if checkHelp(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !a.HideVersion && checkVersion(context) {
|
||||
ShowVersion(context)
|
||||
if checkVersion(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if a.After != nil {
|
||||
defer func() {
|
||||
if afterErr := a.After(context); afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
// err is always nil here.
|
||||
// There is a check to see if it is non-nil
|
||||
// just few lines before.
|
||||
err = a.After(context)
|
||||
}()
|
||||
}
|
||||
|
||||
if a.Before != nil {
|
||||
err = a.Before(context)
|
||||
err := a.Before(context)
|
||||
if err != nil {
|
||||
fmt.Fprintf(a.Writer, "%v\n\n", err)
|
||||
ShowAppHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -213,15 +203,6 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
newCmds := []Command{}
|
||||
for _, c := range a.Commands {
|
||||
if c.HelpName == "" {
|
||||
c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
|
||||
}
|
||||
newCmds = append(newCmds, c)
|
||||
}
|
||||
a.Commands = newCmds
|
||||
|
||||
// append flags
|
||||
if a.EnableBashCompletion {
|
||||
a.appendFlag(BashCompletionFlag)
|
||||
@ -232,34 +213,29 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
|
||||
set.SetOutput(ioutil.Discard)
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
nerr := normalizeFlags(a.Flags, set)
|
||||
context := NewContext(a, set, ctx)
|
||||
context := NewContext(a, set, ctx.globalSet)
|
||||
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(a.Writer, nerr)
|
||||
fmt.Fprintln(a.Writer)
|
||||
if len(a.Commands) > 0 {
|
||||
ShowSubcommandHelp(context)
|
||||
} else {
|
||||
ShowCommandHelp(ctx, context.Args().First())
|
||||
}
|
||||
fmt.Fprintln(a.Writer)
|
||||
return nerr
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprintf(a.Writer, "Incorrect Usage.\n\n")
|
||||
ShowSubcommandHelp(context)
|
||||
return err
|
||||
}
|
||||
|
||||
if checkCompletions(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if a.OnUsageError != nil {
|
||||
err = a.OnUsageError(context, err, true)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
|
||||
ShowSubcommandHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(a.Commands) > 0 {
|
||||
if checkSubcommandHelp(context) {
|
||||
return nil
|
||||
@ -272,14 +248,10 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
|
||||
|
||||
if a.After != nil {
|
||||
defer func() {
|
||||
afterErr := a.After(context)
|
||||
if afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
// err is always nil here.
|
||||
// There is a check to see if it is non-nil
|
||||
// just few lines before.
|
||||
err = a.After(context)
|
||||
}()
|
||||
}
|
||||
|
||||
|
622
Godeps/_workspace/src/github.com/codegangsta/cli/app_test.go
generated
vendored
Normal file
@ -0,0 +1,622 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func ExampleApp() {
|
||||
// set args for examples sake
|
||||
os.Args = []string{"greet", "--name", "Jeremy"}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Name = "greet"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
|
||||
}
|
||||
app.Action = func(c *cli.Context) {
|
||||
fmt.Printf("Hello %v\n", c.String("name"))
|
||||
}
|
||||
app.Author = "Harrison"
|
||||
app.Email = "harrison@lolwut.com"
|
||||
app.Authors = []cli.Author{cli.Author{Name: "Oliver Allen", Email: "oliver@toyshop.com"}}
|
||||
app.Run(os.Args)
|
||||
// Output:
|
||||
// Hello Jeremy
|
||||
}
|
||||
|
||||
func ExampleAppSubcommand() {
|
||||
// set args for examples sake
|
||||
os.Args = []string{"say", "hi", "english", "--name", "Jeremy"}
|
||||
app := cli.NewApp()
|
||||
app.Name = "say"
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "hello",
|
||||
Aliases: []string{"hi"},
|
||||
Usage: "use it to see a description",
|
||||
Description: "This is how we describe hello the function",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "english",
|
||||
Aliases: []string{"en"},
|
||||
Usage: "sends a greeting in english",
|
||||
Description: "greets someone in english",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Value: "Bob",
|
||||
Usage: "Name of the person to greet",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) {
|
||||
fmt.Println("Hello,", c.String("name"))
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
// Output:
|
||||
// Hello, Jeremy
|
||||
}
|
||||
|
||||
func ExampleAppHelp() {
|
||||
// set args for examples sake
|
||||
os.Args = []string{"greet", "h", "describeit"}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Name = "greet"
|
||||
app.Flags = []cli.Flag{
|
||||
cli.StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "describeit",
|
||||
Aliases: []string{"d"},
|
||||
Usage: "use it to see a description",
|
||||
Description: "This is how we describe describeit the function",
|
||||
Action: func(c *cli.Context) {
|
||||
fmt.Printf("i like to describe things")
|
||||
},
|
||||
},
|
||||
}
|
||||
app.Run(os.Args)
|
||||
// Output:
|
||||
// NAME:
|
||||
// describeit - use it to see a description
|
||||
//
|
||||
// USAGE:
|
||||
// command describeit [arguments...]
|
||||
//
|
||||
// DESCRIPTION:
|
||||
// This is how we describe describeit the function
|
||||
}
|
||||
|
||||
func ExampleAppBashComplete() {
|
||||
// set args for examples sake
|
||||
os.Args = []string{"greet", "--generate-bash-completion"}
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Name = "greet"
|
||||
app.EnableBashCompletion = true
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "describeit",
|
||||
Aliases: []string{"d"},
|
||||
Usage: "use it to see a description",
|
||||
Description: "This is how we describe describeit the function",
|
||||
Action: func(c *cli.Context) {
|
||||
fmt.Printf("i like to describe things")
|
||||
},
|
||||
}, {
|
||||
Name: "next",
|
||||
Usage: "next example",
|
||||
Description: "more stuff to see when generating bash completion",
|
||||
Action: func(c *cli.Context) {
|
||||
fmt.Printf("the next example")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
// Output:
|
||||
// describeit
|
||||
// d
|
||||
// next
|
||||
// help
|
||||
// h
|
||||
}
|
||||
|
||||
func TestApp_Run(t *testing.T) {
|
||||
s := ""
|
||||
|
||||
app := cli.NewApp()
|
||||
app.Action = func(c *cli.Context) {
|
||||
s = s + c.Args().First()
|
||||
}
|
||||
|
||||
err := app.Run([]string{"command", "foo"})
|
||||
expect(t, err, nil)
|
||||
err = app.Run([]string{"command", "bar"})
|
||||
expect(t, err, nil)
|
||||
expect(t, s, "foobar")
|
||||
}
|
||||
|
||||
var commandAppTests = []struct {
|
||||
name string
|
||||
expected bool
|
||||
}{
|
||||
{"foobar", true},
|
||||
{"batbaz", true},
|
||||
{"b", true},
|
||||
{"f", true},
|
||||
{"bat", false},
|
||||
{"nothing", false},
|
||||
}
|
||||
|
||||
func TestApp_Command(t *testing.T) {
|
||||
app := cli.NewApp()
|
||||
fooCommand := cli.Command{Name: "foobar", Aliases: []string{"f"}}
|
||||
batCommand := cli.Command{Name: "batbaz", Aliases: []string{"b"}}
|
||||
app.Commands = []cli.Command{
|
||||
fooCommand,
|
||||
batCommand,
|
||||
}
|
||||
|
||||
for _, test := range commandAppTests {
|
||||
expect(t, app.Command(test.name) != nil, test.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApp_CommandWithArgBeforeFlags(t *testing.T) {
|
||||
var parsedOption, firstArg string
|
||||
|
||||
app := cli.NewApp()
|
||||
command := cli.Command{
|
||||
Name: "cmd",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{Name: "option", Value: "", Usage: "some option"},
|
||||
},
|
||||
Action: func(c *cli.Context) {
|
||||
parsedOption = c.String("option")
|
||||
firstArg = c.Args().First()
|
||||
},
|
||||
}
|
||||
app.Commands = []cli.Command{command}
|
||||
|
||||
app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"})
|
||||
|
||||
expect(t, parsedOption, "my-option")
|
||||
expect(t, firstArg, "my-arg")
|
||||
}
|
||||
|
||||
func TestApp_RunAsSubcommandParseFlags(t *testing.T) {
|
||||
var context *cli.Context
|
||||
|
||||
a := cli.NewApp()
|
||||
a.Commands = []cli.Command{
|
||||
{
|
||||
Name: "foo",
|
||||
Action: func(c *cli.Context) {
|
||||
context = c
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
},
|
||||
Before: func(_ *cli.Context) error { return nil },
|
||||
},
|
||||
}
|
||||
a.Run([]string{"", "foo", "--lang", "spanish", "abcd"})
|
||||
|
||||
expect(t, context.Args().Get(0), "abcd")
|
||||
expect(t, context.String("lang"), "spanish")
|
||||
}

func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) {
	var parsedOption string
	var args []string

	app := cli.NewApp()
	command := cli.Command{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.StringFlag{Name: "option", Value: "", Usage: "some option"},
		},
		Action: func(c *cli.Context) {
			parsedOption = c.String("option")
			args = c.Args()
		},
	}
	app.Commands = []cli.Command{command}

	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})

	expect(t, parsedOption, "my-option")
	expect(t, args[0], "my-arg")
	expect(t, args[1], "--")
	expect(t, args[2], "--notARealFlag")
}
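
The two terminator tests around here pin down how `--` is handled: flag parsing stops at the terminator, and the terminator plus everything after it is kept in `c.Args()` verbatim. A small sketch of relying on that behaviour (the command name is illustrative only):

// Sketch: pass untouched arguments through to a wrapped tool.
wrap := cli.Command{
	Name: "wrap",
	Action: func(c *cli.Context) {
		// For `app wrap --option x -- --not-a-flag`, c.Args() still
		// contains "--" and "--not-a-flag" for the caller to forward.
		fmt.Println([]string(c.Args()))
	},
}
_ = wrap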

func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
	var args []string

	app := cli.NewApp()
	command := cli.Command{
		Name: "cmd",
		Action: func(c *cli.Context) {
			args = c.Args()
		},
	}
	app.Commands = []cli.Command{command}

	app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})

	expect(t, args[0], "my-arg")
	expect(t, args[1], "--")
	expect(t, args[2], "notAFlagAtAll")
}

func TestApp_Float64Flag(t *testing.T) {
	var meters float64

	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
	}
	app.Action = func(c *cli.Context) {
		meters = c.Float64("height")
	}

	app.Run([]string{"", "--height", "1.93"})
	expect(t, meters, 1.93)
}
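
Float64Flag follows the same pattern as the other flag types in this diff: a default Value, a Usage string and an optional EnvVar fallback. A brief, hedged example of declaring one on an app:

// Sketch: a float flag with a default and an environment fallback.
app := cli.NewApp()
app.Flags = []cli.Flag{
	cli.Float64Flag{Name: "height", Value: 1.5, Usage: "height in meters", EnvVar: "APP_HEIGHT"},
}
app.Action = func(c *cli.Context) {
	fmt.Printf("height = %.2f\n", c.Float64("height"))
}
app.Run([]string{"demo", "--height", "1.93"})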

func TestApp_ParseSliceFlags(t *testing.T) {
	var parsedOption, firstArg string
	var parsedIntSlice []int
	var parsedStringSlice []string

	app := cli.NewApp()
	command := cli.Command{
		Name: "cmd",
		Flags: []cli.Flag{
			cli.IntSliceFlag{Name: "p", Value: &cli.IntSlice{}, Usage: "set one or more ip addr"},
			cli.StringSliceFlag{Name: "ip", Value: &cli.StringSlice{}, Usage: "set one or more ports to open"},
		},
		Action: func(c *cli.Context) {
			parsedIntSlice = c.IntSlice("p")
			parsedStringSlice = c.StringSlice("ip")
			parsedOption = c.String("option")
			firstArg = c.Args().First()
		},
	}
	app.Commands = []cli.Command{command}

	app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})

	IntsEquals := func(a, b []int) bool {
		if len(a) != len(b) {
			return false
		}
		for i, v := range a {
			if v != b[i] {
				return false
			}
		}
		return true
	}

	StrsEquals := func(a, b []string) bool {
		if len(a) != len(b) {
			return false
		}
		for i, v := range a {
			if v != b[i] {
				return false
			}
		}
		return true
	}
	var expectedIntSlice = []int{22, 80}
	var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"}

	if !IntsEquals(parsedIntSlice, expectedIntSlice) {
		t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
	}

	if !StrsEquals(parsedStringSlice, expectedStringSlice) {
		t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
	}
}

func TestApp_DefaultStdout(t *testing.T) {
	app := cli.NewApp()

	if app.Writer != os.Stdout {
		t.Error("Default output writer not set.")
	}
}

type mockWriter struct {
	written []byte
}

func (fw *mockWriter) Write(p []byte) (n int, err error) {
	if fw.written == nil {
		fw.written = p
	} else {
		fw.written = append(fw.written, p...)
	}

	return len(p), nil
}

func (fw *mockWriter) GetWritten() (b []byte) {
	return fw.written
}
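
mockWriter exists only so the tests can capture what the app prints. In ordinary code the same `App.Writer` field can point at any io.Writer, for example a bytes.Buffer; a minimal sketch (assumes the "bytes" import):

// Sketch: capture help output in a buffer instead of os.Stdout.
var buf bytes.Buffer
app := cli.NewApp()
app.Name = "demo"
app.Writer = &buf
app.Run([]string{"help"})
fmt.Print(buf.String())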

func TestApp_SetStdout(t *testing.T) {
	w := &mockWriter{}

	app := cli.NewApp()
	app.Name = "test"
	app.Writer = w

	err := app.Run([]string{"help"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if len(w.written) == 0 {
		t.Error("App did not write output to desired writer.")
	}
}

func TestApp_BeforeFunc(t *testing.T) {
	beforeRun, subcommandRun := false, false
	beforeError := fmt.Errorf("fail")
	var err error

	app := cli.NewApp()

	app.Before = func(c *cli.Context) error {
		beforeRun = true
		s := c.String("opt")
		if s == "fail" {
			return beforeError
		}

		return nil
	}

	app.Commands = []cli.Command{
		cli.Command{
			Name: "sub",
			Action: func(c *cli.Context) {
				subcommandRun = true
			},
		},
	}

	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "opt"},
	}

	// run with the Before() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if beforeRun == false {
		t.Errorf("Before() not executed when expected")
	}

	if subcommandRun == false {
		t.Errorf("Subcommand not executed when expected")
	}

	// reset
	beforeRun, subcommandRun = false, false

	// run with the Before() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if err != beforeError {
		t.Errorf("Run error expected, but not received")
	}

	if beforeRun == false {
		t.Errorf("Before() not executed when expected")
	}

	if subcommandRun == true {
		t.Errorf("Subcommand executed when NOT expected")
	}
}
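
TestApp_BeforeFunc pins down the contract: when `Before` returns an error, `Run` returns that same error and the subcommand's Action never fires. A hedged sketch of using the hook for up-front validation:

// Sketch: reject the run before any command executes.
app := cli.NewApp()
app.Flags = []cli.Flag{cli.StringFlag{Name: "config"}}
app.Before = func(c *cli.Context) error {
	if c.String("config") == "" {
		return fmt.Errorf("a --config file is required")
	}
	return nil
}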

func TestApp_AfterFunc(t *testing.T) {
	afterRun, subcommandRun := false, false
	afterError := fmt.Errorf("fail")
	var err error

	app := cli.NewApp()

	app.After = func(c *cli.Context) error {
		afterRun = true
		s := c.String("opt")
		if s == "fail" {
			return afterError
		}

		return nil
	}

	app.Commands = []cli.Command{
		cli.Command{
			Name: "sub",
			Action: func(c *cli.Context) {
				subcommandRun = true
			},
		},
	}

	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "opt"},
	}

	// run with the After() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if afterRun == false {
		t.Errorf("After() not executed when expected")
	}

	if subcommandRun == false {
		t.Errorf("Subcommand not executed when expected")
	}

	// reset
	afterRun, subcommandRun = false, false

	// run with the Before() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if err != afterError {
		t.Errorf("Run error expected, but not received")
	}

	if afterRun == false {
		t.Errorf("After() not executed when expected")
	}

	if subcommandRun == false {
		t.Errorf("Subcommand not executed when expected")
	}
}

func TestAppNoHelpFlag(t *testing.T) {
	oldFlag := cli.HelpFlag
	defer func() {
		cli.HelpFlag = oldFlag
	}()

	cli.HelpFlag = cli.BoolFlag{}

	app := cli.NewApp()
	err := app.Run([]string{"test", "-h"})

	if err != flag.ErrHelp {
		t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
	}
}

func TestAppHelpPrinter(t *testing.T) {
	oldPrinter := cli.HelpPrinter
	defer func() {
		cli.HelpPrinter = oldPrinter
	}()

	var wasCalled = false
	cli.HelpPrinter = func(template string, data interface{}) {
		wasCalled = true
	}

	app := cli.NewApp()
	app.Run([]string{"-h"})

	if wasCalled == false {
		t.Errorf("Help printer expected to be called, but was not")
	}
}

func TestAppVersionPrinter(t *testing.T) {
	oldPrinter := cli.VersionPrinter
	defer func() {
		cli.VersionPrinter = oldPrinter
	}()

	var wasCalled = false
	cli.VersionPrinter = func(c *cli.Context) {
		wasCalled = true
	}

	app := cli.NewApp()
	ctx := cli.NewContext(app, nil, nil)
	cli.ShowVersion(ctx)

	if wasCalled == false {
		t.Errorf("Version printer expected to be called, but was not")
	}
}
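
cli.VersionPrinter is a package-level variable, so it can be swapped out globally, which is exactly what the test above does (restoring the old value is only needed in tests). A brief, hypothetical sketch of a custom printer:

// Sketch: print the version plus an extra build identifier.
gitCommit := "unknown" // hypothetical value injected by the build system
cli.VersionPrinter = func(c *cli.Context) {
	fmt.Fprintf(c.App.Writer, "%s %s (commit %s)\n", c.App.Name, c.App.Version, gitCommit)
}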

func TestAppCommandNotFound(t *testing.T) {
	beforeRun, subcommandRun := false, false
	app := cli.NewApp()

	app.CommandNotFound = func(c *cli.Context, command string) {
		beforeRun = true
	}

	app.Commands = []cli.Command{
		cli.Command{
			Name: "bar",
			Action: func(c *cli.Context) {
				subcommandRun = true
			},
		},
	}

	app.Run([]string{"command", "foo"})

	expect(t, beforeRun, true)
	expect(t, subcommandRun, false)
}
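
App.CommandNotFound is called with the context and the unknown command name instead of running anything, as the test above confirms. A minimal, hedged sketch of a friendlier handler:

// Sketch: report the unknown command on the app's writer.
app := cli.NewApp()
app.CommandNotFound = func(c *cli.Context, command string) {
	fmt.Fprintf(c.App.Writer, "%s: %q is not a known command, see '%s help'\n",
		c.App.Name, command, c.App.Name)
}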

func TestGlobalFlagsInSubcommands(t *testing.T) {
	subcommandRun := false
	app := cli.NewApp()

	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "debug, d", Usage: "Enable debugging"},
	}

	app.Commands = []cli.Command{
		cli.Command{
			Name: "foo",
			Subcommands: []cli.Command{
				{
					Name: "bar",
					Action: func(c *cli.Context) {
						if c.GlobalBool("debug") {
							subcommandRun = true
						}
					},
				},
			},
		},
	}

	app.Run([]string{"command", "-d", "foo", "bar"})

	expect(t, subcommandRun, true)
}

16 Godeps/_workspace/src/github.com/codegangsta/cli/appveyor.yml (generated, vendored)
@@ -1,16 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

install:
- go version
- go env

build_script:
- cd %APPVEYOR_BUILD_FOLDER%
- go vet ./...
- go test -v ./...

test: off

deploy: off

7 Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete (generated, vendored)
@@ -1,14 +1,13 @@
#! /bin/bash

: ${PROG:=$(basename ${BASH_SOURCE})}

_cli_bash_autocomplete() {
	local cur opts base
	local cur prev opts base
	COMPREPLY=()
	cur="${COMP_WORDS[COMP_CWORD]}"
	prev="${COMP_WORDS[COMP_CWORD-1]}"
	opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
	COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
	return 0
}

complete -F _cli_bash_autocomplete $PROG
complete -F _cli_bash_autocomplete $PROG

21 Godeps/_workspace/src/github.com/codegangsta/cli/cli.go (generated, vendored)
@@ -17,24 +17,3 @@
// app.Run(os.Args)
// }
package cli

import (
	"strings"
)

type MultiError struct {
	Errors []error
}

func NewMultiError(err ...error) MultiError {
	return MultiError{Errors: err}
}

func (m MultiError) Error() string {
	errs := make([]string, len(m.Errors))
	for i, err := range m.Errors {
		errs[i] = err.Error()
	}

	return strings.Join(errs, "\n")
}
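
MultiError, which this hunk drops from the vendored copy, simply wraps several errors and joins their messages with newlines; command.go uses it to combine an Action error with an After-hook error. A small sketch of how it behaves, based only on the code above:

// Sketch: two errors folded into one message, one per line.
err := cli.NewMultiError(
	fmt.Errorf("action failed"),
	fmt.Errorf("cleanup failed"),
)
fmt.Println(err.Error())
// action failed
// cleanup failed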

100 Godeps/_workspace/src/github.com/codegangsta/cli/cli_test.go (generated, vendored, new file)
@@ -0,0 +1,100 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "todo"
|
||||
app.Usage = "task list on the command line"
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "add",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "add a task to the list",
|
||||
Action: func(c *cli.Context) {
|
||||
println("added task: ", c.Args().First())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "complete",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "complete a task on the list",
|
||||
Action: func(c *cli.Context) {
|
||||
println("completed task: ", c.Args().First())
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
|
||||
func ExampleSubcommand() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "say"
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "hello",
|
||||
Aliases: []string{"hi"},
|
||||
Usage: "use it to see a description",
|
||||
Description: "This is how we describe hello the function",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "english",
|
||||
Aliases: []string{"en"},
|
||||
Usage: "sends a greeting in english",
|
||||
Description: "greets someone in english",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "name",
|
||||
Value: "Bob",
|
||||
Usage: "Name of the person to greet",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) {
|
||||
println("Hello, ", c.String("name"))
|
||||
},
|
||||
}, {
|
||||
Name: "spanish",
|
||||
Aliases: []string{"sp"},
|
||||
Usage: "sends a greeting in spanish",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "surname",
|
||||
Value: "Jones",
|
||||
Usage: "Surname of the person to greet",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) {
|
||||
println("Hola, ", c.String("surname"))
|
||||
},
|
||||
}, {
|
||||
Name: "french",
|
||||
Aliases: []string{"fr"},
|
||||
Usage: "sends a greeting in french",
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "nickname",
|
||||
Value: "Stevie",
|
||||
Usage: "Nickname of the person to greet",
|
||||
},
|
||||
},
|
||||
Action: func(c *cli.Context) {
|
||||
println("Bonjour, ", c.String("nickname"))
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "bye",
|
||||
Usage: "says goodbye",
|
||||
Action: func(c *cli.Context) {
|
||||
println("bye")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
143 Godeps/_workspace/src/github.com/codegangsta/cli/command.go (generated, vendored)
@@ -16,12 +16,8 @@ type Command struct {
|
||||
Aliases []string
|
||||
// A short description of the usage of this command
|
||||
Usage string
|
||||
// Custom text to show on USAGE section of help
|
||||
UsageText string
|
||||
// A longer explanation of how the command works
|
||||
Description string
|
||||
// A short description of the arguments of this command
|
||||
ArgsUsage string
|
||||
// The function to call when checking for bash command completions
|
||||
BashComplete func(context *Context)
|
||||
// An action to execute before any sub-subcommands are run, but after the context is ready
|
||||
@ -32,10 +28,6 @@ type Command struct {
|
||||
After func(context *Context) error
|
||||
// The function to call when this command is invoked
|
||||
Action func(context *Context)
|
||||
// Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages.
|
||||
// This function is able to replace the original error messages.
|
||||
// If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted.
|
||||
OnUsageError func(context *Context, err error) error
|
||||
// List of child commands
|
||||
Subcommands []Command
|
||||
// List of flags to parse
|
||||
@ -44,24 +36,12 @@ type Command struct {
|
||||
SkipFlagParsing bool
|
||||
// Boolean to hide built-in help command
|
||||
HideHelp bool
|
||||
|
||||
// Full name of command for help, defaults to full command name, including parent commands.
|
||||
HelpName string
|
||||
commandNamePath []string
|
||||
}
|
||||
|
||||
// Returns the full name of the command.
|
||||
// For subcommands this ensures that parent commands are part of the command path
|
||||
func (c Command) FullName() string {
|
||||
if c.commandNamePath == nil {
|
||||
return c.Name
|
||||
}
|
||||
return strings.Join(c.commandNamePath, " ")
|
||||
}
|
||||
|
||||
// Invokes the command given the context, parses ctx.Args() to generate command-specific flags
|
||||
func (c Command) Run(ctx *Context) (err error) {
|
||||
if len(c.Subcommands) > 0 {
|
||||
func (c Command) Run(ctx *Context) error {
|
||||
|
||||
if len(c.Subcommands) > 0 || c.Before != nil || c.After != nil {
|
||||
return c.startApp(ctx)
|
||||
}
|
||||
|
||||
@ -80,54 +60,41 @@ func (c Command) Run(ctx *Context) (err error) {
|
||||
set := flagSet(c.Name, c.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
|
||||
if !c.SkipFlagParsing {
|
||||
firstFlagIndex := -1
|
||||
terminatorIndex := -1
|
||||
for index, arg := range ctx.Args() {
|
||||
if arg == "--" {
|
||||
terminatorIndex = index
|
||||
break
|
||||
} else if arg == "-" {
|
||||
// Do nothing. A dash alone is not really a flag.
|
||||
continue
|
||||
} else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
|
||||
firstFlagIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
if firstFlagIndex > -1 {
|
||||
args := ctx.Args()
|
||||
regularArgs := make([]string, len(args[1:firstFlagIndex]))
|
||||
copy(regularArgs, args[1:firstFlagIndex])
|
||||
|
||||
var flagArgs []string
|
||||
if terminatorIndex > -1 {
|
||||
flagArgs = args[firstFlagIndex:terminatorIndex]
|
||||
regularArgs = append(regularArgs, args[terminatorIndex:]...)
|
||||
} else {
|
||||
flagArgs = args[firstFlagIndex:]
|
||||
}
|
||||
|
||||
err = set.Parse(append(flagArgs, regularArgs...))
|
||||
} else {
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
}
|
||||
} else {
|
||||
if c.SkipFlagParsing {
|
||||
err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...))
|
||||
firstFlagIndex := -1
|
||||
terminatorIndex := -1
|
||||
for index, arg := range ctx.Args() {
|
||||
if arg == "--" {
|
||||
terminatorIndex = index
|
||||
break
|
||||
} else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
|
||||
firstFlagIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if c.OnUsageError != nil {
|
||||
err := c.OnUsageError(ctx, err)
|
||||
return err
|
||||
var err error
|
||||
if firstFlagIndex > -1 && !c.SkipFlagParsing {
|
||||
args := ctx.Args()
|
||||
regularArgs := make([]string, len(args[1:firstFlagIndex]))
|
||||
copy(regularArgs, args[1:firstFlagIndex])
|
||||
|
||||
var flagArgs []string
|
||||
if terminatorIndex > -1 {
|
||||
flagArgs = args[firstFlagIndex:terminatorIndex]
|
||||
regularArgs = append(regularArgs, args[terminatorIndex:]...)
|
||||
} else {
|
||||
fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.")
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return err
|
||||
flagArgs = args[firstFlagIndex:]
|
||||
}
|
||||
|
||||
err = set.Parse(append(flagArgs, regularArgs...))
|
||||
} else {
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Fprint(ctx.App.Writer, "Incorrect Usage.\n\n")
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
return err
|
||||
}
|
||||
|
||||
nerr := normalizeFlags(c.Flags, set)
|
||||
@ -135,9 +102,10 @@ func (c Command) Run(ctx *Context) (err error) {
|
||||
fmt.Fprintln(ctx.App.Writer, nerr)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
return nerr
|
||||
}
|
||||
context := NewContext(ctx.App, set, ctx)
|
||||
context := NewContext(ctx.App, set, ctx.globalSet)
|
||||
|
||||
if checkCommandCompletions(context, c.Name) {
|
||||
return nil
|
||||
@ -146,30 +114,6 @@ func (c Command) Run(ctx *Context) (err error) {
|
||||
if checkCommandHelp(context, c.Name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.After != nil {
|
||||
defer func() {
|
||||
afterErr := c.After(context)
|
||||
if afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if c.Before != nil {
|
||||
err := c.Before(context)
|
||||
if err != nil {
|
||||
fmt.Fprintln(ctx.App.Writer, err)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
context.Command = c
|
||||
c.Action(context)
|
||||
return nil
|
||||
@ -200,12 +144,6 @@ func (c Command) startApp(ctx *Context) error {
|
||||
|
||||
// set the name and usage
|
||||
app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
|
||||
if c.HelpName == "" {
|
||||
app.HelpName = c.HelpName
|
||||
} else {
|
||||
app.HelpName = app.Name
|
||||
}
|
||||
|
||||
if c.Description != "" {
|
||||
app.Usage = c.Description
|
||||
} else {
|
||||
@ -220,13 +158,6 @@ func (c Command) startApp(ctx *Context) error {
|
||||
app.Flags = c.Flags
|
||||
app.HideHelp = c.HideHelp
|
||||
|
||||
app.Version = ctx.App.Version
|
||||
app.HideVersion = ctx.App.HideVersion
|
||||
app.Compiled = ctx.App.Compiled
|
||||
app.Author = ctx.App.Author
|
||||
app.Email = ctx.App.Email
|
||||
app.Writer = ctx.App.Writer
|
||||
|
||||
// bash completion
|
||||
app.EnableBashCompletion = ctx.App.EnableBashCompletion
|
||||
if c.BashComplete != nil {
|
||||
@ -242,9 +173,5 @@ func (c Command) startApp(ctx *Context) error {
|
||||
app.Action = helpSubcommand.Action
|
||||
}
|
||||
|
||||
for index, cc := range app.Commands {
|
||||
app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
|
||||
}
|
||||
|
||||
return app.RunAsSubcommand(ctx)
|
||||
}
|
||||
|
49 Godeps/_workspace/src/github.com/codegangsta/cli/command_test.go (generated, vendored, new file)
@@ -0,0 +1,49 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"testing"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func TestCommandDoNotIgnoreFlags(t *testing.T) {
|
||||
app := cli.NewApp()
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
test := []string{"blah", "blah", "-break"}
|
||||
set.Parse(test)
|
||||
|
||||
c := cli.NewContext(app, set, set)
|
||||
|
||||
command := cli.Command{
|
||||
Name: "test-cmd",
|
||||
Aliases: []string{"tc"},
|
||||
Usage: "this is for testing",
|
||||
Description: "testing",
|
||||
Action: func(_ *cli.Context) {},
|
||||
}
|
||||
err := command.Run(c)
|
||||
|
||||
expect(t, err.Error(), "flag provided but not defined: -break")
|
||||
}
|
||||
|
||||
func TestCommandIgnoreFlags(t *testing.T) {
|
||||
app := cli.NewApp()
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
test := []string{"blah", "blah"}
|
||||
set.Parse(test)
|
||||
|
||||
c := cli.NewContext(app, set, set)
|
||||
|
||||
command := cli.Command{
|
||||
Name: "test-cmd",
|
||||
Aliases: []string{"tc"},
|
||||
Usage: "this is for testing",
|
||||
Description: "testing",
|
||||
Action: func(_ *cli.Context) {},
|
||||
SkipFlagParsing: true,
|
||||
}
|
||||
err := command.Run(c)
|
||||
|
||||
expect(t, err, nil)
|
||||
}
|
78 Godeps/_workspace/src/github.com/codegangsta/cli/context.go (generated, vendored)
@@ -16,14 +16,14 @@ type Context struct {
|
||||
App *App
|
||||
Command Command
|
||||
flagSet *flag.FlagSet
|
||||
globalSet *flag.FlagSet
|
||||
setFlags map[string]bool
|
||||
globalSetFlags map[string]bool
|
||||
parentContext *Context
|
||||
}
|
||||
|
||||
// Creates a new context. For use in when invoking an App or Command action.
|
||||
func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
|
||||
return &Context{App: app, flagSet: set, parentContext: parentCtx}
|
||||
func NewContext(app *App, set *flag.FlagSet, globalSet *flag.FlagSet) *Context {
|
||||
return &Context{App: app, flagSet: set, globalSet: globalSet}
|
||||
}
|
||||
|
||||
// Looks up the value of a local int flag, returns 0 if no int flag exists
|
||||
@ -73,58 +73,37 @@ func (c *Context) Generic(name string) interface{} {
|
||||
|
||||
// Looks up the value of a global int flag, returns 0 if no int flag exists
|
||||
func (c *Context) GlobalInt(name string) int {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupInt(name, fs)
|
||||
}
|
||||
return 0
|
||||
return lookupInt(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists
|
||||
func (c *Context) GlobalDuration(name string) time.Duration {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupDuration(name, fs)
|
||||
}
|
||||
return 0
|
||||
return lookupDuration(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global bool flag, returns false if no bool flag exists
|
||||
func (c *Context) GlobalBool(name string) bool {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupBool(name, fs)
|
||||
}
|
||||
return false
|
||||
return lookupBool(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global string flag, returns "" if no string flag exists
|
||||
func (c *Context) GlobalString(name string) string {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupString(name, fs)
|
||||
}
|
||||
return ""
|
||||
return lookupString(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global string slice flag, returns nil if no string slice flag exists
|
||||
func (c *Context) GlobalStringSlice(name string) []string {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupStringSlice(name, fs)
|
||||
}
|
||||
return nil
|
||||
return lookupStringSlice(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global int slice flag, returns nil if no int slice flag exists
|
||||
func (c *Context) GlobalIntSlice(name string) []int {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupIntSlice(name, fs)
|
||||
}
|
||||
return nil
|
||||
return lookupIntSlice(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global generic flag, returns nil if no generic flag exists
|
||||
func (c *Context) GlobalGeneric(name string) interface{} {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupGeneric(name, fs)
|
||||
}
|
||||
return nil
|
||||
return lookupGeneric(name, c.globalSet)
|
||||
}
|
||||
|
||||
// Returns the number of flags set
|
||||
@ -147,23 +126,17 @@ func (c *Context) IsSet(name string) bool {
|
||||
func (c *Context) GlobalIsSet(name string) bool {
|
||||
if c.globalSetFlags == nil {
|
||||
c.globalSetFlags = make(map[string]bool)
|
||||
ctx := c
|
||||
if ctx.parentContext != nil {
|
||||
ctx = ctx.parentContext
|
||||
}
|
||||
for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext {
|
||||
ctx.flagSet.Visit(func(f *flag.Flag) {
|
||||
c.globalSetFlags[f.Name] = true
|
||||
})
|
||||
}
|
||||
c.globalSet.Visit(func(f *flag.Flag) {
|
||||
c.globalSetFlags[f.Name] = true
|
||||
})
|
||||
}
|
||||
return c.globalSetFlags[name]
|
||||
return c.globalSetFlags[name] == true
|
||||
}
|
||||
|
||||
// Returns a slice of flag names used in this context.
|
||||
func (c *Context) FlagNames() (names []string) {
|
||||
for _, flag := range c.Command.Flags {
|
||||
name := strings.Split(flag.GetName(), ",")[0]
|
||||
name := strings.Split(flag.getName(), ",")[0]
|
||||
if name == "help" {
|
||||
continue
|
||||
}
|
||||
@ -175,7 +148,7 @@ func (c *Context) FlagNames() (names []string) {
|
||||
// Returns a slice of global flag names used by the app.
|
||||
func (c *Context) GlobalFlagNames() (names []string) {
|
||||
for _, flag := range c.App.Flags {
|
||||
name := strings.Split(flag.GetName(), ",")[0]
|
||||
name := strings.Split(flag.getName(), ",")[0]
|
||||
if name == "help" || name == "version" {
|
||||
continue
|
||||
}
|
||||
@ -184,11 +157,6 @@ func (c *Context) GlobalFlagNames() (names []string) {
|
||||
return
|
||||
}
|
||||
|
||||
// Returns the parent context, if any
|
||||
func (c *Context) Parent() *Context {
|
||||
return c.parentContext
|
||||
}
|
||||
|
||||
type Args []string
|
||||
|
||||
// Returns the command line arguments associated with the context.
|
||||
@ -233,18 +201,6 @@ func (a Args) Swap(from, to int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
|
||||
if ctx.parentContext != nil {
|
||||
ctx = ctx.parentContext
|
||||
}
|
||||
for ; ctx != nil; ctx = ctx.parentContext {
|
||||
if f := ctx.flagSet.Lookup(name); f != nil {
|
||||
return ctx.flagSet
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupInt(name string, set *flag.FlagSet) int {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
@ -360,7 +316,7 @@ func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
|
||||
visited[f.Name] = true
|
||||
})
|
||||
for _, f := range flags {
|
||||
parts := strings.Split(f.GetName(), ",")
|
||||
parts := strings.Split(f.getName(), ",")
|
||||
if len(parts) == 1 {
|
||||
continue
|
||||
}
|
||||
|
111 Godeps/_workspace/src/github.com/codegangsta/cli/context_test.go (generated, vendored, new file)
@@ -0,0 +1,111 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func TestNewContext(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Int("myflag", 12, "doc")
|
||||
globalSet := flag.NewFlagSet("test", 0)
|
||||
globalSet.Int("myflag", 42, "doc")
|
||||
command := cli.Command{Name: "mycommand"}
|
||||
c := cli.NewContext(nil, set, globalSet)
|
||||
c.Command = command
|
||||
expect(t, c.Int("myflag"), 12)
|
||||
expect(t, c.GlobalInt("myflag"), 42)
|
||||
expect(t, c.Command.Name, "mycommand")
|
||||
}
|
||||
|
||||
func TestContext_Int(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Int("myflag", 12, "doc")
|
||||
c := cli.NewContext(nil, set, set)
|
||||
expect(t, c.Int("myflag"), 12)
|
||||
}
|
||||
|
||||
func TestContext_Duration(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Duration("myflag", time.Duration(12*time.Second), "doc")
|
||||
c := cli.NewContext(nil, set, set)
|
||||
expect(t, c.Duration("myflag"), time.Duration(12*time.Second))
|
||||
}
|
||||
|
||||
func TestContext_String(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.String("myflag", "hello world", "doc")
|
||||
c := cli.NewContext(nil, set, set)
|
||||
expect(t, c.String("myflag"), "hello world")
|
||||
}
|
||||
|
||||
func TestContext_Bool(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool("myflag", false, "doc")
|
||||
c := cli.NewContext(nil, set, set)
|
||||
expect(t, c.Bool("myflag"), false)
|
||||
}
|
||||
|
||||
func TestContext_BoolT(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool("myflag", true, "doc")
|
||||
c := cli.NewContext(nil, set, set)
|
||||
expect(t, c.BoolT("myflag"), true)
|
||||
}
|
||||
|
||||
func TestContext_Args(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool("myflag", false, "doc")
|
||||
c := cli.NewContext(nil, set, set)
|
||||
set.Parse([]string{"--myflag", "bat", "baz"})
|
||||
expect(t, len(c.Args()), 2)
|
||||
expect(t, c.Bool("myflag"), true)
|
||||
}
|
||||
|
||||
func TestContext_IsSet(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool("myflag", false, "doc")
|
||||
set.String("otherflag", "hello world", "doc")
|
||||
globalSet := flag.NewFlagSet("test", 0)
|
||||
globalSet.Bool("myflagGlobal", true, "doc")
|
||||
c := cli.NewContext(nil, set, globalSet)
|
||||
set.Parse([]string{"--myflag", "bat", "baz"})
|
||||
globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"})
|
||||
expect(t, c.IsSet("myflag"), true)
|
||||
expect(t, c.IsSet("otherflag"), false)
|
||||
expect(t, c.IsSet("bogusflag"), false)
|
||||
expect(t, c.IsSet("myflagGlobal"), false)
|
||||
}
|
||||
|
||||
func TestContext_GlobalIsSet(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool("myflag", false, "doc")
|
||||
set.String("otherflag", "hello world", "doc")
|
||||
globalSet := flag.NewFlagSet("test", 0)
|
||||
globalSet.Bool("myflagGlobal", true, "doc")
|
||||
globalSet.Bool("myflagGlobalUnset", true, "doc")
|
||||
c := cli.NewContext(nil, set, globalSet)
|
||||
set.Parse([]string{"--myflag", "bat", "baz"})
|
||||
globalSet.Parse([]string{"--myflagGlobal", "bat", "baz"})
|
||||
expect(t, c.GlobalIsSet("myflag"), false)
|
||||
expect(t, c.GlobalIsSet("otherflag"), false)
|
||||
expect(t, c.GlobalIsSet("bogusflag"), false)
|
||||
expect(t, c.GlobalIsSet("myflagGlobal"), true)
|
||||
expect(t, c.GlobalIsSet("myflagGlobalUnset"), false)
|
||||
expect(t, c.GlobalIsSet("bogusGlobal"), false)
|
||||
}
|
||||
|
||||
func TestContext_NumFlags(t *testing.T) {
|
||||
set := flag.NewFlagSet("test", 0)
|
||||
set.Bool("myflag", false, "doc")
|
||||
set.String("otherflag", "hello world", "doc")
|
||||
globalSet := flag.NewFlagSet("test", 0)
|
||||
globalSet.Bool("myflagGlobal", true, "doc")
|
||||
c := cli.NewContext(nil, set, globalSet)
|
||||
set.Parse([]string{"--myflag", "--otherflag=foo"})
|
||||
globalSet.Parse([]string{"--myflagGlobal"})
|
||||
expect(t, c.NumFlags(), 2)
|
||||
}
|
180 Godeps/_workspace/src/github.com/codegangsta/cli/flag.go (generated, vendored)
@@ -4,7 +4,6 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
@ -30,13 +29,13 @@ var HelpFlag = BoolFlag{
|
||||
}
|
||||
|
||||
// Flag is a common interface related to parsing flags in cli.
|
||||
// For more advanced flag parsing techniques, it is recommended that
|
||||
// For more advanced flag parsing techniques, it is recomended that
|
||||
// this interface be implemented.
|
||||
type Flag interface {
|
||||
fmt.Stringer
|
||||
// Apply Flag settings to the given flag set
|
||||
Apply(*flag.FlagSet)
|
||||
GetName() string
|
||||
getName() string
|
||||
}
|
||||
|
||||
func flagSet(name string, flags []Flag) *flag.FlagSet {
|
||||
@ -74,18 +73,7 @@ type GenericFlag struct {
|
||||
// help text to the user (uses the String() method of the generic flag to show
|
||||
// the value)
|
||||
func (f GenericFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
|
||||
}
|
||||
|
||||
func (f GenericFlag) FormatValueHelp() string {
|
||||
if f.Value == nil {
|
||||
return ""
|
||||
}
|
||||
s := f.Value.String()
|
||||
if len(s) == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("\"%s\"", s)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s%s \"%v\"\t%v", prefixFor(f.Name), f.Name, f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply takes the flagset and calls Set on the generic flag with the value
|
||||
@ -107,31 +95,25 @@ func (f GenericFlag) Apply(set *flag.FlagSet) {
|
||||
})
|
||||
}
|
||||
|
||||
func (f GenericFlag) GetName() string {
|
||||
func (f GenericFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringSlice is an opaque type for []string to satisfy flag.Value
|
||||
type StringSlice []string
|
||||
|
||||
// Set appends the string value to the list of values
|
||||
func (f *StringSlice) Set(value string) error {
|
||||
*f = append(*f, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f *StringSlice) String() string {
|
||||
return fmt.Sprintf("%s", *f)
|
||||
}
|
||||
|
||||
// Value returns the slice of strings set by this flag
|
||||
func (f *StringSlice) Value() []string {
|
||||
return *f
|
||||
}
|
||||
|
||||
// StringSlice is a string flag that can be specified multiple times on the
|
||||
// command-line
|
||||
type StringSliceFlag struct {
|
||||
Name string
|
||||
Value *StringSlice
|
||||
@ -139,14 +121,12 @@ type StringSliceFlag struct {
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f StringSliceFlag) String() string {
|
||||
firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
|
||||
pref := prefixFor(firstName)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f StringSliceFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
@ -164,22 +144,18 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Value == nil {
|
||||
f.Value = &StringSlice{}
|
||||
}
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f StringSliceFlag) GetName() string {
|
||||
func (f StringSliceFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringSlice is an opaque type for []int to satisfy flag.Value
|
||||
type IntSlice []int
|
||||
|
||||
// Set parses the value into an integer and appends it to the list of values
|
||||
func (f *IntSlice) Set(value string) error {
|
||||
|
||||
tmp, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -189,18 +165,14 @@ func (f *IntSlice) Set(value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f *IntSlice) String() string {
|
||||
return fmt.Sprintf("%d", *f)
|
||||
}
|
||||
|
||||
// Value returns the slice of ints set by this flag
|
||||
func (f *IntSlice) Value() []int {
|
||||
return *f
|
||||
}
|
||||
|
||||
// IntSliceFlag is an int flag that can be specified multiple times on the
|
||||
// command-line
|
||||
type IntSliceFlag struct {
|
||||
Name string
|
||||
Value *IntSlice
|
||||
@ -208,14 +180,12 @@ type IntSliceFlag struct {
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f IntSliceFlag) String() string {
|
||||
firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
|
||||
pref := prefixFor(firstName)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f IntSliceFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
@ -236,31 +206,24 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Value == nil {
|
||||
f.Value = &IntSlice{}
|
||||
}
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f IntSliceFlag) GetName() string {
|
||||
func (f IntSliceFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// BoolFlag is a switch that defaults to false
|
||||
type BoolFlag struct {
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *bool
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f BoolFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f BoolFlag) Apply(set *flag.FlagSet) {
|
||||
val := false
|
||||
if f.EnvVar != "" {
|
||||
@ -277,33 +240,24 @@ func (f BoolFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.BoolVar(f.Destination, name, val, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Bool(name, val, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f BoolFlag) GetName() string {
|
||||
func (f BoolFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// BoolTFlag this represents a boolean flag that is true by default, but can
|
||||
// still be set to false by --some-flag=false
|
||||
type BoolTFlag struct {
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *bool
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f BoolTFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f BoolTFlag) Apply(set *flag.FlagSet) {
|
||||
val := true
|
||||
if f.EnvVar != "" {
|
||||
@ -320,41 +274,34 @@ func (f BoolTFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.BoolVar(f.Destination, name, val, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Bool(name, val, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f BoolTFlag) GetName() string {
|
||||
func (f BoolTFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringFlag represents a flag that takes as string value
|
||||
type StringFlag struct {
|
||||
Name string
|
||||
Value string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *string
|
||||
Name string
|
||||
Value string
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f StringFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
|
||||
}
|
||||
var fmtString string
|
||||
fmtString = "%s %v\t%v"
|
||||
|
||||
func (f StringFlag) FormatValueHelp() string {
|
||||
s := f.Value
|
||||
if len(s) == 0 {
|
||||
return ""
|
||||
if len(f.Value) > 0 {
|
||||
fmtString = "%s \"%v\"\t%v"
|
||||
} else {
|
||||
fmtString = "%s %v\t%v"
|
||||
}
|
||||
return fmt.Sprintf("\"%s\"", s)
|
||||
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf(fmtString, prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f StringFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
@ -367,34 +314,25 @@ func (f StringFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.StringVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.String(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f StringFlag) GetName() string {
|
||||
func (f StringFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// IntFlag is a flag that takes an integer
|
||||
// Errors if the value provided cannot be parsed
|
||||
type IntFlag struct {
|
||||
Name string
|
||||
Value int
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *int
|
||||
Name string
|
||||
Value int
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f IntFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f IntFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
@ -410,34 +348,25 @@ func (f IntFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.IntVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Int(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f IntFlag) GetName() string {
|
||||
func (f IntFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// DurationFlag is a flag that takes a duration specified in Go's duration
|
||||
// format: https://golang.org/pkg/time/#ParseDuration
|
||||
type DurationFlag struct {
|
||||
Name string
|
||||
Value time.Duration
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *time.Duration
|
||||
Name string
|
||||
Value time.Duration
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f DurationFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f DurationFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
@ -453,34 +382,25 @@ func (f DurationFlag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.DurationVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Duration(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f DurationFlag) GetName() string {
|
||||
func (f DurationFlag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// Float64Flag is a flag that takes an float value
|
||||
// Errors if the value provided cannot be parsed
|
||||
type Float64Flag struct {
|
||||
Name string
|
||||
Value float64
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *float64
|
||||
Name string
|
||||
Value float64
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f Float64Flag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f Float64Flag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
@ -495,15 +415,11 @@ func (f Float64Flag) Apply(set *flag.FlagSet) {
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.Float64Var(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Float64(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f Float64Flag) GetName() string {
|
||||
func (f Float64Flag) getName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
@ -532,15 +448,7 @@ func prefixedNames(fullName string) (prefixed string) {
|
||||
func withEnvHint(envVar, str string) string {
|
||||
envText := ""
|
||||
if envVar != "" {
|
||||
prefix := "$"
|
||||
suffix := ""
|
||||
sep := ", $"
|
||||
if runtime.GOOS == "windows" {
|
||||
prefix = "%"
|
||||
suffix = "%"
|
||||
sep = "%, %"
|
||||
}
|
||||
envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix)
|
||||
envText = fmt.Sprintf(" [$%s]", strings.Join(strings.Split(envVar, ","), ", $"))
|
||||
}
|
||||
return str + envText
|
||||
}
|
||||
|
742 Godeps/_workspace/src/github.com/codegangsta/cli/flag_test.go (generated, vendored, new file)
@@ -0,0 +1,742 @@
|
||||
package cli_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
var boolFlagTests = []struct {
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{"help", "--help\t"},
|
||||
{"h", "-h\t"},
|
||||
}
|
||||
|
||||
func TestBoolFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range boolFlagTests {
|
||||
flag := cli.BoolFlag{Name: test.name}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%s does not match %s", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var stringFlagTests = []struct {
|
||||
name string
|
||||
value string
|
||||
expected string
|
||||
}{
|
||||
{"help", "", "--help \t"},
|
||||
{"h", "", "-h \t"},
|
||||
{"h", "", "-h \t"},
|
||||
{"test", "Something", "--test \"Something\"\t"},
|
||||
}
|
||||
|
||||
func TestStringFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range stringFlagTests {
|
||||
flag := cli.StringFlag{Name: test.name, Value: test.value}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%s does not match %s", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringFlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_FOO", "derp")
|
||||
for _, test := range stringFlagTests {
|
||||
flag := cli.StringFlag{Name: test.name, Value: test.value, EnvVar: "APP_FOO"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_FOO]") {
|
||||
t.Errorf("%s does not end with [$APP_FOO]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var stringSliceFlagTests = []struct {
|
||||
name string
|
||||
value *cli.StringSlice
|
||||
expected string
|
||||
}{
|
||||
{"help", func() *cli.StringSlice {
|
||||
s := &cli.StringSlice{}
|
||||
s.Set("")
|
||||
return s
|
||||
}(), "--help [--help option --help option]\t"},
|
||||
{"h", func() *cli.StringSlice {
|
||||
s := &cli.StringSlice{}
|
||||
s.Set("")
|
||||
return s
|
||||
}(), "-h [-h option -h option]\t"},
|
||||
{"h", func() *cli.StringSlice {
|
||||
s := &cli.StringSlice{}
|
||||
s.Set("")
|
||||
return s
|
||||
}(), "-h [-h option -h option]\t"},
|
||||
{"test", func() *cli.StringSlice {
|
||||
s := &cli.StringSlice{}
|
||||
s.Set("Something")
|
||||
return s
|
||||
}(), "--test [--test option --test option]\t"},
|
||||
}
|
||||
|
||||
func TestStringSliceFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range stringSliceFlagTests {
|
||||
flag := cli.StringSliceFlag{Name: test.name, Value: test.value}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%q does not match %q", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringSliceFlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_QWWX", "11,4")
|
||||
for _, test := range stringSliceFlagTests {
|
||||
flag := cli.StringSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_QWWX"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_QWWX]") {
|
||||
t.Errorf("%q does not end with [$APP_QWWX]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var intFlagTests = []struct {
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{"help", "--help \"0\"\t"},
|
||||
{"h", "-h \"0\"\t"},
|
||||
}
|
||||
|
||||
func TestIntFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range intFlagTests {
|
||||
flag := cli.IntFlag{Name: test.name}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%s does not match %s", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntFlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_BAR", "2")
|
||||
for _, test := range intFlagTests {
|
||||
flag := cli.IntFlag{Name: test.name, EnvVar: "APP_BAR"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_BAR]") {
|
||||
t.Errorf("%s does not end with [$APP_BAR]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var durationFlagTests = []struct {
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{"help", "--help \"0\"\t"},
|
||||
{"h", "-h \"0\"\t"},
|
||||
}
|
||||
|
||||
func TestDurationFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range durationFlagTests {
|
||||
flag := cli.DurationFlag{Name: test.name}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%s does not match %s", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDurationFlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_BAR", "2h3m6s")
|
||||
for _, test := range durationFlagTests {
|
||||
flag := cli.DurationFlag{Name: test.name, EnvVar: "APP_BAR"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_BAR]") {
|
||||
t.Errorf("%s does not end with [$APP_BAR]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var intSliceFlagTests = []struct {
|
||||
name string
|
||||
value *cli.IntSlice
|
||||
expected string
|
||||
}{
|
||||
{"help", &cli.IntSlice{}, "--help [--help option --help option]\t"},
|
||||
{"h", &cli.IntSlice{}, "-h [-h option -h option]\t"},
|
||||
{"h", &cli.IntSlice{}, "-h [-h option -h option]\t"},
|
||||
{"test", func() *cli.IntSlice {
|
||||
i := &cli.IntSlice{}
|
||||
i.Set("9")
|
||||
return i
|
||||
}(), "--test [--test option --test option]\t"},
|
||||
}
|
||||
|
||||
func TestIntSliceFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range intSliceFlagTests {
|
||||
flag := cli.IntSliceFlag{Name: test.name, Value: test.value}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%q does not match %q", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntSliceFlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_SMURF", "42,3")
|
||||
for _, test := range intSliceFlagTests {
|
||||
flag := cli.IntSliceFlag{Name: test.name, Value: test.value, EnvVar: "APP_SMURF"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_SMURF]") {
|
||||
t.Errorf("%q does not end with [$APP_SMURF]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var float64FlagTests = []struct {
|
||||
name string
|
||||
expected string
|
||||
}{
|
||||
{"help", "--help \"0\"\t"},
|
||||
{"h", "-h \"0\"\t"},
|
||||
}
|
||||
|
||||
func TestFloat64FlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range float64FlagTests {
|
||||
flag := cli.Float64Flag{Name: test.name}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%s does not match %s", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFloat64FlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_BAZ", "99.4")
|
||||
for _, test := range float64FlagTests {
|
||||
flag := cli.Float64Flag{Name: test.name, EnvVar: "APP_BAZ"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_BAZ]") {
|
||||
t.Errorf("%s does not end with [$APP_BAZ]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var genericFlagTests = []struct {
|
||||
name string
|
||||
value cli.Generic
|
||||
expected string
|
||||
}{
|
||||
{"test", &Parser{"abc", "def"}, "--test \"abc,def\"\ttest flag"},
|
||||
{"t", &Parser{"abc", "def"}, "-t \"abc,def\"\ttest flag"},
|
||||
}
|
||||
|
||||
func TestGenericFlagHelpOutput(t *testing.T) {
|
||||
|
||||
for _, test := range genericFlagTests {
|
||||
flag := cli.GenericFlag{Name: test.name, Value: test.value, Usage: "test flag"}
|
||||
output := flag.String()
|
||||
|
||||
if output != test.expected {
|
||||
t.Errorf("%q does not match %q", output, test.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenericFlagWithEnvVarHelpOutput(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_ZAP", "3")
|
||||
for _, test := range genericFlagTests {
|
||||
flag := cli.GenericFlag{Name: test.name, EnvVar: "APP_ZAP"}
|
||||
output := flag.String()
|
||||
|
||||
if !strings.HasSuffix(output, " [$APP_ZAP]") {
|
||||
t.Errorf("%s does not end with [$APP_ZAP]", output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseMultiString(t *testing.T) {
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{Name: "serve, s"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.String("serve") != "10" {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.String("s") != "10" {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run", "-s", "10"})
|
||||
}
|
||||
|
||||
func TestParseMultiStringFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_COUNT", "20")
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{Name: "count, c", EnvVar: "APP_COUNT"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.String("count") != "20" {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.String("c") != "20" {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiStringFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_COUNT", "20")
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{Name: "count, c", EnvVar: "COMPAT_COUNT,APP_COUNT"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.String("count") != "20" {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.String("c") != "20" {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run"})
|
||||
}
|
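The cascade tests here exercise EnvVar as a comma-separated list of variable names; the first variable in the list that is actually set in the environment supplies the flag's value. A minimal sketch of an application relying on that behaviour follows (not part of this diff; the upstream import path and the COMPAT_COUNT/APP_COUNT names are taken from the tests above):

// Sketch only, assuming the upstream import path; inside go-ethereum the
// package is vendored under Godeps/_workspace.
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		// Only APP_COUNT is expected to be set, so it supplies --count/-c
		// even though COMPAT_COUNT is listed first.
		cli.StringFlag{Name: "count, c", Value: "0", EnvVar: "COMPAT_COUNT,APP_COUNT"},
	}
	app.Action = func(ctx *cli.Context) {
		fmt.Println("count =", ctx.String("count"))
	}
	app.Run(os.Args)
}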
||||
|
||||
func TestParseMultiStringSlice(t *testing.T) {
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.StringSliceFlag{Name: "serve, s", Value: &cli.StringSlice{}},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.StringSlice("serve"), []string{"10", "20"}) {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.StringSlice("s"), []string{"10", "20"}) {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run", "-s", "10", "-s", "20"})
|
||||
}
|
||||
|
||||
func TestParseMultiStringSliceFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_INTERVALS", "20,30,40")
|
||||
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "APP_INTERVALS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiStringSliceFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_INTERVALS", "20,30,40")
|
||||
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.StringSliceFlag{Name: "intervals, i", Value: &cli.StringSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.StringSlice("intervals"), []string{"20", "30", "40"}) {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.StringSlice("i"), []string{"20", "30", "40"}) {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiInt(t *testing.T) {
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.IntFlag{Name: "serve, s"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Int("serve") != 10 {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Int("s") != 10 {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run", "-s", "10"})
|
||||
}
|
||||
|
||||
func TestParseMultiIntFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_TIMEOUT_SECONDS", "10")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.IntFlag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Int("timeout") != 10 {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Int("t") != 10 {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiIntFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_TIMEOUT_SECONDS", "10")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.IntFlag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Int("timeout") != 10 {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Int("t") != 10 {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiIntSlice(t *testing.T) {
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.IntSliceFlag{Name: "serve, s", Value: &cli.IntSlice{}},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.IntSlice("serve"), []int{10, 20}) {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.IntSlice("s"), []int{10, 20}) {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run", "-s", "10", "-s", "20"})
|
||||
}
|
||||
|
||||
func TestParseMultiIntSliceFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_INTERVALS", "20,30,40")
|
||||
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "APP_INTERVALS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiIntSliceFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_INTERVALS", "20,30,40")
|
||||
|
||||
(&cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.IntSliceFlag{Name: "intervals, i", Value: &cli.IntSlice{}, EnvVar: "COMPAT_INTERVALS,APP_INTERVALS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.IntSlice("intervals"), []int{20, 30, 40}) {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.IntSlice("i"), []int{20, 30, 40}) {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}).Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiFloat64(t *testing.T) {
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.Float64Flag{Name: "serve, s"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Float64("serve") != 10.2 {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Float64("s") != 10.2 {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run", "-s", "10.2"})
|
||||
}
|
||||
|
||||
func TestParseMultiFloat64FromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.Float64Flag{Name: "timeout, t", EnvVar: "APP_TIMEOUT_SECONDS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Float64("timeout") != 15.5 {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Float64("t") != 15.5 {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiFloat64FromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_TIMEOUT_SECONDS", "15.5")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.Float64Flag{Name: "timeout, t", EnvVar: "COMPAT_TIMEOUT_SECONDS,APP_TIMEOUT_SECONDS"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Float64("timeout") != 15.5 {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Float64("t") != 15.5 {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiBool(t *testing.T) {
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{Name: "serve, s"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Bool("serve") != true {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.Bool("s") != true {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run", "--serve"})
|
||||
}
|
||||
|
||||
func TestParseMultiBoolFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_DEBUG", "1")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Bool("debug") != true {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if ctx.Bool("d") != true {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiBoolFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_DEBUG", "1")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.Bool("debug") != true {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if ctx.Bool("d") != true {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiBoolT(t *testing.T) {
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolTFlag{Name: "serve, s"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.BoolT("serve") != true {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if ctx.BoolT("s") != true {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run", "--serve"})
|
||||
}
|
||||
|
||||
func TestParseMultiBoolTFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_DEBUG", "0")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.BoolT("debug") != false {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if ctx.BoolT("d") != false {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseMultiBoolTFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_DEBUG", "0")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolTFlag{Name: "debug, d", EnvVar: "COMPAT_DEBUG,APP_DEBUG"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if ctx.BoolT("debug") != false {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if ctx.BoolT("d") != false {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
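BoolTFlag is the default-true variant exercised here: without the flag or an environment override it reads as true, and an env value of "0" (as set above) flips it to false. A minimal sketch, assuming the upstream import path:

// Sketch only: APP_DEBUG=0 in the environment turns the flag off without
// passing --debug on the command line, mirroring the tests above.
package main

import (
	"fmt"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolTFlag{Name: "debug, d", EnvVar: "APP_DEBUG"},
	}
	app.Action = func(ctx *cli.Context) {
		fmt.Println("debug enabled:", ctx.BoolT("debug"))
	}
	app.Run(os.Args)
}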
||||
|
||||
type Parser [2]string
|
||||
|
||||
func (p *Parser) Set(value string) error {
|
||||
parts := strings.Split(value, ",")
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("invalid format")
|
||||
}
|
||||
|
||||
(*p)[0] = parts[0]
|
||||
(*p)[1] = parts[1]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Parser) String() string {
|
||||
return fmt.Sprintf("%s,%s", p[0], p[1])
|
||||
}
|
||||
|
||||
func TestParseGeneric(t *testing.T) {
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.GenericFlag{Name: "serve, s", Value: &Parser{}},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"10", "20"}) {
|
||||
t.Errorf("main name not set")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"10", "20"}) {
|
||||
t.Errorf("short name not set")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run", "-s", "10,20"})
|
||||
}
|
||||
|
||||
func TestParseGenericFromEnv(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_SERVE", "20,30")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.GenericFlag{Name: "serve, s", Value: &Parser{}, EnvVar: "APP_SERVE"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.Generic("serve"), &Parser{"20", "30"}) {
|
||||
t.Errorf("main name not set from env")
|
||||
}
|
||||
if !reflect.DeepEqual(ctx.Generic("s"), &Parser{"20", "30"}) {
|
||||
t.Errorf("short name not set from env")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
||||
|
||||
func TestParseGenericFromEnvCascade(t *testing.T) {
|
||||
os.Clearenv()
|
||||
os.Setenv("APP_FOO", "99,2000")
|
||||
a := cli.App{
|
||||
Flags: []cli.Flag{
|
||||
cli.GenericFlag{Name: "foos", Value: &Parser{}, EnvVar: "COMPAT_FOO,APP_FOO"},
|
||||
},
|
||||
Action: func(ctx *cli.Context) {
|
||||
if !reflect.DeepEqual(ctx.Generic("foos"), &Parser{"99", "2000"}) {
|
||||
t.Errorf("value not set from env")
|
||||
}
|
||||
},
|
||||
}
|
||||
a.Run([]string{"run"})
|
||||
}
|
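The Parser type above is a cli.Generic implementation (Set/String), and the TestParseGeneric* functions show it being filled either from -s or from an environment variable. A standalone sketch of the same wiring follows (not part of this diff; the hostPort type and flag names are illustrative):

// Sketch only, assuming the upstream import path.
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/codegangsta/cli"
)

// hostPort is a minimal cli.Generic implementation in the spirit of the
// Parser type used by the tests above.
type hostPort [2]string

func (hp *hostPort) Set(value string) error {
	parts := strings.Split(value, ",")
	if len(parts) != 2 {
		return fmt.Errorf("expected host,port")
	}
	hp[0], hp[1] = parts[0], parts[1]
	return nil
}

func (hp *hostPort) String() string { return hp[0] + "," + hp[1] }

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.GenericFlag{Name: "serve, s", Value: &hostPort{}, Usage: "host,port to listen on"},
	}
	app.Action = func(ctx *cli.Context) {
		// ctx.Generic returns the flag's Value; a type assertion recovers it.
		if hp, ok := ctx.Generic("serve").(*hostPort); ok {
			fmt.Println("listening on", hp.String())
		}
	}
	app.Run(os.Args)
}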
113 Godeps/_workspace/src/github.com/codegangsta/cli/help.go generated vendored
@ -1,12 +1,6 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
)
|
||||
import "fmt"
|
||||
|
||||
// The text template for the Default help topic.
|
||||
// cli.go uses text/template to render templates. You can
|
||||
@ -15,33 +9,30 @@ var AppHelpTemplate = `NAME:
|
||||
{{.Name}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
|
||||
{{if .Version}}
|
||||
{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
|
||||
VERSION:
|
||||
{{.Version}}
|
||||
{{end}}{{if len .Authors}}
|
||||
AUTHOR(S):
|
||||
{{range .Authors}}{{ . }}{{end}}
|
||||
{{end}}{{if .Commands}}
|
||||
|
||||
AUTHOR(S):
|
||||
{{range .Authors}}{{ . }}
|
||||
{{end}}
|
||||
COMMANDS:
|
||||
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
{{end}}{{if .Flags}}
|
||||
GLOBAL OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{end}}{{if .Copyright }}
|
||||
COPYRIGHT:
|
||||
{{.Copyright}}
|
||||
{{end}}
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
// The text template for the command help topic.
|
||||
// cli.go uses text/template to render templates. You can
|
||||
// render custom help text by setting this variable.
|
||||
var CommandHelpTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
{{.Name}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}}
|
||||
command {{.Name}}{{if .Flags}} [command options]{{end}} [arguments...]{{if .Description}}
|
||||
|
||||
DESCRIPTION:
|
||||
{{.Description}}{{end}}{{if .Flags}}
|
||||
@ -55,10 +46,10 @@ OPTIONS:
|
||||
// cli.go uses text/template to render templates. You can
|
||||
// render custom help text by setting this variable.
|
||||
var SubcommandHelpTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
{{.Name}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
|
||||
{{.Name}} command{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
|
||||
COMMANDS:
|
||||
{{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
|
||||
@ -69,10 +60,9 @@ OPTIONS:
|
||||
`
|
||||
|
||||
var helpCommand = Command{
|
||||
Name: "help",
|
||||
Aliases: []string{"h"},
|
||||
Usage: "Shows a list of commands or help for one command",
|
||||
ArgsUsage: "[command]",
|
||||
Name: "help",
|
||||
Aliases: []string{"h"},
|
||||
Usage: "Shows a list of commands or help for one command",
|
||||
Action: func(c *Context) {
|
||||
args := c.Args()
|
||||
if args.Present() {
|
||||
@ -84,10 +74,9 @@ var helpCommand = Command{
|
||||
}
|
||||
|
||||
var helpSubcommand = Command{
|
||||
Name: "help",
|
||||
Aliases: []string{"h"},
|
||||
Usage: "Shows a list of commands or help for one command",
|
||||
ArgsUsage: "[command]",
|
||||
Name: "help",
|
||||
Aliases: []string{"h"},
|
||||
Usage: "Shows a list of commands or help for one command",
|
||||
Action: func(c *Context) {
|
||||
args := c.Args()
|
||||
if args.Present() {
|
||||
@ -98,16 +87,16 @@ var helpSubcommand = Command{
|
||||
},
|
||||
}
|
||||
|
||||
// Prints help for the App or Command
|
||||
type helpPrinter func(w io.Writer, templ string, data interface{})
|
||||
// Prints help for the App
|
||||
type helpPrinter func(templ string, data interface{})
|
||||
|
||||
var HelpPrinter helpPrinter = printHelp
|
||||
var HelpPrinter helpPrinter = nil
|
||||
|
||||
// Prints version for the App
|
||||
var VersionPrinter = printVersion
|
||||
|
||||
func ShowAppHelp(c *Context) {
|
||||
HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
|
||||
HelpPrinter(AppHelpTemplate, c.App)
|
||||
}
|
||||
|
||||
// Prints the list of subcommands as the default app completion method
|
||||
@ -120,24 +109,24 @@ func DefaultAppComplete(c *Context) {
|
||||
}
|
||||
|
||||
// Prints help for the given command
|
||||
func ShowCommandHelp(ctx *Context, command string) {
|
||||
func ShowCommandHelp(c *Context, command string) {
|
||||
// show the subcommand help for a command with subcommands
|
||||
if command == "" {
|
||||
HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
|
||||
HelpPrinter(SubcommandHelpTemplate, c.App)
|
||||
return
|
||||
}
|
||||
|
||||
for _, c := range ctx.App.Commands {
|
||||
for _, c := range c.App.Commands {
|
||||
if c.HasName(command) {
|
||||
HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
|
||||
HelpPrinter(CommandHelpTemplate, c)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.App.CommandNotFound != nil {
|
||||
ctx.App.CommandNotFound(ctx, command)
|
||||
if c.App.CommandNotFound != nil {
|
||||
c.App.CommandNotFound(c, command)
|
||||
} else {
|
||||
fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command)
|
||||
fmt.Fprintf(c.App.Writer, "No help topic for '%v'\n", command)
|
||||
}
|
||||
}
|
||||
|
||||
@ -171,44 +160,22 @@ func ShowCommandCompletions(ctx *Context, command string) {
|
||||
}
|
||||
}
|
||||
|
||||
func printHelp(out io.Writer, templ string, data interface{}) {
|
||||
funcMap := template.FuncMap{
|
||||
"join": strings.Join,
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0)
|
||||
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
|
||||
err := t.Execute(w, data)
|
||||
if err != nil {
|
||||
// If the writer is closed, t.Execute will fail, and there's nothing
|
||||
// we can do to recover. We could send this to os.Stderr if we need.
|
||||
return
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
|
||||
func checkVersion(c *Context) bool {
|
||||
found := false
|
||||
if VersionFlag.Name != "" {
|
||||
eachName(VersionFlag.Name, func(name string) {
|
||||
if c.GlobalBool(name) || c.Bool(name) {
|
||||
found = true
|
||||
}
|
||||
})
|
||||
if c.GlobalBool("version") {
|
||||
ShowVersion(c)
|
||||
return true
|
||||
}
|
||||
return found
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func checkHelp(c *Context) bool {
|
||||
found := false
|
||||
if HelpFlag.Name != "" {
|
||||
eachName(HelpFlag.Name, func(name string) {
|
||||
if c.GlobalBool(name) || c.Bool(name) {
|
||||
found = true
|
||||
}
|
||||
})
|
||||
if c.GlobalBool("h") || c.GlobalBool("help") {
|
||||
ShowAppHelp(c)
|
||||
return true
|
||||
}
|
||||
return found
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func checkCommandHelp(c *Context, name string) bool {
|
||||
|
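This hunk changes the HelpPrinter hook to take an io.Writer and makes printHelp (tabwriter plus text/template with a "join" function) the default instead of nil. A hedged sketch of overriding it with the new signature (not part of the diff; the header text is illustrative):

// Sketch only: decorate the default help output by reassigning cli.HelpPrinter.
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	// Keep the default printer around and wrap it.
	defaultPrinter := cli.HelpPrinter
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "=== custom help header ===")
		defaultPrinter(w, templ, data)
	}

	app := cli.NewApp()
	app.Name = "demo"
	app.Run(os.Args)
}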
19 Godeps/_workspace/src/github.com/codegangsta/cli/helpers_test.go generated vendored Normal file
@ -0,0 +1,19 @@
package cli_test

import (
	"reflect"
	"testing"
)

/* Test Helpers */
func expect(t *testing.T, a interface{}, b interface{}) {
	if a != b {
		t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
	}
}

func refute(t *testing.T, a interface{}, b interface{}) {
	if a == b {
		t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
	}
}
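These helpers are the suite's tiny assertion shims; a call site simply passes got and want. Illustrative sketch only (it assumes it lives in the same cli_test package, so "testing" is already imported):

// expect passes when the values are equal, refute when they differ.
func TestHelperUsage(t *testing.T) {
	got := 2 + 2
	expect(t, got, 4) // equal: no error reported
	refute(t, got, 5) // different: no error reported
}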
13 Godeps/_workspace/src/github.com/davecgh/go-spew/LICENSE generated vendored
@ -1,13 +0,0 @@
|
||||
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
151 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypass.go generated vendored
@ -1,151 +0,0 @@
|
||||
// Copyright (c) 2015 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine and "-tags disableunsafe"
|
||||
// is not added to the go build command line.
|
||||
// +build !appengine,!disableunsafe
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. The are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
|
||||
// Commit adf9b30e5594 modified the flags to separate the
|
||||
// flagRO flag into two bits which specifies whether or not the
|
||||
// field is embedded. This causes flagIndir to move over a bit
|
||||
// and means that flagRO is the combination of either of the
|
||||
// original flagRO bit and the new bit.
|
||||
//
|
||||
// This code detects the change by extracting what used to be
|
||||
// the indirect bit to ensure it's set. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are
|
||||
// updated accordingly.
|
||||
if upfv&flagIndir == 0 {
|
||||
flagRO = 3 << 5
|
||||
flagIndir = 1 << 7
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
}
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
return rv
|
||||
}
|
37 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored
@ -1,37 +0,0 @@
|
||||
// Copyright (c) 2015 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when either the code is running on Google App Engine or "-tags disableunsafe"
|
||||
// is added to the go build command line.
|
||||
// +build appengine disableunsafe
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
135 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go generated vendored
@ -23,8 +23,116 @@ import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. They are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
}
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
}
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
@ -86,14 +194,9 @@ func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool)
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
// to enforce visibility rules. We use unsafe to bypass these restrictions
|
||||
// since this package does not mutate the values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
@ -103,15 +206,21 @@ func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool)
|
||||
// mutate the value, however, types which choose to satisfy an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
var viface interface{}
|
||||
if !cs.DisablePointerMethods {
|
||||
if !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
viface = v.Addr().Interface()
|
||||
} else {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
viface = v.Interface()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
switch iface := viface.(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
|
298 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common_test.go generated vendored Normal file
@ -0,0 +1,298 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
// custom type to test Stringer interface on non-pointer receiver.
|
||||
type stringer string
|
||||
|
||||
// String implements the Stringer interface for testing invocation of custom
|
||||
// stringers on types with non-pointer receivers.
|
||||
func (s stringer) String() string {
|
||||
return "stringer " + string(s)
|
||||
}
|
||||
|
||||
// custom type to test Stringer interface on pointer receiver.
|
||||
type pstringer string
|
||||
|
||||
// String implements the Stringer interface for testing invocation of custom
|
||||
// stringers on types with only pointer receivers.
|
||||
func (s *pstringer) String() string {
|
||||
return "stringer " + string(*s)
|
||||
}
|
||||
|
||||
// xref1 and xref2 are cross referencing structs for testing circular reference
|
||||
// detection.
|
||||
type xref1 struct {
|
||||
ps2 *xref2
|
||||
}
|
||||
type xref2 struct {
|
||||
ps1 *xref1
|
||||
}
|
||||
|
||||
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
|
||||
// reference for testing detection.
|
||||
type indirCir1 struct {
|
||||
ps2 *indirCir2
|
||||
}
|
||||
type indirCir2 struct {
|
||||
ps3 *indirCir3
|
||||
}
|
||||
type indirCir3 struct {
|
||||
ps1 *indirCir1
|
||||
}
|
||||
|
||||
// embed is used to test embedded structures.
|
||||
type embed struct {
|
||||
a string
|
||||
}
|
||||
|
||||
// embedwrap is used to test embedded structures.
|
||||
type embedwrap struct {
|
||||
*embed
|
||||
e *embed
|
||||
}
|
||||
|
||||
// panicer is used to intentionally cause a panic for testing spew properly
|
||||
// handles them
|
||||
type panicer int
|
||||
|
||||
func (p panicer) String() string {
|
||||
panic("test panic")
|
||||
}
|
||||
|
||||
// customError is used to test custom error interface invocation.
|
||||
type customError int
|
||||
|
||||
func (e customError) Error() string {
|
||||
return fmt.Sprintf("error: %d", int(e))
|
||||
}
|
||||
|
||||
// stringizeWants converts a slice of wanted test output into a format suitable
|
||||
// for a test error message.
|
||||
func stringizeWants(wants []string) string {
|
||||
s := ""
|
||||
for i, want := range wants {
|
||||
if i > 0 {
|
||||
s += fmt.Sprintf("want%d: %s", i+1, want)
|
||||
} else {
|
||||
s += "want: " + want
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// testFailed returns whether or not a test failed by checking if the result
|
||||
// of the test is in the slice of wanted strings.
|
||||
func testFailed(result string, wants []string) bool {
|
||||
for _, want := range wants {
|
||||
if result == want {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type sortableStruct struct {
|
||||
x int
|
||||
}
|
||||
|
||||
func (ss sortableStruct) String() string {
|
||||
return fmt.Sprintf("ss.%d", ss.x)
|
||||
}
|
||||
|
||||
type unsortableStruct struct {
|
||||
x int
|
||||
}
|
||||
|
||||
type sortTestCase struct {
|
||||
input []reflect.Value
|
||||
expected []reflect.Value
|
||||
}
|
||||
|
||||
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
|
||||
getInterfaces := func(values []reflect.Value) []interface{} {
|
||||
interfaces := []interface{}{}
|
||||
for _, v := range values {
|
||||
interfaces = append(interfaces, v.Interface())
|
||||
}
|
||||
return interfaces
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
spew.SortValues(test.input, cs)
|
||||
// reflect.DeepEqual cannot really make sense of reflect.Value,
|
||||
// probably because of all the pointer tricks. For instance,
|
||||
// v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{}
|
||||
// instead.
|
||||
input := getInterfaces(test.input)
|
||||
expected := getInterfaces(test.expected)
|
||||
if !reflect.DeepEqual(input, expected) {
|
||||
t.Errorf("Sort mismatch:\n %v != %v", input, expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestSortValues ensures the sort functionality for reflect.Value based sorting
|
||||
// works as intended.
|
||||
func TestSortValues(t *testing.T) {
|
||||
v := reflect.ValueOf
|
||||
|
||||
a := v("a")
|
||||
b := v("b")
|
||||
c := v("c")
|
||||
embedA := v(embed{"a"})
|
||||
embedB := v(embed{"b"})
|
||||
embedC := v(embed{"c"})
|
||||
tests := []sortTestCase{
|
||||
// No values.
|
||||
{
|
||||
[]reflect.Value{},
|
||||
[]reflect.Value{},
|
||||
},
|
||||
// Bools.
|
||||
{
|
||||
[]reflect.Value{v(false), v(true), v(false)},
|
||||
[]reflect.Value{v(false), v(false), v(true)},
|
||||
},
|
||||
// Ints.
|
||||
{
|
||||
[]reflect.Value{v(2), v(1), v(3)},
|
||||
[]reflect.Value{v(1), v(2), v(3)},
|
||||
},
|
||||
// Uints.
|
||||
{
|
||||
[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
|
||||
[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
|
||||
},
|
||||
// Floats.
|
||||
{
|
||||
[]reflect.Value{v(2.0), v(1.0), v(3.0)},
|
||||
[]reflect.Value{v(1.0), v(2.0), v(3.0)},
|
||||
},
|
||||
// Strings.
|
||||
{
|
||||
[]reflect.Value{b, a, c},
|
||||
[]reflect.Value{a, b, c},
|
||||
},
|
||||
// Array
|
||||
{
|
||||
[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
|
||||
[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
|
||||
},
|
||||
// Uintptrs.
|
||||
{
|
||||
[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
|
||||
[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
|
||||
},
|
||||
// SortableStructs.
|
||||
{
|
||||
// Note: not sorted - DisableMethods is set.
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
},
|
||||
// UnsortableStructs.
|
||||
{
|
||||
// Note: not sorted - SpewKeys is false.
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
},
|
||||
// Invalid.
|
||||
{
|
||||
[]reflect.Value{embedB, embedA, embedC},
|
||||
[]reflect.Value{embedB, embedA, embedC},
|
||||
},
|
||||
}
|
||||
cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
|
||||
helpTestSortValues(tests, &cs, t)
|
||||
}
|
||||
|
||||
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
|
||||
// based sorting works as intended when using string methods.
|
||||
func TestSortValuesWithMethods(t *testing.T) {
|
||||
v := reflect.ValueOf
|
||||
|
||||
a := v("a")
|
||||
b := v("b")
|
||||
c := v("c")
|
||||
tests := []sortTestCase{
|
||||
// Ints.
|
||||
{
|
||||
[]reflect.Value{v(2), v(1), v(3)},
|
||||
[]reflect.Value{v(1), v(2), v(3)},
|
||||
},
|
||||
// Strings.
|
||||
{
|
||||
[]reflect.Value{b, a, c},
|
||||
[]reflect.Value{a, b, c},
|
||||
},
|
||||
// SortableStructs.
|
||||
{
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
|
||||
},
|
||||
// UnsortableStructs.
|
||||
{
|
||||
// Note: not sorted - SpewKeys is false.
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
},
|
||||
}
|
||||
cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
|
||||
helpTestSortValues(tests, &cs, t)
|
||||
}
|
||||
|
||||
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
|
||||
// based sorting works as intended when using spew to stringify keys.
|
||||
func TestSortValuesWithSpew(t *testing.T) {
|
||||
v := reflect.ValueOf
|
||||
|
||||
a := v("a")
|
||||
b := v("b")
|
||||
c := v("c")
|
||||
tests := []sortTestCase{
|
||||
// Ints.
|
||||
{
|
||||
[]reflect.Value{v(2), v(1), v(3)},
|
||||
[]reflect.Value{v(1), v(2), v(3)},
|
||||
},
|
||||
// Strings.
|
||||
{
|
||||
[]reflect.Value{b, a, c},
|
||||
[]reflect.Value{a, b, c},
|
||||
},
|
||||
// SortableStructs.
|
||||
{
|
||||
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
|
||||
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
|
||||
},
|
||||
// UnsortableStructs.
|
||||
{
|
||||
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
|
||||
[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
|
||||
},
|
||||
}
|
||||
cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
|
||||
helpTestSortValues(tests, &cs, t)
|
||||
}
|
5 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go generated vendored
@ -61,10 +61,7 @@ type ConfigState struct {
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods. As a result, this option relies on
	// access to the unsafe package, so it will not have any effect when
	// running in environments without access to the unsafe package such as
	// Google App Engine or with the "disableunsafe" build tag specified.
	// inside these interface methods.
	DisablePointerMethods bool

	// ContinueOnMethod specifies whether or not recursion should continue once
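The trimmed comment above documents DisablePointerMethods, the knob served by the handleMethods changes in common.go. A short sketch of toggling it on a ConfigState (not part of the diff; the pstringer type mirrors the one in common_test.go):

// Sketch only: compare default behaviour with DisablePointerMethods.
package main

import (
	"github.com/davecgh/go-spew/spew"
)

type pstringer string

func (s *pstringer) String() string { return "stringer " + string(*s) }

func main() {
	v := struct{ p pstringer }{p: "abc"}

	// Default config: pointer-receiver String methods may be invoked even on
	// unaddressable, unexported values (the unsafeReflectValue path above).
	spew.Dump(v)

	// With DisablePointerMethods set, spew falls back to its own formatting.
	cs := spew.ConfigState{Indent: " ", DisablePointerMethods: true}
	cs.Dump(v)
}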
29 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go generated vendored
@ -181,28 +181,25 @@ func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// We need an addressable interface to convert the type back
|
||||
// into a byte slice. However, the reflect package won't give
|
||||
// us an interface on certain things like unexported struct
|
||||
// fields in order to enforce visibility rules. We use unsafe
|
||||
// to bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
// Use the existing uint8 slice if it can be type
|
||||
// asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
|
1021 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go generated vendored Normal file
File diff suppressed because it is too large
97 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpcgo_test.go generated vendored Normal file
@ -0,0 +1,97 @@
|
||||
// Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when both cgo is supported and "-tags testcgo" is added to the go test
|
||||
// command line. This means the cgo tests are only added (and hence run) when
|
||||
// specifically requested. This configuration is used because spew itself
|
||||
// does not require cgo to run even though it does handle certain cgo types
|
||||
// specially. Rather than forcing all clients to require cgo and an external
|
||||
// C compiler just to run the tests, this scheme makes them optional.
|
||||
// +build cgo,testcgo
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/davecgh/go-spew/spew/testdata"
|
||||
)
|
||||
|
||||
func addCgoDumpTests() {
|
||||
// C char pointer.
|
||||
v := testdata.GetCgoCharPointer()
|
||||
nv := testdata.GetCgoNullCharPointer()
|
||||
pv := &v
|
||||
vcAddr := fmt.Sprintf("%p", v)
|
||||
vAddr := fmt.Sprintf("%p", pv)
|
||||
pvAddr := fmt.Sprintf("%p", &pv)
|
||||
vt := "*testdata._Ctype_char"
|
||||
vs := "116"
|
||||
addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
|
||||
addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
|
||||
addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
|
||||
addDumpTest(nv, "("+vt+")(<nil>)\n")
|
||||
|
||||
// C char array.
|
||||
v2, v2l, v2c := testdata.GetCgoCharArray()
|
||||
v2Len := fmt.Sprintf("%d", v2l)
|
||||
v2Cap := fmt.Sprintf("%d", v2c)
|
||||
v2t := "[6]testdata._Ctype_char"
|
||||
v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 32 00 " +
|
||||
" |test2.|\n}"
|
||||
addDumpTest(v2, "("+v2t+") "+v2s+"\n")
|
||||
|
||||
// C unsigned char array.
|
||||
v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
|
||||
v3Len := fmt.Sprintf("%d", v3l)
|
||||
v3Cap := fmt.Sprintf("%d", v3c)
|
||||
v3t := "[6]testdata._Ctype_unsignedchar"
|
||||
v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 33 00 " +
|
||||
" |test3.|\n}"
|
||||
addDumpTest(v3, "("+v3t+") "+v3s+"\n")
|
||||
|
||||
// C signed char array.
|
||||
v4, v4l, v4c := testdata.GetCgoSignedCharArray()
|
||||
v4Len := fmt.Sprintf("%d", v4l)
|
||||
v4Cap := fmt.Sprintf("%d", v4c)
|
||||
v4t := "[6]testdata._Ctype_schar"
|
||||
v4t2 := "testdata._Ctype_schar"
|
||||
v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
|
||||
"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
|
||||
") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
|
||||
") 0\n}"
|
||||
addDumpTest(v4, "("+v4t+") "+v4s+"\n")
|
||||
|
||||
// C uint8_t array.
|
||||
v5, v5l, v5c := testdata.GetCgoUint8tArray()
|
||||
v5Len := fmt.Sprintf("%d", v5l)
|
||||
v5Cap := fmt.Sprintf("%d", v5c)
|
||||
v5t := "[6]testdata._Ctype_uint8_t"
|
||||
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 35 00 " +
|
||||
" |test5.|\n}"
|
||||
addDumpTest(v5, "("+v5t+") "+v5s+"\n")
|
||||
|
||||
// C typedefed unsigned char array.
|
||||
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
|
||||
v6Len := fmt.Sprintf("%d", v6l)
|
||||
v6Cap := fmt.Sprintf("%d", v6c)
|
||||
v6t := "[6]testdata._Ctype_custom_uchar_t"
|
||||
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
|
||||
"{\n 00000000 74 65 73 74 36 00 " +
|
||||
" |test6.|\n}"
|
||||
addDumpTest(v6, "("+v6t+") "+v6s+"\n")
|
||||
}
|
26 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dumpnocgo_test.go generated vendored Normal file
@ -0,0 +1,26 @@
|
||||
// Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when either cgo is not supported or "-tags testcgo" is not added to the go
|
||||
// test command line. This file intentionally does not setup any cgo tests in
|
||||
// this scenario.
|
||||
// +build !cgo !testcgo
|
||||
|
||||
package spew_test
|
||||
|
||||
func addCgoDumpTests() {
|
||||
// Don't add any tests for cgo since this file is only compiled when
|
||||
// there should not be any cgo tests.
|
||||
}
|
230 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/example_test.go generated vendored Normal file
@ -0,0 +1,230 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
type Flag int
|
||||
|
||||
const (
|
||||
flagOne Flag = iota
|
||||
flagTwo
|
||||
)
|
||||
|
||||
var flagStrings = map[Flag]string{
|
||||
flagOne: "flagOne",
|
||||
flagTwo: "flagTwo",
|
||||
}
|
||||
|
||||
func (f Flag) String() string {
|
||||
if s, ok := flagStrings[f]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown flag (%d)", int(f))
|
||||
}
|
||||
|
||||
type Bar struct {
|
||||
flag Flag
|
||||
data uintptr
|
||||
}
|
||||
|
||||
type Foo struct {
|
||||
unexportedField Bar
|
||||
ExportedField map[interface{}]interface{}
|
||||
}
|
||||
|
||||
// This example demonstrates how to use Dump to dump variables to stdout.
|
||||
func ExampleDump() {
|
||||
// The following package level declarations are assumed for this example:
|
||||
/*
|
||||
type Flag int
|
||||
|
||||
const (
|
||||
flagOne Flag = iota
|
||||
flagTwo
|
||||
)
|
||||
|
||||
var flagStrings = map[Flag]string{
|
||||
flagOne: "flagOne",
|
||||
flagTwo: "flagTwo",
|
||||
}
|
||||
|
||||
func (f Flag) String() string {
|
||||
if s, ok := flagStrings[f]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown flag (%d)", int(f))
|
||||
}
|
||||
|
||||
type Bar struct {
|
||||
flag Flag
|
||||
data uintptr
|
||||
}
|
||||
|
||||
type Foo struct {
|
||||
unexportedField Bar
|
||||
ExportedField map[interface{}]interface{}
|
||||
}
|
||||
*/
|
||||
|
||||
// Setup some sample data structures for the example.
|
||||
bar := Bar{Flag(flagTwo), uintptr(0)}
|
||||
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
|
||||
f := Flag(5)
|
||||
b := []byte{
|
||||
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
|
||||
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
|
||||
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
|
||||
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
|
||||
0x31, 0x32,
|
||||
}
|
||||
|
||||
// Dump!
|
||||
spew.Dump(s1, f, b)
|
||||
|
||||
// Output:
|
||||
// (spew_test.Foo) {
|
||||
// unexportedField: (spew_test.Bar) {
|
||||
// flag: (spew_test.Flag) flagTwo,
|
||||
// data: (uintptr) <nil>
|
||||
// },
|
||||
// ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
// (string) (len=3) "one": (bool) true
|
||||
// }
|
||||
// }
|
||||
// (spew_test.Flag) Unknown flag (5)
|
||||
// ([]uint8) (len=34 cap=34) {
|
||||
// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
// 00000020 31 32 |12|
|
||||
// }
|
||||
//
|
||||
}
|
||||
|
||||
// This example demonstrates how to use Printf to display a variable with a
|
||||
// format string and inline formatting.
|
||||
func ExamplePrintf() {
|
||||
// Create a double pointer to a uint 8.
|
||||
ui8 := uint8(5)
|
||||
pui8 := &ui8
|
||||
ppui8 := &pui8
|
||||
|
||||
// Create a circular data type.
|
||||
type circular struct {
|
||||
ui8 uint8
|
||||
c *circular
|
||||
}
|
||||
c := circular{ui8: 1}
|
||||
c.c = &c
|
||||
|
||||
// Print!
|
||||
spew.Printf("ppui8: %v\n", ppui8)
|
||||
spew.Printf("circular: %v\n", c)
|
||||
|
||||
// Output:
|
||||
// ppui8: <**>5
|
||||
// circular: {1 <*>{1 <*><shown>}}
|
||||
}
|
||||
|
||||
// This example demonstrates how to use a ConfigState.
|
||||
func ExampleConfigState() {
|
||||
// Modify the indent level of the ConfigState only. The global
|
||||
// configuration is not modified.
|
||||
scs := spew.ConfigState{Indent: "\t"}
|
||||
|
||||
// Output using the ConfigState instance.
|
||||
v := map[string]int{"one": 1}
|
||||
scs.Printf("v: %v\n", v)
|
||||
scs.Dump(v)
|
||||
|
||||
// Output:
|
||||
// v: map[one:1]
|
||||
// (map[string]int) (len=1) {
|
||||
// (string) (len=3) "one": (int) 1
|
||||
// }
|
||||
}
|
||||
|
||||
// This example demonstrates how to use ConfigState.Dump to dump variables to
|
||||
// stdout
|
||||
func ExampleConfigState_Dump() {
|
||||
// See the top-level Dump example for details on the types used in this
|
||||
// example.
|
||||
|
||||
// Create two ConfigState instances with different indentation.
|
||||
scs := spew.ConfigState{Indent: "\t"}
|
||||
scs2 := spew.ConfigState{Indent: " "}
|
||||
|
||||
// Setup some sample data structures for the example.
|
||||
bar := Bar{Flag(flagTwo), uintptr(0)}
|
||||
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
|
||||
|
||||
// Dump using the ConfigState instances.
|
||||
scs.Dump(s1)
|
||||
scs2.Dump(s1)
|
||||
|
||||
// Output:
|
||||
// (spew_test.Foo) {
|
||||
// unexportedField: (spew_test.Bar) {
|
||||
// flag: (spew_test.Flag) flagTwo,
|
||||
// data: (uintptr) <nil>
|
||||
// },
|
||||
// ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
// (string) (len=3) "one": (bool) true
|
||||
// }
|
||||
// }
|
||||
// (spew_test.Foo) {
|
||||
// unexportedField: (spew_test.Bar) {
|
||||
// flag: (spew_test.Flag) flagTwo,
|
||||
// data: (uintptr) <nil>
|
||||
// },
|
||||
// ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
// (string) (len=3) "one": (bool) true
|
||||
// }
|
||||
// }
|
||||
//
|
||||
}
|
||||
|
||||
// This example demonstrates how to use ConfigState.Printf to display a variable
|
||||
// with a format string and inline formatting.
|
||||
func ExampleConfigState_Printf() {
|
||||
// See the top-level Dump example for details on the types used in this
|
||||
// example.
|
||||
|
||||
// Create two ConfigState instances and modify the method handling of the
|
||||
// first ConfigState only.
|
||||
scs := spew.NewDefaultConfig()
|
||||
scs2 := spew.NewDefaultConfig()
|
||||
scs.DisableMethods = true
|
||||
|
||||
// Alternatively
|
||||
// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
|
||||
// scs2 := spew.ConfigState{Indent: " "}
|
||||
|
||||
// This is of type Flag which implements a Stringer and has raw value 1.
|
||||
f := flagTwo
|
||||
|
||||
// Dump using the ConfigState instances.
|
||||
scs.Printf("f: %v\n", f)
|
||||
scs2.Printf("f: %v\n", f)
|
||||
|
||||
// Output:
|
||||
// f: 1
|
||||
// f: flagTwo
|
||||
}
|
1535
Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
156
Godeps/_workspace/src/github.com/davecgh/go-spew/spew/internal_test.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
This test file is part of the spew package rather than the spew_test
|
||||
package because it needs access to internals to properly test certain cases
|
||||
which are not possible via the public interface since they should never happen.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// dummyFmtState implements a fake fmt.State to use for testing invalid
|
||||
// reflect.Value handling. This is necessary because the fmt package catches
|
||||
// invalid values before invoking the formatter on them.
|
||||
type dummyFmtState struct {
|
||||
bytes.Buffer
|
||||
}
|
||||
|
||||
func (dfs *dummyFmtState) Flag(f int) bool {
|
||||
if f == int('+') {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (dfs *dummyFmtState) Precision() (int, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (dfs *dummyFmtState) Width() (int, bool) {
|
||||
return 0, false
|
||||
}
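dummyFmtState only needs to satisfy fmt.State, the interface the fmt package hands to any type implementing fmt.Formatter. A minimal, non-vendored sketch of that interaction (hexInt is a made-up type for illustration):

```go
package main

import "fmt"

// hexInt implements fmt.Formatter; its Format method receives the same
// fmt.State interface that dummyFmtState fakes in the test above.
type hexInt int

func (h hexInt) Format(f fmt.State, verb rune) {
    if f.Flag('+') { // dummyFmtState hard-codes this flag to true
        fmt.Fprintf(f, "+0x%x", int(h))
        return
    }
    fmt.Fprintf(f, "0x%x", int(h))
}

func main() {
    fmt.Printf("%v %+v\n", hexInt(255), hexInt(255)) // 0xff +0xff
}
```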
|
||||
|
||||
// TestInvalidReflectValue ensures the dump and formatter code handles an
|
||||
// invalid reflect value properly. This needs access to internal state since it
|
||||
// should never happen in real code and therefore can't be tested via the public
|
||||
// API.
|
||||
func TestInvalidReflectValue(t *testing.T) {
|
||||
i := 1
|
||||
|
||||
// Dump invalid reflect value.
|
||||
v := new(reflect.Value)
|
||||
buf := new(bytes.Buffer)
|
||||
d := dumpState{w: buf, cs: &Config}
|
||||
d.dump(*v)
|
||||
s := buf.String()
|
||||
want := "<invalid>"
|
||||
if s != want {
|
||||
t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Formatter invalid reflect value.
|
||||
buf2 := new(dummyFmtState)
|
||||
f := formatState{value: *v, cs: &Config, fs: buf2}
|
||||
f.format(*v)
|
||||
s = buf2.String()
|
||||
want = "<invalid>"
|
||||
if s != want {
|
||||
t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
|
||||
}
|
||||
}
|
||||
|
||||
// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
|
||||
// the maximum kind value which does not exist. This is needed to test the
|
||||
// fallback code which punts to the standard fmt library for new types that
|
||||
// might get added to the language.
|
||||
func changeKind(v *reflect.Value, readOnly bool) {
|
||||
rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
|
||||
*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if readOnly {
|
||||
*rvf |= flagRO
|
||||
} else {
|
||||
*rvf &= ^uintptr(flagRO)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAddedReflectValue tests functionality of the dump and formatter code which
|
||||
// falls back to the standard fmt library for new types that might get added to
|
||||
// the language.
|
||||
func TestAddedReflectValue(t *testing.T) {
|
||||
i := 1
|
||||
|
||||
// Dump using a reflect.Value that is exported.
|
||||
v := reflect.ValueOf(int8(5))
|
||||
changeKind(&v, false)
|
||||
buf := new(bytes.Buffer)
|
||||
d := dumpState{w: buf, cs: &Config}
|
||||
d.dump(v)
|
||||
s := buf.String()
|
||||
want := "(int8) 5"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Dump using a reflect.Value that is not exported.
|
||||
changeKind(&v, true)
|
||||
buf.Reset()
|
||||
d.dump(v)
|
||||
s = buf.String()
|
||||
want = "(int8) <int8 Value>"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Formatter using a reflect.Value that is exported.
|
||||
changeKind(&v, false)
|
||||
buf2 := new(dummyFmtState)
|
||||
f := formatState{value: v, cs: &Config, fs: buf2}
|
||||
f.format(v)
|
||||
s = buf2.String()
|
||||
want = "5"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
|
||||
}
|
||||
i++
|
||||
|
||||
// Formatter using a reflect.Value that is not exported.
|
||||
changeKind(&v, true)
|
||||
buf2.Reset()
|
||||
f = formatState{value: v, cs: &Config, fs: buf2}
|
||||
f.format(v)
|
||||
s = buf2.String()
|
||||
want = "<int8 Value>"
|
||||
if s != want {
|
||||
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
|
||||
}
|
||||
}
|
||||
|
||||
// SortValues makes the internal sortValues function available to the test
|
||||
// package.
|
||||
func SortValues(values []reflect.Value, cs *ConfigState) {
|
||||
sortValues(values, cs)
|
||||
}
|
308
Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew_test.go
generated
vendored
Normal file
@ -0,0 +1,308 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// spewFunc is used to identify which public function of the spew package or
|
||||
// ConfigState a test applies to.
|
||||
type spewFunc int
|
||||
|
||||
const (
|
||||
fCSFdump spewFunc = iota
|
||||
fCSFprint
|
||||
fCSFprintf
|
||||
fCSFprintln
|
||||
fCSPrint
|
||||
fCSPrintln
|
||||
fCSSdump
|
||||
fCSSprint
|
||||
fCSSprintf
|
||||
fCSSprintln
|
||||
fCSErrorf
|
||||
fCSNewFormatter
|
||||
fErrorf
|
||||
fFprint
|
||||
fFprintln
|
||||
fPrint
|
||||
fPrintln
|
||||
fSdump
|
||||
fSprint
|
||||
fSprintf
|
||||
fSprintln
|
||||
)
|
||||
|
||||
// Map of spewFunc values to names for pretty printing.
|
||||
var spewFuncStrings = map[spewFunc]string{
|
||||
fCSFdump: "ConfigState.Fdump",
|
||||
fCSFprint: "ConfigState.Fprint",
|
||||
fCSFprintf: "ConfigState.Fprintf",
|
||||
fCSFprintln: "ConfigState.Fprintln",
|
||||
fCSSdump: "ConfigState.Sdump",
|
||||
fCSPrint: "ConfigState.Print",
|
||||
fCSPrintln: "ConfigState.Println",
|
||||
fCSSprint: "ConfigState.Sprint",
|
||||
fCSSprintf: "ConfigState.Sprintf",
|
||||
fCSSprintln: "ConfigState.Sprintln",
|
||||
fCSErrorf: "ConfigState.Errorf",
|
||||
fCSNewFormatter: "ConfigState.NewFormatter",
|
||||
fErrorf: "spew.Errorf",
|
||||
fFprint: "spew.Fprint",
|
||||
fFprintln: "spew.Fprintln",
|
||||
fPrint: "spew.Print",
|
||||
fPrintln: "spew.Println",
|
||||
fSdump: "spew.Sdump",
|
||||
fSprint: "spew.Sprint",
|
||||
fSprintf: "spew.Sprintf",
|
||||
fSprintln: "spew.Sprintln",
|
||||
}
|
||||
|
||||
func (f spewFunc) String() string {
|
||||
if s, ok := spewFuncStrings[f]; ok {
|
||||
return s
|
||||
}
|
||||
return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
|
||||
}
|
||||
|
||||
// spewTest is used to describe a test to be performed against the public
|
||||
// functions of the spew package or ConfigState.
|
||||
type spewTest struct {
|
||||
cs *spew.ConfigState
|
||||
f spewFunc
|
||||
format string
|
||||
in interface{}
|
||||
want string
|
||||
}
|
||||
|
||||
// spewTests houses the tests to be performed against the public functions of
|
||||
// the spew package and ConfigState.
|
||||
//
|
||||
// These tests are only intended to ensure the public functions are exercised
|
||||
// and are intentionally not exhaustive of types. The exhaustive type
|
||||
// tests are handled in the dump and format tests.
|
||||
var spewTests []spewTest
|
||||
|
||||
// redirStdout is a helper function to return the standard output from f as a
|
||||
// byte slice.
|
||||
func redirStdout(f func()) ([]byte, error) {
|
||||
tempFile, err := ioutil.TempFile("", "ss-test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fileName := tempFile.Name()
|
||||
defer os.Remove(fileName) // Ignore error
|
||||
|
||||
origStdout := os.Stdout
|
||||
os.Stdout = tempFile
|
||||
f()
|
||||
os.Stdout = origStdout
|
||||
tempFile.Close()
|
||||
|
||||
return ioutil.ReadFile(fileName)
|
||||
}
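redirStdout captures output by pointing os.Stdout at a temporary file for the duration of f. On newer Go versions the same idea could be sketched with an in-memory pipe instead; this is an assumption-level alternative, not what the vendored test does, and it only suits small outputs since nothing drains the pipe while f runs:

```go
package main

import (
    "fmt"
    "io"
    "os"
)

// captureStdout redirects os.Stdout into an os.Pipe while f runs, then reads
// back everything f wrote. Large outputs would need a concurrent reader to
// avoid filling the pipe buffer and blocking f.
func captureStdout(f func()) ([]byte, error) {
    r, w, err := os.Pipe()
    if err != nil {
        return nil, err
    }
    orig := os.Stdout
    os.Stdout = w
    f()
    os.Stdout = orig
    w.Close()
    return io.ReadAll(r)
}

func main() {
    out, _ := captureStdout(func() { fmt.Println("hello") })
    fmt.Printf("captured %q\n", out) // captured "hello\n"
}
```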
|
||||
|
||||
func initSpewTests() {
|
||||
// Config states with various settings.
|
||||
scsDefault := spew.NewDefaultConfig()
|
||||
scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
|
||||
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
|
||||
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
|
||||
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
|
||||
|
||||
// Variables for tests on types which implement Stringer interface with and
|
||||
// without a pointer receiver.
|
||||
ts := stringer("test")
|
||||
tps := pstringer("test")
|
||||
|
||||
// depthTester is used to test max depth handling for structs, array, slices
|
||||
// and maps.
|
||||
type depthTester struct {
|
||||
ic indirCir1
|
||||
arr [1]string
|
||||
slice []string
|
||||
m map[string]int
|
||||
}
|
||||
dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
|
||||
map[string]int{"one": 1}}
|
||||
|
||||
// Variable for tests on types which implement error interface.
|
||||
te := customError(10)
|
||||
|
||||
spewTests = []spewTest{
|
||||
{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
|
||||
{scsDefault, fCSFprint, "", int16(32767), "32767"},
|
||||
{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
|
||||
{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
|
||||
{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
|
||||
{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
|
||||
{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
|
||||
{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
|
||||
{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
|
||||
{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
|
||||
{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
|
||||
{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
|
||||
{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
|
||||
{scsDefault, fFprint, "", float32(3.14), "3.14"},
|
||||
{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
|
||||
{scsDefault, fPrint, "", true, "true"},
|
||||
{scsDefault, fPrintln, "", false, "false\n"},
|
||||
{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
|
||||
{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
|
||||
{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
|
||||
{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
|
||||
{scsNoMethods, fCSFprint, "", ts, "test"},
|
||||
{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
|
||||
{scsNoMethods, fCSFprint, "", tps, "test"},
|
||||
{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
|
||||
{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
|
||||
{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
|
||||
{scsNoPmethods, fCSFprint, "", tps, "test"},
|
||||
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
|
||||
{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
|
||||
{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
|
||||
" ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
|
||||
" arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
|
||||
" slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
|
||||
" m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
|
||||
{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
|
||||
{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
|
||||
"(len=4) (stringer test) \"test\"\n"},
|
||||
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
|
||||
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
|
||||
"(error: 10) 10\n"},
|
||||
}
|
||||
}
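The table above exercises independent ConfigState instances rather than the package-level spew.Config. A small, non-vendored sketch of the same idea, using only documented fields (Indent, MaxDepth, DisableMethods); the struct types are invented for illustration:

```go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
    // Independent ConfigState instances, mirroring the variants built in
    // initSpewTests above; the global spew.Config is left untouched.
    shallow := spew.ConfigState{Indent: " ", MaxDepth: 1}
    plain := spew.ConfigState{Indent: " ", DisableMethods: true}

    type inner struct{ N int }
    type outer struct{ In inner }

    shallow.Dump(outer{inner{7}}) // nested fields print as <max depth reached>
    plain.Dump(outer{inner{7}})   // Stringer/error methods would be ignored
}
```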
|
||||
|
||||
// TestSpew executes all of the tests described by spewTests.
|
||||
func TestSpew(t *testing.T) {
|
||||
initSpewTests()
|
||||
|
||||
t.Logf("Running %d tests", len(spewTests))
|
||||
for i, test := range spewTests {
|
||||
buf := new(bytes.Buffer)
|
||||
switch test.f {
|
||||
case fCSFdump:
|
||||
test.cs.Fdump(buf, test.in)
|
||||
|
||||
case fCSFprint:
|
||||
test.cs.Fprint(buf, test.in)
|
||||
|
||||
case fCSFprintf:
|
||||
test.cs.Fprintf(buf, test.format, test.in)
|
||||
|
||||
case fCSFprintln:
|
||||
test.cs.Fprintln(buf, test.in)
|
||||
|
||||
case fCSPrint:
|
||||
b, err := redirStdout(func() { test.cs.Print(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fCSPrintln:
|
||||
b, err := redirStdout(func() { test.cs.Println(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fCSSdump:
|
||||
str := test.cs.Sdump(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSSprint:
|
||||
str := test.cs.Sprint(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSSprintf:
|
||||
str := test.cs.Sprintf(test.format, test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSSprintln:
|
||||
str := test.cs.Sprintln(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fCSErrorf:
|
||||
err := test.cs.Errorf(test.format, test.in)
|
||||
buf.WriteString(err.Error())
|
||||
|
||||
case fCSNewFormatter:
|
||||
fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
|
||||
|
||||
case fErrorf:
|
||||
err := spew.Errorf(test.format, test.in)
|
||||
buf.WriteString(err.Error())
|
||||
|
||||
case fFprint:
|
||||
spew.Fprint(buf, test.in)
|
||||
|
||||
case fFprintln:
|
||||
spew.Fprintln(buf, test.in)
|
||||
|
||||
case fPrint:
|
||||
b, err := redirStdout(func() { spew.Print(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fPrintln:
|
||||
b, err := redirStdout(func() { spew.Println(test.in) })
|
||||
if err != nil {
|
||||
t.Errorf("%v #%d %v", test.f, i, err)
|
||||
continue
|
||||
}
|
||||
buf.Write(b)
|
||||
|
||||
case fSdump:
|
||||
str := spew.Sdump(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fSprint:
|
||||
str := spew.Sprint(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fSprintf:
|
||||
str := spew.Sprintf(test.format, test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
case fSprintln:
|
||||
str := spew.Sprintln(test.in)
|
||||
buf.WriteString(str)
|
||||
|
||||
default:
|
||||
t.Errorf("%v #%d unrecognized function", test.f, i)
|
||||
continue
|
||||
}
|
||||
s := buf.String()
|
||||
if test.want != s {
|
||||
t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
82
Godeps/_workspace/src/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
// Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when both cgo is supported and "-tags testcgo" is added to the go test
|
||||
// command line. This code should really only be in the dumpcgo_test.go file,
|
||||
// but unfortunately Go will not allow cgo in test files, so this is a
|
||||
// workaround to allow cgo types to be tested. This configuration is used
|
||||
// because spew itself does not require cgo to run even though it does handle
|
||||
// certain cgo types specially. Rather than forcing all clients to require cgo
|
||||
// and an external C compiler just to run the tests, this scheme makes them
|
||||
// optional.
|
||||
// +build cgo,testcgo
|
||||
|
||||
package testdata
|
||||
|
||||
/*
|
||||
#include <stdint.h>
|
||||
typedef unsigned char custom_uchar_t;
|
||||
|
||||
char *ncp = 0;
|
||||
char *cp = "test";
|
||||
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
|
||||
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
|
||||
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
|
||||
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
|
||||
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
|
||||
*/
|
||||
import "C"
|
||||
|
||||
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
|
||||
// used for tests.
|
||||
func GetCgoNullCharPointer() interface{} {
|
||||
return C.ncp
|
||||
}
|
||||
|
||||
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
|
||||
// tests.
|
||||
func GetCgoCharPointer() interface{} {
|
||||
return C.cp
|
||||
}
|
||||
|
||||
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
|
||||
// This is only used for tests.
|
||||
func GetCgoCharArray() (interface{}, int, int) {
|
||||
return C.ca, len(C.ca), cap(C.ca)
|
||||
}
|
||||
|
||||
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
|
||||
// array's len and cap. This is only used for tests.
|
||||
func GetCgoUnsignedCharArray() (interface{}, int, int) {
|
||||
return C.uca, len(C.uca), cap(C.uca)
|
||||
}
|
||||
|
||||
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
|
||||
// and cap. This is only used for tests.
|
||||
func GetCgoSignedCharArray() (interface{}, int, int) {
|
||||
return C.sca, len(C.sca), cap(C.sca)
|
||||
}
|
||||
|
||||
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
|
||||
// cap. This is only used for tests.
|
||||
func GetCgoUint8tArray() (interface{}, int, int) {
|
||||
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
|
||||
}
|
||||
|
||||
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
|
||||
// cgo and the array's len and cap. This is only used for tests.
|
||||
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
|
||||
return C.tuca, len(C.tuca), cap(C.tuca)
|
||||
}
|
161
Godeps/_workspace/src/github.com/ethereum/ethash/cmake/modules/CMakeParseArguments.cmake
generated
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
#.rst:
|
||||
# CMakeParseArguments
|
||||
# -------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
# CMAKE_PARSE_ARGUMENTS(<prefix> <options> <one_value_keywords>
|
||||
# <multi_value_keywords> args...)
|
||||
#
|
||||
# CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions
|
||||
# for parsing the arguments given to that macro or function. It
|
||||
# processes the arguments and defines a set of variables which hold the
|
||||
# values of the respective options.
|
||||
#
|
||||
# The <options> argument contains all options for the respective macro,
|
||||
# i.e. keywords which can be used when calling the macro without any
|
||||
# value following, like e.g. the OPTIONAL keyword of the install()
|
||||
# command.
|
||||
#
|
||||
# The <one_value_keywords> argument contains all keywords for this macro
|
||||
# which are followed by one value, like e.g. DESTINATION keyword of the
|
||||
# install() command.
|
||||
#
|
||||
# The <multi_value_keywords> argument contains all keywords for this
|
||||
# macro which can be followed by more than one value, like e.g. the
|
||||
# TARGETS or FILES keywords of the install() command.
|
||||
#
|
||||
# When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the
|
||||
# keywords listed in <options>, <one_value_keywords> and
|
||||
# <multi_value_keywords> a variable composed of the given <prefix>
|
||||
# followed by "_" and the name of the respective keyword. These
|
||||
# variables will then hold the respective value from the argument list.
|
||||
# For the <options> keywords this will be TRUE or FALSE.
|
||||
#
|
||||
# All remaining arguments are collected in a variable
|
||||
# <prefix>_UNPARSED_ARGUMENTS, this can be checked afterwards to see
|
||||
# whether your macro was called with unrecognized parameters.
|
||||
#
|
||||
# As an example here a my_install() macro, which takes similar arguments
|
||||
# as the real install() command:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# function(MY_INSTALL)
|
||||
# set(options OPTIONAL FAST)
|
||||
# set(oneValueArgs DESTINATION RENAME)
|
||||
# set(multiValueArgs TARGETS CONFIGURATIONS)
|
||||
# cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}"
|
||||
# "${multiValueArgs}" ${ARGN} )
|
||||
# ...
|
||||
#
|
||||
#
|
||||
#
|
||||
# Assume my_install() has been called like this:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub)
|
||||
#
|
||||
#
|
||||
#
|
||||
# After the cmake_parse_arguments() call the macro will have set the
|
||||
# following variables:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# MY_INSTALL_OPTIONAL = TRUE
|
||||
# MY_INSTALL_FAST = FALSE (this option was not used when calling my_install())
|
||||
# MY_INSTALL_DESTINATION = "bin"
|
||||
# MY_INSTALL_RENAME = "" (was not used)
|
||||
# MY_INSTALL_TARGETS = "foo;bar"
|
||||
# MY_INSTALL_CONFIGURATIONS = "" (was not used)
|
||||
# MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL")
|
||||
#
|
||||
#
|
||||
#
|
||||
# You can then continue and process these variables.
|
||||
#
|
||||
# Keywords terminate lists of values, e.g. if directly after a
|
||||
# one_value_keyword another recognized keyword follows, this is
|
||||
# interpreted as the beginning of the new option. E.g.
|
||||
# my_install(TARGETS foo DESTINATION OPTIONAL) might look like it sets
# MY_INSTALL_DESTINATION to "OPTIONAL", but because OPTIONAL is itself a
# keyword it terminates the DESTINATION value list: MY_INSTALL_DESTINATION
# would be empty and MY_INSTALL_OPTIONAL would be set to TRUE instead.
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2010 Alexander Neundorf <neundorf@kde.org>
|
||||
#
|
||||
# Distributed under the OSI-approved BSD License (the "License");
|
||||
# see accompanying file Copyright.txt for details.
|
||||
#
|
||||
# This software is distributed WITHOUT ANY WARRANTY; without even the
|
||||
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the License for more information.
|
||||
#=============================================================================
|
||||
# (To distribute this file outside of CMake, substitute the full
|
||||
# License text for the above reference.)
|
||||
|
||||
|
||||
if(__CMAKE_PARSE_ARGUMENTS_INCLUDED)
|
||||
return()
|
||||
endif()
|
||||
set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE)
|
||||
|
||||
|
||||
function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames)
|
||||
# first set all result variables to empty/FALSE
|
||||
foreach(arg_name ${_singleArgNames} ${_multiArgNames})
|
||||
set(${prefix}_${arg_name})
|
||||
endforeach()
|
||||
|
||||
foreach(option ${_optionNames})
|
||||
set(${prefix}_${option} FALSE)
|
||||
endforeach()
|
||||
|
||||
set(${prefix}_UNPARSED_ARGUMENTS)
|
||||
|
||||
set(insideValues FALSE)
|
||||
set(currentArgName)
|
||||
|
||||
# now iterate over all arguments and fill the result variables
|
||||
foreach(currentArg ${ARGN})
|
||||
list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword
|
||||
list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword
|
||||
list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword
|
||||
|
||||
if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1)
|
||||
if(insideValues)
|
||||
if("${insideValues}" STREQUAL "SINGLE")
|
||||
set(${prefix}_${currentArgName} ${currentArg})
|
||||
set(insideValues FALSE)
|
||||
elseif("${insideValues}" STREQUAL "MULTI")
|
||||
list(APPEND ${prefix}_${currentArgName} ${currentArg})
|
||||
endif()
|
||||
else()
|
||||
list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg})
|
||||
endif()
|
||||
else()
|
||||
if(NOT ${optionIndex} EQUAL -1)
|
||||
set(${prefix}_${currentArg} TRUE)
|
||||
set(insideValues FALSE)
|
||||
elseif(NOT ${singleArgIndex} EQUAL -1)
|
||||
set(currentArgName ${currentArg})
|
||||
set(${prefix}_${currentArgName})
|
||||
set(insideValues "SINGLE")
|
||||
elseif(NOT ${multiArgIndex} EQUAL -1)
|
||||
set(currentArgName ${currentArg})
|
||||
set(${prefix}_${currentArgName})
|
||||
set(insideValues "MULTI")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
endforeach()
|
||||
|
||||
# propagate the result variables to the caller:
|
||||
foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames})
|
||||
set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE)
|
||||
endforeach()
|
||||
set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE)
|
||||
|
||||
endfunction()
|
108
Godeps/_workspace/src/github.com/ethereum/ethash/cmake/modules/FindCryptoPP.cmake
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
# Module for locating the Crypto++ encryption library.
|
||||
#
|
||||
# Customizable variables:
|
||||
# CRYPTOPP_ROOT_DIR
|
||||
# This variable points to the CryptoPP root directory. On Windows the
|
||||
# library location typically will have to be provided explicitly using the
|
||||
# -D command-line option. The directory should include the include/cryptopp,
|
||||
# lib and/or bin sub-directories.
|
||||
#
|
||||
# Read-only variables:
|
||||
# CRYPTOPP_FOUND
|
||||
# Indicates whether the library has been found.
|
||||
#
|
||||
# CRYPTOPP_INCLUDE_DIRS
|
||||
# Points to the CryptoPP include directory.
|
||||
#
|
||||
# CRYPTOPP_LIBRARIES
|
||||
# Points to the CryptoPP libraries that should be passed to
|
||||
# target_link_libraries.
|
||||
#
|
||||
#
|
||||
# Copyright (c) 2012 Sergiu Dotenco
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
INCLUDE (FindPackageHandleStandardArgs)
|
||||
|
||||
FIND_PATH (CRYPTOPP_ROOT_DIR
|
||||
NAMES cryptopp/cryptlib.h include/cryptopp/cryptlib.h
|
||||
PATHS ENV CRYPTOPPROOT
|
||||
DOC "CryptoPP root directory")
|
||||
|
||||
# Re-use the previous path:
|
||||
FIND_PATH (CRYPTOPP_INCLUDE_DIR
|
||||
NAMES cryptopp/cryptlib.h
|
||||
HINTS ${CRYPTOPP_ROOT_DIR}
|
||||
PATH_SUFFIXES include
|
||||
DOC "CryptoPP include directory")
|
||||
|
||||
FIND_LIBRARY (CRYPTOPP_LIBRARY_DEBUG
|
||||
NAMES cryptlibd cryptoppd
|
||||
HINTS ${CRYPTOPP_ROOT_DIR}
|
||||
PATH_SUFFIXES lib
|
||||
DOC "CryptoPP debug library")
|
||||
|
||||
FIND_LIBRARY (CRYPTOPP_LIBRARY_RELEASE
|
||||
NAMES cryptlib cryptopp
|
||||
HINTS ${CRYPTOPP_ROOT_DIR}
|
||||
PATH_SUFFIXES lib
|
||||
DOC "CryptoPP release library")
|
||||
|
||||
IF (CRYPTOPP_LIBRARY_DEBUG AND CRYPTOPP_LIBRARY_RELEASE)
|
||||
SET (CRYPTOPP_LIBRARY
|
||||
optimized ${CRYPTOPP_LIBRARY_RELEASE}
|
||||
debug ${CRYPTOPP_LIBRARY_DEBUG} CACHE DOC "CryptoPP library")
|
||||
ELSEIF (CRYPTOPP_LIBRARY_RELEASE)
|
||||
SET (CRYPTOPP_LIBRARY ${CRYPTOPP_LIBRARY_RELEASE} CACHE DOC
|
||||
"CryptoPP library")
|
||||
ENDIF (CRYPTOPP_LIBRARY_DEBUG AND CRYPTOPP_LIBRARY_RELEASE)
|
||||
|
||||
IF (CRYPTOPP_INCLUDE_DIR)
|
||||
SET (_CRYPTOPP_VERSION_HEADER ${CRYPTOPP_INCLUDE_DIR}/cryptopp/config.h)
|
||||
|
||||
IF (EXISTS ${_CRYPTOPP_VERSION_HEADER})
|
||||
FILE (STRINGS ${_CRYPTOPP_VERSION_HEADER} _CRYPTOPP_VERSION_TMP REGEX
|
||||
"^#define CRYPTOPP_VERSION[ \t]+[0-9]+$")
|
||||
|
||||
STRING (REGEX REPLACE
|
||||
"^#define CRYPTOPP_VERSION[ \t]+([0-9]+)" "\\1" _CRYPTOPP_VERSION_TMP
|
||||
${_CRYPTOPP_VERSION_TMP})
|
||||
|
||||
STRING (REGEX REPLACE "([0-9]+)[0-9][0-9]" "\\1" CRYPTOPP_VERSION_MAJOR
|
||||
${_CRYPTOPP_VERSION_TMP})
|
||||
STRING (REGEX REPLACE "[0-9]([0-9])[0-9]" "\\1" CRYPTOPP_VERSION_MINOR
|
||||
${_CRYPTOPP_VERSION_TMP})
|
||||
STRING (REGEX REPLACE "[0-9][0-9]([0-9])" "\\1" CRYPTOPP_VERSION_PATCH
|
||||
${_CRYPTOPP_VERSION_TMP})
|
||||
|
||||
SET (CRYPTOPP_VERSION_COUNT 3)
|
||||
SET (CRYPTOPP_VERSION
|
||||
${CRYPTOPP_VERSION_MAJOR}.${CRYPTOPP_VERSION_MINOR}.${CRYPTOPP_VERSION_PATCH})
|
||||
ENDIF (EXISTS ${_CRYPTOPP_VERSION_HEADER})
|
||||
ENDIF (CRYPTOPP_INCLUDE_DIR)
|
||||
|
||||
SET (CRYPTOPP_INCLUDE_DIRS ${CRYPTOPP_INCLUDE_DIR})
|
||||
SET (CRYPTOPP_LIBRARIES ${CRYPTOPP_LIBRARY})
|
||||
|
||||
MARK_AS_ADVANCED (CRYPTOPP_INCLUDE_DIR CRYPTOPP_LIBRARY CRYPTOPP_LIBRARY_DEBUG
|
||||
CRYPTOPP_LIBRARY_RELEASE)
|
||||
|
||||
FIND_PACKAGE_HANDLE_STANDARD_ARGS (CryptoPP REQUIRED_VARS CRYPTOPP_ROOT_DIR
|
||||
CRYPTOPP_INCLUDE_DIR CRYPTOPP_LIBRARY VERSION_VAR CRYPTOPP_VERSION)
|
148
Godeps/_workspace/src/github.com/ethereum/ethash/cmake/modules/FindOpenCL.cmake
generated
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
#.rst:
|
||||
# FindOpenCL
|
||||
# ----------
|
||||
#
|
||||
# Try to find OpenCL
|
||||
#
|
||||
# Once done this will define::
|
||||
#
|
||||
# OpenCL_FOUND - True if OpenCL was found
|
||||
# OpenCL_INCLUDE_DIRS - include directories for OpenCL
|
||||
# OpenCL_LIBRARIES - link against this library to use OpenCL
|
||||
# OpenCL_VERSION_STRING - Highest supported OpenCL version (eg. 1.2)
|
||||
# OpenCL_VERSION_MAJOR - The major version of the OpenCL implementation
|
||||
# OpenCL_VERSION_MINOR - The minor version of the OpenCL implementation
|
||||
#
|
||||
# The module will also define two cache variables::
|
||||
#
|
||||
# OpenCL_INCLUDE_DIR - the OpenCL include directory
|
||||
# OpenCL_LIBRARY - the path to the OpenCL library
|
||||
#
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2014 Matthaeus G. Chajdas
|
||||
#
|
||||
# Distributed under the OSI-approved BSD License (the "License");
|
||||
# see accompanying file Copyright.txt for details.
|
||||
#
|
||||
# This software is distributed WITHOUT ANY WARRANTY; without even the
|
||||
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the License for more information.
|
||||
#=============================================================================
|
||||
# (To distribute this file outside of CMake, substitute the full
|
||||
# License text for the above reference.)
|
||||
|
||||
function(_FIND_OPENCL_VERSION)
|
||||
include(CheckSymbolExists)
|
||||
include(CMakePushCheckState)
|
||||
set(CMAKE_REQUIRED_QUIET ${OpenCL_FIND_QUIETLY})
|
||||
|
||||
CMAKE_PUSH_CHECK_STATE()
|
||||
foreach(VERSION "2_0" "1_2" "1_1" "1_0")
|
||||
set(CMAKE_REQUIRED_INCLUDES "${OpenCL_INCLUDE_DIR}")
|
||||
|
||||
if(APPLE)
|
||||
CHECK_SYMBOL_EXISTS(
|
||||
CL_VERSION_${VERSION}
|
||||
"${OpenCL_INCLUDE_DIR}/OpenCL/cl.h"
|
||||
OPENCL_VERSION_${VERSION})
|
||||
else()
|
||||
CHECK_SYMBOL_EXISTS(
|
||||
CL_VERSION_${VERSION}
|
||||
"${OpenCL_INCLUDE_DIR}/CL/cl.h"
|
||||
OPENCL_VERSION_${VERSION})
|
||||
endif()
|
||||
|
||||
if(OPENCL_VERSION_${VERSION})
|
||||
string(REPLACE "_" "." VERSION "${VERSION}")
|
||||
set(OpenCL_VERSION_STRING ${VERSION} PARENT_SCOPE)
|
||||
string(REGEX MATCHALL "[0-9]+" version_components "${VERSION}")
|
||||
list(GET version_components 0 major_version)
|
||||
list(GET version_components 1 minor_version)
|
||||
set(OpenCL_VERSION_MAJOR ${major_version} PARENT_SCOPE)
|
||||
set(OpenCL_VERSION_MINOR ${minor_version} PARENT_SCOPE)
|
||||
break()
|
||||
endif()
|
||||
endforeach()
|
||||
CMAKE_POP_CHECK_STATE()
|
||||
endfunction()
|
||||
|
||||
find_path(OpenCL_INCLUDE_DIR
|
||||
NAMES
|
||||
CL/cl.h OpenCL/cl.h
|
||||
PATHS
|
||||
ENV "PROGRAMFILES(X86)"
|
||||
ENV AMDAPPSDKROOT
|
||||
ENV INTELOCLSDKROOT
|
||||
ENV NVSDKCOMPUTE_ROOT
|
||||
ENV CUDA_PATH
|
||||
ENV ATISTREAMSDKROOT
|
||||
PATH_SUFFIXES
|
||||
include
|
||||
OpenCL/common/inc
|
||||
"AMD APP/include")
|
||||
|
||||
_FIND_OPENCL_VERSION()
|
||||
|
||||
if(WIN32)
|
||||
if(CMAKE_SIZEOF_VOID_P EQUAL 4)
|
||||
find_library(OpenCL_LIBRARY
|
||||
NAMES OpenCL
|
||||
PATHS
|
||||
ENV "PROGRAMFILES(X86)"
|
||||
ENV AMDAPPSDKROOT
|
||||
ENV INTELOCLSDKROOT
|
||||
ENV CUDA_PATH
|
||||
ENV NVSDKCOMPUTE_ROOT
|
||||
ENV ATISTREAMSDKROOT
|
||||
PATH_SUFFIXES
|
||||
"AMD APP/lib/x86"
|
||||
lib/x86
|
||||
lib/Win32
|
||||
OpenCL/common/lib/Win32)
|
||||
elseif(CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
find_library(OpenCL_LIBRARY
|
||||
NAMES OpenCL
|
||||
PATHS
|
||||
ENV "PROGRAMFILES(X86)"
|
||||
ENV AMDAPPSDKROOT
|
||||
ENV INTELOCLSDKROOT
|
||||
ENV CUDA_PATH
|
||||
ENV NVSDKCOMPUTE_ROOT
|
||||
ENV ATISTREAMSDKROOT
|
||||
PATH_SUFFIXES
|
||||
"AMD APP/lib/x86_64"
|
||||
lib/x86_64
|
||||
lib/x64
|
||||
OpenCL/common/lib/x64)
|
||||
endif()
|
||||
else()
|
||||
find_library(OpenCL_LIBRARY
|
||||
NAMES OpenCL
|
||||
PATHS
|
||||
ENV "PROGRAMFILES(X86)"
|
||||
ENV AMDAPPSDKROOT
|
||||
ENV INTELOCLSDKROOT
|
||||
ENV CUDA_PATH
|
||||
ENV NVSDKCOMPUTE_ROOT
|
||||
ENV ATISTREAMSDKROOT
|
||||
PATH_SUFFIXES
|
||||
"AMD APP/lib/x86_64"
|
||||
lib/x86_64
|
||||
lib/x64
|
||||
OpenCL/common/lib/x64)
|
||||
endif()
|
||||
|
||||
set(OpenCL_LIBRARIES ${OpenCL_LIBRARY})
|
||||
set(OpenCL_INCLUDE_DIRS ${OpenCL_INCLUDE_DIR})
|
||||
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake)
|
||||
find_package_handle_standard_args(
|
||||
OpenCL
|
||||
FOUND_VAR OpenCL_FOUND
|
||||
REQUIRED_VARS OpenCL_LIBRARY OpenCL_INCLUDE_DIR
|
||||
VERSION_VAR OpenCL_VERSION_STRING)
|
||||
|
||||
mark_as_advanced(
|
||||
OpenCL_INCLUDE_DIR
|
||||
OpenCL_LIBRARY)
|
382
Godeps/_workspace/src/github.com/ethereum/ethash/cmake/modules/FindPackageHandleStandardArgs.cmake
generated
vendored
Normal file
@ -0,0 +1,382 @@
|
||||
#.rst:
|
||||
# FindPackageHandleStandardArgs
|
||||
# -----------------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
# FIND_PACKAGE_HANDLE_STANDARD_ARGS(<name> ... )
|
||||
#
|
||||
# This function is intended to be used in FindXXX.cmake modules files.
|
||||
# It handles the REQUIRED, QUIET and version-related arguments to
|
||||
# find_package(). It also sets the <packagename>_FOUND variable. The
|
||||
# package is considered found if all variables <var1>... listed contain
|
||||
# valid results, e.g. valid filepaths.
|
||||
#
|
||||
# There are two modes of this function. The first argument in both
|
||||
# modes is the name of the Find-module where it is called (in original
|
||||
# casing).
|
||||
#
|
||||
# The first simple mode looks like this:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# FIND_PACKAGE_HANDLE_STANDARD_ARGS(<name>
|
||||
# (DEFAULT_MSG|"Custom failure message") <var1>...<varN> )
|
||||
#
|
||||
# If the variables <var1> to <varN> are all valid, then
|
||||
# <UPPERCASED_NAME>_FOUND will be set to TRUE. If DEFAULT_MSG is given
|
||||
# as second argument, then the function will generate itself useful
|
||||
# success and error messages. You can also supply a custom error
|
||||
# message for the failure case. This is not recommended.
|
||||
#
|
||||
# The second mode is more powerful and also supports version checking:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# FIND_PACKAGE_HANDLE_STANDARD_ARGS(NAME
|
||||
# [FOUND_VAR <resultVar>]
|
||||
# [REQUIRED_VARS <var1>...<varN>]
|
||||
# [VERSION_VAR <versionvar>]
|
||||
# [HANDLE_COMPONENTS]
|
||||
# [CONFIG_MODE]
|
||||
# [FAIL_MESSAGE "Custom failure message"] )
|
||||
#
|
||||
# In this mode, the name of the result-variable can be set either to
|
||||
# either <UPPERCASED_NAME>_FOUND or <OriginalCase_Name>_FOUND using the
|
||||
# FOUND_VAR option. Other names for the result-variable are not
|
||||
# allowed. So for a Find-module named FindFooBar.cmake, the two
|
||||
# possible names are FooBar_FOUND and FOOBAR_FOUND. It is recommended
|
||||
# to use the original case version. If the FOUND_VAR option is not
|
||||
# used, the default is <UPPERCASED_NAME>_FOUND.
|
||||
#
|
||||
# As in the simple mode, if <var1> through <varN> are all valid,
|
||||
# <packagename>_FOUND will be set to TRUE. After REQUIRED_VARS the
|
||||
# variables which are required for this package are listed. Following
|
||||
# VERSION_VAR the name of the variable can be specified which holds the
|
||||
# version of the package which has been found. If this is done, this
|
||||
# version will be checked against the (potentially) specified required
|
||||
# version used in the find_package() call. The EXACT keyword is also
|
||||
# handled. The default messages include information about the required
|
||||
# version and the version which has been actually found, both if the
|
||||
# version is ok or not. If the package supports components, use the
|
||||
# HANDLE_COMPONENTS option to enable handling them. In this case,
|
||||
# find_package_handle_standard_args() will report which components have
|
||||
# been found and which are missing, and the <packagename>_FOUND variable
|
||||
# will be set to FALSE if any of the required components (i.e. not the
|
||||
# ones listed after OPTIONAL_COMPONENTS) are missing. Use the option
|
||||
# CONFIG_MODE if your FindXXX.cmake module is a wrapper for a
|
||||
# find_package(... NO_MODULE) call. In this case VERSION_VAR will be
|
||||
# set to <NAME>_VERSION and the macro will automatically check whether
|
||||
# the Config module was found. Via FAIL_MESSAGE a custom failure
|
||||
# message can be specified, if this is not used, the default message
|
||||
# will be displayed.
|
||||
#
|
||||
# Example for mode 1:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# find_package_handle_standard_args(LibXml2 DEFAULT_MSG
|
||||
# LIBXML2_LIBRARY LIBXML2_INCLUDE_DIR)
|
||||
#
|
||||
#
|
||||
#
|
||||
# LibXml2 is considered to be found, if both LIBXML2_LIBRARY and
|
||||
# LIBXML2_INCLUDE_DIR are valid. Then also LIBXML2_FOUND is set to
|
||||
# TRUE. If it is not found and REQUIRED was used, it fails with
|
||||
# FATAL_ERROR, independent whether QUIET was used or not. If it is
|
||||
# found, success will be reported, including the content of <var1>. On
|
||||
# repeated CMake runs, the same message won't be printed again.
|
||||
#
|
||||
# Example for mode 2:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# find_package_handle_standard_args(LibXslt
|
||||
# FOUND_VAR LibXslt_FOUND
|
||||
# REQUIRED_VARS LibXslt_LIBRARIES LibXslt_INCLUDE_DIRS
|
||||
# VERSION_VAR LibXslt_VERSION_STRING)
|
||||
#
|
||||
# In this case, LibXslt is considered to be found if the variable(s)
|
||||
# listed after REQUIRED_VAR are all valid, i.e. LibXslt_LIBRARIES and
|
||||
# LibXslt_INCLUDE_DIRS in this case. The result will then be stored in
|
||||
# LibXslt_FOUND . Also the version of LibXslt will be checked by using
|
||||
# the version contained in LibXslt_VERSION_STRING. Since no
|
||||
# FAIL_MESSAGE is given, the default messages will be printed.
|
||||
#
|
||||
# Another example for mode 2:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# find_package(Automoc4 QUIET NO_MODULE HINTS /opt/automoc4)
|
||||
# find_package_handle_standard_args(Automoc4 CONFIG_MODE)
|
||||
#
|
||||
# In this case, FindAutomoc4.cmake wraps a call to find_package(Automoc4
|
||||
# NO_MODULE) and adds an additional search directory for automoc4. Here
|
||||
# the result will be stored in AUTOMOC4_FOUND. The following
|
||||
# FIND_PACKAGE_HANDLE_STANDARD_ARGS() call produces a proper
|
||||
# success/error message.
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2007-2009 Kitware, Inc.
|
||||
#
|
||||
# Distributed under the OSI-approved BSD License (the "License");
|
||||
# see accompanying file Copyright.txt for details.
|
||||
#
|
||||
# This software is distributed WITHOUT ANY WARRANTY; without even the
|
||||
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the License for more information.
|
||||
#=============================================================================
|
||||
# (To distribute this file outside of CMake, substitute the full
|
||||
# License text for the above reference.)
|
||||
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/FindPackageMessage.cmake)
|
||||
include(${CMAKE_CURRENT_LIST_DIR}/CMakeParseArguments.cmake)
|
||||
|
||||
# internal helper macro
|
||||
macro(_FPHSA_FAILURE_MESSAGE _msg)
|
||||
if (${_NAME}_FIND_REQUIRED)
|
||||
message(FATAL_ERROR "${_msg}")
|
||||
else ()
|
||||
if (NOT ${_NAME}_FIND_QUIETLY)
|
||||
message(STATUS "${_msg}")
|
||||
endif ()
|
||||
endif ()
|
||||
endmacro()
|
||||
|
||||
|
||||
# internal helper macro to generate the failure message when used in CONFIG_MODE:
|
||||
macro(_FPHSA_HANDLE_FAILURE_CONFIG_MODE)
|
||||
# <name>_CONFIG is set, but FOUND is false, this means that some other of the REQUIRED_VARS was not found:
|
||||
if(${_NAME}_CONFIG)
|
||||
_FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: missing: ${MISSING_VARS} (found ${${_NAME}_CONFIG} ${VERSION_MSG})")
|
||||
else()
|
||||
# If _CONSIDERED_CONFIGS is set, the config-file has been found, but no suitable version.
|
||||
# List them all in the error message:
|
||||
if(${_NAME}_CONSIDERED_CONFIGS)
|
||||
set(configsText "")
|
||||
list(LENGTH ${_NAME}_CONSIDERED_CONFIGS configsCount)
|
||||
math(EXPR configsCount "${configsCount} - 1")
|
||||
foreach(currentConfigIndex RANGE ${configsCount})
|
||||
list(GET ${_NAME}_CONSIDERED_CONFIGS ${currentConfigIndex} filename)
|
||||
list(GET ${_NAME}_CONSIDERED_VERSIONS ${currentConfigIndex} version)
|
||||
set(configsText "${configsText} ${filename} (version ${version})\n")
|
||||
endforeach()
|
||||
if (${_NAME}_NOT_FOUND_MESSAGE)
|
||||
set(configsText "${configsText} Reason given by package: ${${_NAME}_NOT_FOUND_MESSAGE}\n")
|
||||
endif()
|
||||
_FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} ${VERSION_MSG}, checked the following files:\n${configsText}")
|
||||
|
||||
else()
|
||||
# Simple case: No Config-file was found at all:
|
||||
_FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: found neither ${_NAME}Config.cmake nor ${_NAME_LOWER}-config.cmake ${VERSION_MSG}")
|
||||
endif()
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
|
||||
function(FIND_PACKAGE_HANDLE_STANDARD_ARGS _NAME _FIRST_ARG)
|
||||
|
||||
# set up the arguments for CMAKE_PARSE_ARGUMENTS and check whether we are in
|
||||
# new extended or in the "old" mode:
|
||||
set(options CONFIG_MODE HANDLE_COMPONENTS)
|
||||
set(oneValueArgs FAIL_MESSAGE VERSION_VAR FOUND_VAR)
|
||||
set(multiValueArgs REQUIRED_VARS)
|
||||
set(_KEYWORDS_FOR_EXTENDED_MODE ${options} ${oneValueArgs} ${multiValueArgs} )
|
||||
list(FIND _KEYWORDS_FOR_EXTENDED_MODE "${_FIRST_ARG}" INDEX)
|
||||
|
||||
if(${INDEX} EQUAL -1)
|
||||
set(FPHSA_FAIL_MESSAGE ${_FIRST_ARG})
|
||||
set(FPHSA_REQUIRED_VARS ${ARGN})
|
||||
set(FPHSA_VERSION_VAR)
|
||||
else()
|
||||
|
||||
CMAKE_PARSE_ARGUMENTS(FPHSA "${options}" "${oneValueArgs}" "${multiValueArgs}" ${_FIRST_ARG} ${ARGN})
|
||||
|
||||
if(FPHSA_UNPARSED_ARGUMENTS)
|
||||
message(FATAL_ERROR "Unknown keywords given to FIND_PACKAGE_HANDLE_STANDARD_ARGS(): \"${FPHSA_UNPARSED_ARGUMENTS}\"")
|
||||
endif()
|
||||
|
||||
if(NOT FPHSA_FAIL_MESSAGE)
|
||||
set(FPHSA_FAIL_MESSAGE "DEFAULT_MSG")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# now that we collected all arguments, process them
|
||||
|
||||
if("x${FPHSA_FAIL_MESSAGE}" STREQUAL "xDEFAULT_MSG")
|
||||
set(FPHSA_FAIL_MESSAGE "Could NOT find ${_NAME}")
|
||||
endif()
|
||||
|
||||
# In config-mode, we rely on the variable <package>_CONFIG, which is set by find_package()
|
||||
# when it successfully found the config-file, including version checking:
|
||||
if(FPHSA_CONFIG_MODE)
|
||||
list(INSERT FPHSA_REQUIRED_VARS 0 ${_NAME}_CONFIG)
|
||||
list(REMOVE_DUPLICATES FPHSA_REQUIRED_VARS)
|
||||
set(FPHSA_VERSION_VAR ${_NAME}_VERSION)
|
||||
endif()
|
||||
|
||||
if(NOT FPHSA_REQUIRED_VARS)
|
||||
message(FATAL_ERROR "No REQUIRED_VARS specified for FIND_PACKAGE_HANDLE_STANDARD_ARGS()")
|
||||
endif()
|
||||
|
||||
list(GET FPHSA_REQUIRED_VARS 0 _FIRST_REQUIRED_VAR)
|
||||
|
||||
string(TOUPPER ${_NAME} _NAME_UPPER)
|
||||
string(TOLOWER ${_NAME} _NAME_LOWER)
|
||||
|
||||
if(FPHSA_FOUND_VAR)
|
||||
if(FPHSA_FOUND_VAR MATCHES "^${_NAME}_FOUND$" OR FPHSA_FOUND_VAR MATCHES "^${_NAME_UPPER}_FOUND$")
|
||||
set(_FOUND_VAR ${FPHSA_FOUND_VAR})
|
||||
else()
|
||||
message(FATAL_ERROR "The argument for FOUND_VAR is \"${FPHSA_FOUND_VAR}\", but only \"${_NAME}_FOUND\" and \"${_NAME_UPPER}_FOUND\" are valid names.")
|
||||
endif()
|
||||
else()
|
||||
set(_FOUND_VAR ${_NAME_UPPER}_FOUND)
|
||||
endif()
|
||||
|
||||
# collect all variables which were not found, so they can be printed, so the
|
||||
# user knows better what went wrong (#6375)
|
||||
set(MISSING_VARS "")
|
||||
set(DETAILS "")
|
||||
# check if all passed variables are valid
|
||||
unset(${_FOUND_VAR})
|
||||
foreach(_CURRENT_VAR ${FPHSA_REQUIRED_VARS})
|
||||
if(NOT ${_CURRENT_VAR})
|
||||
set(${_FOUND_VAR} FALSE)
|
||||
set(MISSING_VARS "${MISSING_VARS} ${_CURRENT_VAR}")
|
||||
else()
|
||||
set(DETAILS "${DETAILS}[${${_CURRENT_VAR}}]")
|
||||
endif()
|
||||
endforeach()
|
||||
if(NOT "${${_FOUND_VAR}}" STREQUAL "FALSE")
|
||||
set(${_FOUND_VAR} TRUE)
|
||||
endif()
|
||||
|
||||
# component handling
|
||||
unset(FOUND_COMPONENTS_MSG)
|
||||
unset(MISSING_COMPONENTS_MSG)
|
||||
|
||||
if(FPHSA_HANDLE_COMPONENTS)
|
||||
foreach(comp ${${_NAME}_FIND_COMPONENTS})
|
||||
if(${_NAME}_${comp}_FOUND)
|
||||
|
||||
if(NOT DEFINED FOUND_COMPONENTS_MSG)
|
||||
set(FOUND_COMPONENTS_MSG "found components: ")
|
||||
endif()
|
||||
set(FOUND_COMPONENTS_MSG "${FOUND_COMPONENTS_MSG} ${comp}")
|
||||
|
||||
else()
|
||||
|
||||
if(NOT DEFINED MISSING_COMPONENTS_MSG)
|
||||
set(MISSING_COMPONENTS_MSG "missing components: ")
|
||||
endif()
|
||||
set(MISSING_COMPONENTS_MSG "${MISSING_COMPONENTS_MSG} ${comp}")
|
||||
|
||||
if(${_NAME}_FIND_REQUIRED_${comp})
|
||||
set(${_FOUND_VAR} FALSE)
|
||||
set(MISSING_VARS "${MISSING_VARS} ${comp}")
|
||||
endif()
|
||||
|
||||
endif()
|
||||
endforeach()
|
||||
set(COMPONENT_MSG "${FOUND_COMPONENTS_MSG} ${MISSING_COMPONENTS_MSG}")
|
||||
set(DETAILS "${DETAILS}[c${COMPONENT_MSG}]")
|
||||
endif()
|
||||
|
||||
# version handling:
|
||||
set(VERSION_MSG "")
|
||||
set(VERSION_OK TRUE)
|
||||
set(VERSION ${${FPHSA_VERSION_VAR}})
|
||||
|
||||
# check with DEFINED here as the requested or found version may be "0"
|
||||
if (DEFINED ${_NAME}_FIND_VERSION)
|
||||
if(DEFINED ${FPHSA_VERSION_VAR})
|
||||
|
||||
if(${_NAME}_FIND_VERSION_EXACT) # exact version required
|
||||
# count the dots in the version string
|
||||
string(REGEX REPLACE "[^.]" "" _VERSION_DOTS "${VERSION}")
|
||||
# add one dot because there is one dot more than there are components
|
||||
string(LENGTH "${_VERSION_DOTS}." _VERSION_DOTS)
|
||||
if (_VERSION_DOTS GREATER ${_NAME}_FIND_VERSION_COUNT)
|
||||
# Because of the C++ implementation of find_package() ${_NAME}_FIND_VERSION_COUNT
|
||||
# is at most 4 here. Therefore a simple lookup table is used.
|
||||
if (${_NAME}_FIND_VERSION_COUNT EQUAL 1)
|
||||
set(_VERSION_REGEX "[^.]*")
|
||||
elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 2)
|
||||
set(_VERSION_REGEX "[^.]*\\.[^.]*")
|
||||
elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 3)
|
||||
set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*")
|
||||
else ()
|
||||
set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*\\.[^.]*")
|
||||
endif ()
|
||||
string(REGEX REPLACE "^(${_VERSION_REGEX})\\..*" "\\1" _VERSION_HEAD "${VERSION}")
|
||||
unset(_VERSION_REGEX)
|
||||
if (NOT ${_NAME}_FIND_VERSION VERSION_EQUAL _VERSION_HEAD)
|
||||
set(VERSION_MSG "Found unsuitable version \"${VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"")
|
||||
set(VERSION_OK FALSE)
|
||||
else ()
|
||||
set(VERSION_MSG "(found suitable exact version \"${VERSION}\")")
|
||||
endif ()
|
||||
unset(_VERSION_HEAD)
|
||||
else ()
|
||||
if (NOT "${${_NAME}_FIND_VERSION}" VERSION_EQUAL "${VERSION}")
|
||||
set(VERSION_MSG "Found unsuitable version \"${VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"")
|
||||
set(VERSION_OK FALSE)
|
||||
else ()
|
||||
set(VERSION_MSG "(found suitable exact version \"${VERSION}\")")
|
||||
endif ()
|
||||
endif ()
|
||||
unset(_VERSION_DOTS)
|
||||
|
||||
else() # minimum version specified:
|
||||
if ("${${_NAME}_FIND_VERSION}" VERSION_GREATER "${VERSION}")
|
||||
set(VERSION_MSG "Found unsuitable version \"${VERSION}\", but required is at least \"${${_NAME}_FIND_VERSION}\"")
|
||||
set(VERSION_OK FALSE)
|
||||
else ()
|
||||
set(VERSION_MSG "(found suitable version \"${VERSION}\", minimum required is \"${${_NAME}_FIND_VERSION}\")")
|
||||
endif ()
|
||||
endif()
|
||||
|
||||
else()
|
||||
|
||||
# if the package was not found, but a version was given, add that to the output:
|
||||
if(${_NAME}_FIND_VERSION_EXACT)
|
||||
set(VERSION_MSG "(Required is exact version \"${${_NAME}_FIND_VERSION}\")")
|
||||
else()
|
||||
set(VERSION_MSG "(Required is at least version \"${${_NAME}_FIND_VERSION}\")")
|
||||
endif()
|
||||
|
||||
endif()
|
||||
else ()
|
||||
if(VERSION)
|
||||
set(VERSION_MSG "(found version \"${VERSION}\")")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
if(VERSION_OK)
|
||||
set(DETAILS "${DETAILS}[v${VERSION}(${${_NAME}_FIND_VERSION})]")
|
||||
else()
|
||||
set(${_FOUND_VAR} FALSE)
|
||||
endif()
|
||||
|
||||
|
||||
# print the result:
|
||||
if (${_FOUND_VAR})
|
||||
FIND_PACKAGE_MESSAGE(${_NAME} "Found ${_NAME}: ${${_FIRST_REQUIRED_VAR}} ${VERSION_MSG} ${COMPONENT_MSG}" "${DETAILS}")
|
||||
else ()
|
||||
|
||||
if(FPHSA_CONFIG_MODE)
|
||||
_FPHSA_HANDLE_FAILURE_CONFIG_MODE()
|
||||
else()
|
||||
if(NOT VERSION_OK)
|
||||
_FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: ${VERSION_MSG} (found ${${_FIRST_REQUIRED_VAR}})")
|
||||
else()
|
||||
_FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} (missing: ${MISSING_VARS}) ${VERSION_MSG}")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
endif ()
|
||||
|
||||
set(${_FOUND_VAR} ${${_FOUND_VAR}} PARENT_SCOPE)
|
||||
|
||||
endfunction()
|
57
Godeps/_workspace/src/github.com/ethereum/ethash/cmake/modules/FindPackageMessage.cmake
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
||||
#.rst:
|
||||
# FindPackageMessage
|
||||
# ------------------
|
||||
#
|
||||
#
|
||||
#
|
||||
# FIND_PACKAGE_MESSAGE(<name> "message for user" "find result details")
|
||||
#
|
||||
# This macro is intended to be used in FindXXX.cmake modules files. It
|
||||
# will print a message once for each unique find result. This is useful
|
||||
# for telling the user where a package was found. The first argument
|
||||
# specifies the name (XXX) of the package. The second argument
|
||||
# specifies the message to display. The third argument lists details
|
||||
# about the find result so that if they change the message will be
|
||||
# displayed again. The macro also obeys the QUIET argument to the
|
||||
# find_package command.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# ::
|
||||
#
|
||||
# if(X11_FOUND)
|
||||
# FIND_PACKAGE_MESSAGE(X11 "Found X11: ${X11_X11_LIB}"
|
||||
# "[${X11_X11_LIB}][${X11_INCLUDE_DIR}]")
|
||||
# else()
|
||||
# ...
|
||||
# endif()
|
||||
|
||||
#=============================================================================
|
||||
# Copyright 2008-2009 Kitware, Inc.
|
||||
#
|
||||
# Distributed under the OSI-approved BSD License (the "License");
|
||||
# see accompanying file Copyright.txt for details.
|
||||
#
|
||||
# This software is distributed WITHOUT ANY WARRANTY; without even the
|
||||
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the License for more information.
|
||||
#=============================================================================
|
||||
# (To distribute this file outside of CMake, substitute the full
|
||||
# License text for the above reference.)
|
||||
|
||||
function(FIND_PACKAGE_MESSAGE pkg msg details)
|
||||
# Avoid printing a message repeatedly for the same find result.
|
||||
if(NOT ${pkg}_FIND_QUIETLY)
|
||||
string(REPLACE "\n" "" details "${details}")
|
||||
set(DETAILS_VAR FIND_PACKAGE_MESSAGE_DETAILS_${pkg})
|
||||
if(NOT "${details}" STREQUAL "${${DETAILS_VAR}}")
|
||||
# The message has not yet been printed.
|
||||
message(STATUS "${msg}")
|
||||
|
||||
# Save the find details in the cache to avoid printing the same
|
||||
# message again.
|
||||
set("${DETAILS_VAR}" "${details}"
|
||||
CACHE INTERNAL "Details about finding ${pkg}")
|
||||
endif()
|
||||
endif()
|
||||
endfunction()
|
13
Godeps/_workspace/src/github.com/ethereum/ethash/cryptopp/CMakeLists.txt
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
set(LIBRARY cryptopp)
|
||||
|
||||
include_directories(../../cryptopp)
|
||||
|
||||
# todo, subset
|
||||
file(GLOB HEADERS "../../cryptopp/*.h")
|
||||
file(GLOB SOURCE "../../cryptopp/*.cpp")
|
||||
|
||||
add_library(${LIBRARY} ${HEADERS} ${SOURCE})
|
||||
|
||||
set(CRYPTOPP_INCLUDE_DIRS "../.." "../../../" PARENT_SCOPE)
|
||||
set(CRYPTOPP_LIBRARIES ${LIBRARY} PARENT_SCOPE)
|
||||
set(CRYPTOPP_FOUND TRUE PARENT_SCOPE)
|
137
Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go
generated
vendored
@ -1,21 +1,3 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2015 Lefteris Karapetsas <lefteris@refu.co>
|
||||
// Copyright 2015 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
/*
|
||||
@ -48,8 +30,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
sharedLight = new(Light)
|
||||
minDifficulty = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
sharedLight = new(Light)
|
||||
)
|
||||
|
||||
const (
|
||||
@ -75,7 +57,6 @@ func defaultDir() string {
|
||||
// and automatic memory management.
|
||||
type cache struct {
|
||||
epoch uint64
|
||||
used time.Time
|
||||
test bool
|
||||
|
||||
gen sync.Once // ensures cache is only generated once.
|
||||
@ -105,25 +86,14 @@ func freeCache(cache *cache) {
|
||||
cache.ptr = nil
|
||||
}
|
||||
|
||||
func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
|
||||
ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
|
||||
// Make sure cache is live until after the C call.
|
||||
// This is important because a GC might happen and execute
|
||||
// the finalizer before the call completes.
|
||||
_ = cache
|
||||
return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
|
||||
}
|
||||
|
||||
// Light implements the Verify half of the proof of work. It uses a few small
|
||||
// in-memory caches to verify the nonces found by Full.
|
||||
// Light implements the Verify half of the proof of work.
|
||||
// It uses a small in-memory cache to verify the nonces
|
||||
// found by Full.
|
||||
type Light struct {
|
||||
test bool // If set, use a smaller cache size
|
||||
|
||||
mu sync.Mutex // Protects the per-epoch map of verification caches
|
||||
caches map[uint64]*cache // Currently maintained verification caches
|
||||
future *cache // Pre-generated cache for the estimated future DAG
|
||||
|
||||
NumCaches int // Maximum number of caches to keep before eviction (only init, don't modify)
|
||||
test bool // if set use a smaller cache size
|
||||
mu sync.Mutex // protects current
|
||||
current *cache // last cache which was generated.
|
||||
// TODO: keep multiple caches.
|
||||
}
|
||||
|
||||
// Verify checks whether the block's nonce is valid.
|
||||
@ -149,23 +119,29 @@ func (l *Light) Verify(block pow.Block) bool {
|
||||
|
||||
cache := l.getCache(blockNum)
|
||||
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
|
||||
|
||||
if l.test {
|
||||
dagSize = dagSizeForTesting
|
||||
}
|
||||
// Recompute the hash using the cache.
|
||||
ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
|
||||
if !ok {
|
||||
hash := hashToH256(block.HashNoNonce())
|
||||
ret := C.ethash_light_compute_internal(cache.ptr, dagSize, hash, C.uint64_t(block.Nonce()))
|
||||
if !ret.success {
|
||||
return false
|
||||
}
|
||||
|
||||
// avoid mixdigest malleability as it's not included in a block's "hashNoNonce"
|
||||
if block.MixDigest() != mixDigest {
|
||||
if block.MixDigest() != h256ToHash(ret.mix_hash) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Make sure cache is live until after the C call.
|
||||
// This is important because a GC might happen and execute
|
||||
// the finalizer before the call completes.
|
||||
_ = cache
|
||||
// The actual check.
|
||||
target := new(big.Int).Div(maxUint256, difficulty)
|
||||
return result.Big().Cmp(target) <= 0
|
||||
target := new(big.Int).Div(minDifficulty, difficulty)
|
||||
return h256ToHash(ret.result).Big().Cmp(target) <= 0
|
||||
}
|
||||
|
||||
func h256ToHash(in C.ethash_h256_t) common.Hash {
|
||||
@ -179,49 +155,16 @@ func hashToH256(in common.Hash) C.ethash_h256_t {
|
||||
func (l *Light) getCache(blockNum uint64) *cache {
|
||||
var c *cache
|
||||
epoch := blockNum / epochLength
|
||||
|
||||
// If we have a PoW for that epoch, use that
|
||||
// Update or reuse the last cache.
|
||||
l.mu.Lock()
|
||||
if l.caches == nil {
|
||||
l.caches = make(map[uint64]*cache)
|
||||
if l.current != nil && l.current.epoch == epoch {
|
||||
c = l.current
|
||||
} else {
|
||||
c = &cache{epoch: epoch, test: l.test}
|
||||
l.current = c
|
||||
}
|
||||
if l.NumCaches == 0 {
|
||||
l.NumCaches = 3
|
||||
}
|
||||
c = l.caches[epoch]
|
||||
if c == nil {
|
||||
// No cached DAG, evict the oldest if the cache limit was reached
|
||||
if len(l.caches) >= l.NumCaches {
|
||||
var evict *cache
|
||||
for _, cache := range l.caches {
|
||||
if evict == nil || evict.used.After(cache.used) {
|
||||
evict = cache
|
||||
}
|
||||
}
|
||||
glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
|
||||
delete(l.caches, evict.epoch)
|
||||
}
|
||||
// If we have the new DAG pre-generated, use that, otherwise create a new one
|
||||
if l.future != nil && l.future.epoch == epoch {
|
||||
glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
|
||||
c, l.future = l.future, nil
|
||||
} else {
|
||||
glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
|
||||
c = &cache{epoch: epoch, test: l.test}
|
||||
}
|
||||
l.caches[epoch] = c
|
||||
|
||||
// If we just used up the future cache, or need a refresh, regenerate
|
||||
if l.future == nil || l.future.epoch <= epoch {
|
||||
glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
|
||||
l.future = &cache{epoch: epoch + 1, test: l.test}
|
||||
go l.future.generate()
|
||||
}
|
||||
}
|
||||
c.used = time.Now()
|
||||
l.mu.Unlock()
|
||||
|
||||
// Wait for generation finish and return the cache
|
||||
// Wait for the cache to finish generating.
|
||||
c.generate()
|
||||
return c
|
||||
}
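The rewritten getCache above keeps at most NumCaches verification caches and drops the least recently used one. A minimal stand-alone sketch of that eviction step, using hypothetical names and a pared-down cache type (not part of the diff):

package lrusketch

import "time"

// Minimal stand-in for the cache struct above.
type cache struct {
	epoch uint64
	used  time.Time
}

// evictOldest drops the least recently used cache once the map already
// holds limit entries, mirroring the eviction step in Light.getCache.
func evictOldest(caches map[uint64]*cache, limit int) {
	if len(caches) < limit {
		return
	}
	var evict *cache
	for _, c := range caches {
		if evict == nil || evict.used.After(c.used) {
			evict = c
		}
	}
	delete(caches, evict.epoch)
}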
|
||||
@ -256,7 +199,7 @@ func (d *dag) generate() {
|
||||
if d.dir == "" {
|
||||
d.dir = DefaultDir
|
||||
}
|
||||
glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
|
||||
glog.V(logger.Info).Infof("Generating DAG for epoch %d (%x)", d.epoch, seedHash)
|
||||
// Generate a temporary cache.
|
||||
// TODO: this could share the cache with Light
|
||||
cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
|
||||
@ -277,18 +220,14 @@ func (d *dag) generate() {
|
||||
})
|
||||
}
|
||||
|
||||
func freeDAG(d *dag) {
|
||||
C.ethash_full_delete(d.ptr)
|
||||
d.ptr = nil
|
||||
}
|
||||
|
||||
func (d *dag) Ptr() unsafe.Pointer {
|
||||
return unsafe.Pointer(d.ptr.data)
|
||||
func freeDAG(h *dag) {
|
||||
C.ethash_full_delete(h.ptr)
|
||||
h.ptr = nil
|
||||
}
|
||||
|
||||
//export ethashGoCallback
|
||||
func ethashGoCallback(percent C.unsigned) C.int {
|
||||
glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
|
||||
glog.V(logger.Info).Infof("Still generating DAG: %d%%", percent)
|
||||
return 0
|
||||
}
|
||||
|
||||
@ -334,7 +273,7 @@ func (pow *Full) getDAG(blockNum uint64) (d *dag) {
|
||||
return d
|
||||
}
|
||||
|
||||
func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) {
|
||||
func (pow *Full) Search(block pow.Block, stop <-chan struct{}) (nonce uint64, mixDigest []byte) {
|
||||
dag := pow.getDAG(block.NumberU64())
|
||||
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
@ -347,7 +286,7 @@ func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce
|
||||
|
||||
nonce = uint64(r.Int63())
|
||||
hash := hashToH256(block.HashNoNonce())
|
||||
target := new(big.Int).Div(maxUint256, diff)
|
||||
target := new(big.Int).Div(minDifficulty, diff)
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
@ -401,13 +340,9 @@ type Ethash struct {
|
||||
}
|
||||
|
||||
// New creates an instance of the proof of work.
|
||||
// A single instance of Light is shared across all instances
|
||||
// created with New.
|
||||
func New() *Ethash {
|
||||
return &Ethash{new(Light), &Full{turbo: true}}
|
||||
}
|
||||
|
||||
// NewShared creates an instance of the proof of work, where a single instance
|
||||
// of the Light cache is shared across all instances created with NewShared.
|
||||
func NewShared() *Ethash {
|
||||
return &Ethash{sharedLight, &Full{turbo: true}}
|
||||
}
|
||||
|
||||
|
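For reference, the nonce check in Light.Verify above reduces to a big-integer comparison: the 32-byte result must not exceed 2^256 divided by the block difficulty. A small self-contained sketch of that check (the difficulty value is only an example):

package main

import (
	"fmt"
	"math/big"
)

// result stands in for the 32-byte hash produced by the ethash computation.
func result() []byte { return make([]byte, 32) }

func main() {
	two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	difficulty := big.NewInt(131072) // example difficulty only
	target := new(big.Int).Div(two256, difficulty)

	// The result is read as a big-endian integer; the nonce is valid when
	// the result does not exceed the target.
	ok := new(big.Int).SetBytes(result()).Cmp(target) <= 0
	fmt.Println("valid:", ok)
}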
628
Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go
generated
vendored
@ -1,628 +0,0 @@
|
||||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build opencl
|
||||
|
||||
package ethash
|
||||
|
||||
//#cgo LDFLAGS: -w
|
||||
//#include <stdint.h>
|
||||
//#include <string.h>
|
||||
//#include "src/libethash/internal.h"
|
||||
import "C"
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
mrand "math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Gustav-Simonsson/go-opencl/cl"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
This code has two main entry points:

1. The initCL(...) function configures one or more OpenCL devices
(for now only GPU) and loads the Ethash DAG onto device memory

2. The Search(...) function loads an Ethash nonce into device(s) memory and
executes the Ethash OpenCL kernel.
|
||||
|
||||
Throughout the code, we refer to "host memory" and "device memory".
|
||||
For most systems (e.g. regular PC GPU miner) the host memory is RAM and
|
||||
device memory is the GPU global memory (e.g. GDDR5).
|
||||
|
||||
References mentioned in code comments:
|
||||
|
||||
1. https://github.com/ethereum/wiki/wiki/Ethash
|
||||
2. https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner.cpp
|
||||
3. https://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/
|
||||
4. http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_OpenCL_Programming_User_Guide.pdf
|
||||
|
||||
*/
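A rough usage sketch of those two entry points (editorial example, not part of this diff), written as if it lived in package ethash next to the code below; it assumes block satisfies pow.Block and that device id 0 exists (see geth gpuinfo), with error handling trimmed:

// mineOnGPU is illustrative only.
func mineOnGPU(block pow.Block, stop <-chan struct{}) (uint64, []byte) {
	miner := NewCL([]int{0})
	if err := InitCL(block.NumberU64(), miner); err != nil {
		return 0, nil // OpenCL setup failed
	}
	// Index 0 selects the single device configured above.
	return miner.Search(block, stop, 0)
}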
|
||||
|
||||
type OpenCLDevice struct {
|
||||
deviceId int
|
||||
device *cl.Device
|
||||
openCL11 bool // OpenCL versions 1.1 and 1.2 are handled a bit differently
|
||||
openCL12 bool
|
||||
|
||||
dagBuf *cl.MemObject // Ethash full DAG in device mem
|
||||
headerBuf *cl.MemObject // Hash of block-to-mine in device mem
|
||||
searchBuffers []*cl.MemObject
|
||||
|
||||
searchKernel *cl.Kernel
|
||||
hashKernel *cl.Kernel
|
||||
|
||||
queue *cl.CommandQueue
|
||||
ctx *cl.Context
|
||||
workGroupSize int
|
||||
|
||||
nonceRand *mrand.Rand // seeded by crypto/rand, see comments where it's initialised
|
||||
result common.Hash
|
||||
}
|
||||
|
||||
type OpenCLMiner struct {
|
||||
mu sync.Mutex
|
||||
|
||||
ethash *Ethash // Ethash full DAG & cache in host mem
|
||||
|
||||
deviceIds []int
|
||||
devices []*OpenCLDevice
|
||||
|
||||
dagSize uint64
|
||||
|
||||
hashRate int32 // Go atomics & uint64 have some issues; int32 is supported on all platforms
|
||||
}
|
||||
|
||||
type pendingSearch struct {
|
||||
bufIndex uint32
|
||||
startNonce uint64
|
||||
}
|
||||
|
||||
const (
|
||||
SIZEOF_UINT32 = 4
|
||||
|
||||
// See [1]
|
||||
ethashMixBytesLen = 128
|
||||
ethashAccesses = 64
|
||||
|
||||
// See [4]
|
||||
workGroupSize = 32 // must be multiple of 8
|
||||
maxSearchResults = 63
|
||||
searchBufSize = 2
|
||||
globalWorkSize = 1024 * 256
|
||||
)
|
||||
|
||||
func NewCL(deviceIds []int) *OpenCLMiner {
|
||||
ids := make([]int, len(deviceIds))
|
||||
copy(ids, deviceIds)
|
||||
return &OpenCLMiner{
|
||||
ethash: New(),
|
||||
dagSize: 0, // to see if we need to update DAG.
|
||||
deviceIds: ids,
|
||||
}
|
||||
}
|
||||
|
||||
func PrintDevices() {
|
||||
fmt.Println("=============================================")
|
||||
fmt.Println("============ OpenCL Device Info =============")
|
||||
fmt.Println("=============================================")
|
||||
|
||||
var found []*cl.Device
|
||||
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
fmt.Println("Platform error (check your OpenCL installation):", err)
|
||||
return
|
||||
}
|
||||
|
||||
for i, p := range platforms {
|
||||
fmt.Println("Platform id ", i)
|
||||
fmt.Println("Platform Name ", p.Name())
|
||||
fmt.Println("Platform Vendor ", p.Vendor())
|
||||
fmt.Println("Platform Version ", p.Version())
|
||||
fmt.Println("Platform Extensions ", p.Extensions())
|
||||
fmt.Println("Platform Profile ", p.Profile())
|
||||
fmt.Println("")
|
||||
|
||||
devices, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
fmt.Println("Device error (check your GPU drivers) :", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, d := range devices {
|
||||
fmt.Println("Device OpenCL id ", i)
|
||||
fmt.Println("Device id for mining ", len(found))
|
||||
fmt.Println("Device Name ", d.Name())
|
||||
fmt.Println("Vendor ", d.Vendor())
|
||||
fmt.Println("Version ", d.Version())
|
||||
fmt.Println("Driver version ", d.DriverVersion())
|
||||
fmt.Println("Address bits ", d.AddressBits())
|
||||
fmt.Println("Max clock freq ", d.MaxClockFrequency())
|
||||
fmt.Println("Global mem size ", d.GlobalMemSize())
|
||||
fmt.Println("Max constant buffer size", d.MaxConstantBufferSize())
|
||||
fmt.Println("Max mem alloc size ", d.MaxMemAllocSize())
|
||||
fmt.Println("Max compute units ", d.MaxComputeUnits())
|
||||
fmt.Println("Max work group size ", d.MaxWorkGroupSize())
|
||||
fmt.Println("Max work item sizes ", d.MaxWorkItemSizes())
|
||||
fmt.Println("=============================================")
|
||||
|
||||
found = append(found, d)
|
||||
}
|
||||
}
|
||||
if len(found) == 0 {
|
||||
fmt.Println("Found no GPU(s). Check that your OS can see the GPU(s)")
|
||||
} else {
|
||||
var idsFormat string
|
||||
for i := 0; i < len(found); i++ {
|
||||
idsFormat += strconv.Itoa(i)
|
||||
if i != len(found)-1 {
|
||||
idsFormat += ","
|
||||
}
|
||||
}
|
||||
fmt.Printf("Found %v devices. Benchmark first GPU: geth gpubench 0\n", len(found))
|
||||
fmt.Printf("Mine using all GPUs: geth --minegpu %v\n", idsFormat)
|
||||
}
|
||||
}
|
||||
|
||||
// See [2]. We basically do the same here, but the Go OpenCL bindings
|
||||
// are at a slightly higher abstraction level.
|
||||
func InitCL(blockNum uint64, c *OpenCLMiner) error {
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Platform error: %v\nCheck your OpenCL installation and then run geth gpuinfo", err)
|
||||
}
|
||||
|
||||
var devices []*cl.Device
|
||||
for _, p := range platforms {
|
||||
ds, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Devices error: %v\nCheck your GPU drivers and then run geth gpuinfo", err)
|
||||
}
|
||||
for _, d := range ds {
|
||||
devices = append(devices, d)
|
||||
}
|
||||
}
|
||||
|
||||
pow := New()
|
||||
_ = pow.getDAG(blockNum) // generates DAG if we don't have it
|
||||
pow.Light.getCache(blockNum) // and cache
|
||||
|
||||
c.ethash = pow
|
||||
dagSize := uint64(C.ethash_get_datasize(C.uint64_t(blockNum)))
|
||||
c.dagSize = dagSize
|
||||
|
||||
for _, id := range c.deviceIds {
|
||||
if id > len(devices)-1 {
|
||||
return fmt.Errorf("Device id not found. See available device ids with: geth gpuinfo")
|
||||
} else {
|
||||
err := initCLDevice(id, devices[id], c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(c.devices) == 0 {
|
||||
return fmt.Errorf("No GPU devices found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
|
||||
devMaxAlloc := uint64(device.MaxMemAllocSize())
|
||||
devGlobalMem := uint64(device.GlobalMemSize())
|
||||
|
||||
// TODO: more fine grained version logic
|
||||
if device.Version() == "OpenCL 1.0" {
|
||||
fmt.Println("Device OpenCL version not supported: ", device.Version())
|
||||
return fmt.Errorf("opencl version not supported")
|
||||
}
|
||||
|
||||
var cl11, cl12 bool
|
||||
if device.Version() == "OpenCL 1.1" {
|
||||
cl11 = true
|
||||
}
|
||||
if device.Version() == "OpenCL 1.2" {
|
||||
cl12 = true
|
||||
}
|
||||
|
||||
// log warnings but carry on; some device drivers report inaccurate values
|
||||
if c.dagSize > devGlobalMem {
|
||||
fmt.Printf("WARNING: device memory may be insufficient: %v. DAG size: %v.\n", devGlobalMem, c.dagSize)
|
||||
}
|
||||
|
||||
if c.dagSize > devMaxAlloc {
|
||||
fmt.Printf("WARNING: DAG size (%v) larger than device max memory allocation size (%v).\n", c.dagSize, devMaxAlloc)
|
||||
fmt.Printf("You probably have to export GPU_MAX_ALLOC_PERCENT=95\n")
|
||||
}
|
||||
|
||||
fmt.Printf("Initialising device %v: %v\n", deviceId, device.Name())
|
||||
|
||||
context, err := cl.CreateContext([]*cl.Device{device})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating context: %v", err)
|
||||
}
|
||||
|
||||
// TODO: test running with CL_QUEUE_PROFILING_ENABLE for profiling?
|
||||
queue, err := context.CreateCommandQueue(device, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("command queue err: %v", err)
|
||||
}
|
||||
|
||||
// See [4] section 3.2 and [3] "clBuildProgram".
|
||||
// The OpenCL kernel code is compiled at run-time.
|
||||
kvs := make(map[string]string, 4)
|
||||
kvs["GROUP_SIZE"] = strconv.FormatUint(workGroupSize, 10)
|
||||
kvs["DAG_SIZE"] = strconv.FormatUint(c.dagSize/ethashMixBytesLen, 10)
|
||||
kvs["ACCESSES"] = strconv.FormatUint(ethashAccesses, 10)
|
||||
kvs["MAX_OUTPUTS"] = strconv.FormatUint(maxSearchResults, 10)
|
||||
kernelCode := replaceWords(kernel, kvs)
|
||||
|
||||
program, err := context.CreateProgramWithSource([]string{kernelCode})
|
||||
if err != nil {
|
||||
return fmt.Errorf("program err: %v", err)
|
||||
}
|
||||
|
||||
/* if using AMD OpenCL impl, you can set this to debug on x86 CPU device.
|
||||
see AMD OpenCL programming guide section 4.2
|
||||
|
||||
export in shell before running:
|
||||
export AMD_OCL_BUILD_OPTIONS_APPEND="-g -O0"
|
||||
export CPU_MAX_COMPUTE_UNITS=1
|
||||
|
||||
buildOpts := "-g -cl-opt-disable"
|
||||
|
||||
*/
|
||||
buildOpts := ""
|
||||
err = program.BuildProgram([]*cl.Device{device}, buildOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("program build err: %v", err)
|
||||
}
|
||||
|
||||
var searchKernelName, hashKernelName string
|
||||
searchKernelName = "ethash_search"
|
||||
hashKernelName = "ethash_hash"
|
||||
|
||||
searchKernel, err := program.CreateKernel(searchKernelName)
|
||||
hashKernel, err := program.CreateKernel(hashKernelName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kernel err: %v", err)
|
||||
}
|
||||
|
||||
// TODO: when this DAG size appears, patch the Go bindings
|
||||
// (context.go) to work with uint64 as size_t
|
||||
if c.dagSize > math.MaxInt32 {
|
||||
fmt.Println("DAG too large for allocation.")
|
||||
return fmt.Errorf("DAG too large for alloc")
|
||||
}
|
||||
|
||||
// TODO: patch up Go bindings to work with size_t, will overflow if > maxint32
|
||||
// TODO: this will overflow around 2017-06-09 12:17:02
|
||||
dagBuf := *(new(*cl.MemObject))
|
||||
dagBuf, err = context.CreateEmptyBuffer(cl.MemReadOnly, int(c.dagSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("allocating dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
// write DAG to device mem
|
||||
dagPtr := unsafe.Pointer(c.ethash.Full.current.ptr.data)
|
||||
_, err = queue.EnqueueWriteBuffer(dagBuf, true, 0, int(c.dagSize), dagPtr, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing to dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
searchBuffers := make([]*cl.MemObject, searchBufSize)
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
searchBuff, err := context.CreateEmptyBuffer(cl.MemWriteOnly, (1+maxSearchResults)*SIZEOF_UINT32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("search buffer err: %v", err)
|
||||
}
|
||||
searchBuffers[i] = searchBuff
|
||||
}
|
||||
|
||||
headerBuf, err := context.CreateEmptyBuffer(cl.MemReadOnly, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("header buffer err: %v", err)
|
||||
}
|
||||
|
||||
// Unique, random nonces are crucial for mining efficiency.
|
||||
// While we do not need cryptographically secure PRNG for nonces,
|
||||
// we want to have uniform distribution and minimal repetition of nonces.
|
||||
// We could guarantee strict uniqueness of nonces by generating unique ranges,
|
||||
// but an int64 seed from crypto/rand should be good enough.
|
||||
// we then use math/rand for speed and to avoid draining OS entropy pool
|
||||
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nonceRand := mrand.New(mrand.NewSource(seed.Int64()))
|
||||
|
||||
deviceStruct := &OpenCLDevice{
|
||||
deviceId: deviceId,
|
||||
device: device,
|
||||
openCL11: cl11,
|
||||
openCL12: cl12,
|
||||
|
||||
dagBuf: dagBuf,
|
||||
headerBuf: headerBuf,
|
||||
searchBuffers: searchBuffers,
|
||||
|
||||
searchKernel: searchKernel,
|
||||
hashKernel: hashKernel,
|
||||
|
||||
queue: queue,
|
||||
ctx: context,
|
||||
|
||||
workGroupSize: workGroupSize,
|
||||
|
||||
nonceRand: nonceRand,
|
||||
}
|
||||
c.devices = append(c.devices, deviceStruct)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
|
||||
c.mu.Lock()
|
||||
newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64())))
|
||||
if newDagSize > c.dagSize {
|
||||
// TODO: clean up buffers from previous DAG?
|
||||
err := InitCL(block.NumberU64(), c)
|
||||
if err != nil {
|
||||
fmt.Println("OpenCL init error: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Avoid unneeded OpenCL initialisation if we received stop while running InitCL
|
||||
select {
|
||||
case <-stop:
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
headerHash := block.HashNoNonce()
|
||||
diff := block.Difficulty()
|
||||
target256 := new(big.Int).Div(maxUint256, diff)
|
||||
target64 := new(big.Int).Rsh(target256, 192).Uint64()
|
||||
var zero uint32 = 0
|
||||
|
||||
d := c.devices[index]
|
||||
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all search buffers to complete
|
||||
err = d.queue.Finish()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clFinish : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(1, d.headerBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(2, d.dagBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(4, target64)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(5, uint32(math.MaxUint32))
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// wait on this before returning
|
||||
var preReturnEvent *cl.Event
|
||||
if d.openCL12 {
|
||||
preReturnEvent, err = d.ctx.CreateUserEvent()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search create CL user event : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
pending := make([]pendingSearch, 0, searchBufSize)
|
||||
var p *pendingSearch
|
||||
searchBufIndex := uint32(0)
|
||||
var checkNonce uint64
|
||||
loops := int64(0)
|
||||
prevHashRate := int32(0)
|
||||
start := time.Now().UnixNano()
|
||||
// we grab a single random nonce and set it as an argument to the kernel search function
// the device will then add each local thread's gid to the nonce, creating a unique nonce
// for each device compute unit executing in parallel
|
||||
initNonce := uint64(d.nonceRand.Int63())
|
||||
for nonce := initNonce; ; nonce += uint64(globalWorkSize) {
|
||||
select {
|
||||
case <-stop:
|
||||
|
||||
/*
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
atomic.AddInt32(&c.hashRate, -prevHashRate)
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
if (loops % (1 << 7)) == 0 {
|
||||
elapsed := time.Now().UnixNano() - start
|
||||
// TODO: verify if this is correct hash rate calculation
|
||||
hashes := (float64(1e9) / float64(elapsed)) * float64(loops*1024*256)
|
||||
hashrateDiff := int32(hashes) - prevHashRate
|
||||
prevHashRate = int32(hashes)
|
||||
atomic.AddInt32(&c.hashRate, hashrateDiff)
|
||||
}
|
||||
loops++
|
||||
|
||||
err = d.searchKernel.SetArg(0, d.searchBuffers[searchBufIndex])
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(3, nonce)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// execute kernel
|
||||
_, err := d.queue.EnqueueNDRangeKernel(
|
||||
d.searchKernel,
|
||||
[]int{0},
|
||||
[]int{globalWorkSize},
|
||||
[]int{d.workGroupSize},
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueNDRangeKernel : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
pending = append(pending, pendingSearch{bufIndex: searchBufIndex, startNonce: nonce})
|
||||
searchBufIndex = (searchBufIndex + 1) % searchBufSize
|
||||
|
||||
if len(pending) == searchBufSize {
|
||||
p = &(pending[searchBufIndex])
|
||||
cres, _, err := d.queue.EnqueueMapBuffer(d.searchBuffers[p.bufIndex], true,
|
||||
cl.MapFlagRead, 0, (1+maxSearchResults)*SIZEOF_UINT32,
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueMapBuffer: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
results := cres.ByteSlice()
|
||||
nfound := binary.LittleEndian.Uint32(results)
|
||||
nfound = uint32(math.Min(float64(nfound), float64(maxSearchResults)))
|
||||
// OpenCL returns the offsets from the start nonce
|
||||
for i := uint32(0); i < nfound; i++ {
|
||||
lo := (i + 1) * SIZEOF_UINT32
|
||||
hi := (i + 2) * SIZEOF_UINT32
|
||||
upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi]))
|
||||
checkNonce = p.startNonce + upperNonce
|
||||
if checkNonce != 0 {
|
||||
// We verify that the nonce is indeed a solution by
|
||||
// executing the Ethash verification function (on the CPU).
|
||||
cache := c.ethash.Light.getCache(block.NumberU64())
|
||||
ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce)
|
||||
|
||||
// TODO: return result first
|
||||
if ok && result.Big().Cmp(target256) <= 0 {
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err)
|
||||
}
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
return checkNonce, mixDigest.Bytes()
|
||||
}
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search cl: EnqueueWriteBuffer", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnMapMemObject: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
pending = append(pending[:searchBufIndex], pending[searchBufIndex+1:]...)
|
||||
}
|
||||
}
|
||||
if d.openCL12 {
|
||||
err := cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clWaitForEvents: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Verify(block pow.Block) bool {
|
||||
return c.ethash.Light.Verify(block)
|
||||
}
|
||||
func (c *OpenCLMiner) GetHashrate() int64 {
|
||||
return int64(atomic.LoadInt32(&c.hashRate))
|
||||
}
|
||||
func (c *OpenCLMiner) Turbo(on bool) {
|
||||
// This is GPU mining. Always be turbo.
|
||||
}
|
||||
|
||||
func replaceWords(text string, kvs map[string]string) string {
|
||||
for k, v := range kvs {
|
||||
text = strings.Replace(text, k, v, -1)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func logErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Println("Error in OpenCL call:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func argErr(err error) error {
|
||||
return fmt.Errorf("arg err: %v", err)
|
||||
}
|
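The OpenCL kernel in the next file is specialised at run time: replaceWords above rewrites the GROUP_SIZE, DAG_SIZE, ACCESSES and MAX_OUTPUTS placeholders before the program is built on the device. A self-contained sketch of that substitution step, with placeholder values only (not part of the diff):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// replaceWords matches the helper in the deleted ethash_opencl.go above.
func replaceWords(text string, kvs map[string]string) string {
	for k, v := range kvs {
		text = strings.Replace(text, k, v, -1)
	}
	return text
}

func main() {
	// Placeholder kernel fragment; the real source is the raw string below.
	kernel := "GROUP_SIZE DAG_SIZE ACCESSES MAX_OUTPUTS"
	kvs := map[string]string{
		"GROUP_SIZE":  strconv.Itoa(32),   // must be a multiple of 8
		"DAG_SIZE":    strconv.Itoa(1024), // dagSize / mix bytes in the real code
		"ACCESSES":    strconv.Itoa(64),
		"MAX_OUTPUTS": strconv.Itoa(63),
	}
	fmt.Println(replaceWords(kernel, kvs))
}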
600
Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl_kernel_go_str.go
generated
vendored
@ -1,600 +0,0 @@
|
||||
package ethash
|
||||
|
||||
/* DO NOT EDIT!!!
|
||||
|
||||
This code is version controlled at
|
||||
https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner_kernel.cl
|
||||
|
||||
If needed change it there first, then copy over here.
|
||||
*/
|
||||
|
||||
const kernel = `
|
||||
// author Tim Hughes <tim@twistedfury.com>
|
||||
// Tested on Radeon HD 7850
|
||||
// Hashrate: 15940347 hashes/s
|
||||
// Bandwidth: 124533 MB/s
|
||||
// search kernel should fit in <= 84 VGPRS (3 wavefronts)
|
||||
|
||||
#define THREADS_PER_HASH (128 / 16)
|
||||
#define HASHES_PER_LOOP (GROUP_SIZE / THREADS_PER_HASH)
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
__constant uint2 const Keccak_f1600_RC[24] = {
|
||||
(uint2)(0x00000001, 0x00000000),
|
||||
(uint2)(0x00008082, 0x00000000),
|
||||
(uint2)(0x0000808a, 0x80000000),
|
||||
(uint2)(0x80008000, 0x80000000),
|
||||
(uint2)(0x0000808b, 0x00000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008009, 0x80000000),
|
||||
(uint2)(0x0000008a, 0x00000000),
|
||||
(uint2)(0x00000088, 0x00000000),
|
||||
(uint2)(0x80008009, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x00000000),
|
||||
(uint2)(0x8000808b, 0x00000000),
|
||||
(uint2)(0x0000008b, 0x80000000),
|
||||
(uint2)(0x00008089, 0x80000000),
|
||||
(uint2)(0x00008003, 0x80000000),
|
||||
(uint2)(0x00008002, 0x80000000),
|
||||
(uint2)(0x00000080, 0x80000000),
|
||||
(uint2)(0x0000800a, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x80000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008080, 0x80000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008008, 0x80000000),
|
||||
};
|
||||
|
||||
void keccak_f1600_round(uint2* a, uint r, uint out_size)
|
||||
{
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
|
||||
uint2 b[25];
|
||||
uint2 t;
|
||||
|
||||
// Theta
|
||||
b[0] = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20];
|
||||
b[1] = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21];
|
||||
b[2] = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22];
|
||||
b[3] = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23];
|
||||
b[4] = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24];
|
||||
t = b[4] ^ (uint2)(b[1].x << 1 | b[1].y >> 31, b[1].y << 1 | b[1].x >> 31);
|
||||
a[0] ^= t;
|
||||
a[5] ^= t;
|
||||
a[10] ^= t;
|
||||
a[15] ^= t;
|
||||
a[20] ^= t;
|
||||
t = b[0] ^ (uint2)(b[2].x << 1 | b[2].y >> 31, b[2].y << 1 | b[2].x >> 31);
|
||||
a[1] ^= t;
|
||||
a[6] ^= t;
|
||||
a[11] ^= t;
|
||||
a[16] ^= t;
|
||||
a[21] ^= t;
|
||||
t = b[1] ^ (uint2)(b[3].x << 1 | b[3].y >> 31, b[3].y << 1 | b[3].x >> 31);
|
||||
a[2] ^= t;
|
||||
a[7] ^= t;
|
||||
a[12] ^= t;
|
||||
a[17] ^= t;
|
||||
a[22] ^= t;
|
||||
t = b[2] ^ (uint2)(b[4].x << 1 | b[4].y >> 31, b[4].y << 1 | b[4].x >> 31);
|
||||
a[3] ^= t;
|
||||
a[8] ^= t;
|
||||
a[13] ^= t;
|
||||
a[18] ^= t;
|
||||
a[23] ^= t;
|
||||
t = b[3] ^ (uint2)(b[0].x << 1 | b[0].y >> 31, b[0].y << 1 | b[0].x >> 31);
|
||||
a[4] ^= t;
|
||||
a[9] ^= t;
|
||||
a[14] ^= t;
|
||||
a[19] ^= t;
|
||||
a[24] ^= t;
|
||||
|
||||
// Rho Pi
|
||||
b[0] = a[0];
|
||||
b[10] = (uint2)(a[1].x << 1 | a[1].y >> 31, a[1].y << 1 | a[1].x >> 31);
|
||||
b[7] = (uint2)(a[10].x << 3 | a[10].y >> 29, a[10].y << 3 | a[10].x >> 29);
|
||||
b[11] = (uint2)(a[7].x << 6 | a[7].y >> 26, a[7].y << 6 | a[7].x >> 26);
|
||||
b[17] = (uint2)(a[11].x << 10 | a[11].y >> 22, a[11].y << 10 | a[11].x >> 22);
|
||||
b[18] = (uint2)(a[17].x << 15 | a[17].y >> 17, a[17].y << 15 | a[17].x >> 17);
|
||||
b[3] = (uint2)(a[18].x << 21 | a[18].y >> 11, a[18].y << 21 | a[18].x >> 11);
|
||||
b[5] = (uint2)(a[3].x << 28 | a[3].y >> 4, a[3].y << 28 | a[3].x >> 4);
|
||||
b[16] = (uint2)(a[5].y << 4 | a[5].x >> 28, a[5].x << 4 | a[5].y >> 28);
|
||||
b[8] = (uint2)(a[16].y << 13 | a[16].x >> 19, a[16].x << 13 | a[16].y >> 19);
|
||||
b[21] = (uint2)(a[8].y << 23 | a[8].x >> 9, a[8].x << 23 | a[8].y >> 9);
|
||||
b[24] = (uint2)(a[21].x << 2 | a[21].y >> 30, a[21].y << 2 | a[21].x >> 30);
|
||||
b[4] = (uint2)(a[24].x << 14 | a[24].y >> 18, a[24].y << 14 | a[24].x >> 18);
|
||||
b[15] = (uint2)(a[4].x << 27 | a[4].y >> 5, a[4].y << 27 | a[4].x >> 5);
|
||||
b[23] = (uint2)(a[15].y << 9 | a[15].x >> 23, a[15].x << 9 | a[15].y >> 23);
|
||||
b[19] = (uint2)(a[23].y << 24 | a[23].x >> 8, a[23].x << 24 | a[23].y >> 8);
|
||||
b[13] = (uint2)(a[19].x << 8 | a[19].y >> 24, a[19].y << 8 | a[19].x >> 24);
|
||||
b[12] = (uint2)(a[13].x << 25 | a[13].y >> 7, a[13].y << 25 | a[13].x >> 7);
|
||||
b[2] = (uint2)(a[12].y << 11 | a[12].x >> 21, a[12].x << 11 | a[12].y >> 21);
|
||||
b[20] = (uint2)(a[2].y << 30 | a[2].x >> 2, a[2].x << 30 | a[2].y >> 2);
|
||||
b[14] = (uint2)(a[20].x << 18 | a[20].y >> 14, a[20].y << 18 | a[20].x >> 14);
|
||||
b[22] = (uint2)(a[14].y << 7 | a[14].x >> 25, a[14].x << 7 | a[14].y >> 25);
|
||||
b[9] = (uint2)(a[22].y << 29 | a[22].x >> 3, a[22].x << 29 | a[22].y >> 3);
|
||||
b[6] = (uint2)(a[9].x << 20 | a[9].y >> 12, a[9].y << 20 | a[9].x >> 12);
|
||||
b[1] = (uint2)(a[6].y << 12 | a[6].x >> 20, a[6].x << 12 | a[6].y >> 20);
|
||||
|
||||
// Chi
|
||||
a[0] = bitselect(b[0] ^ b[2], b[0], b[1]);
|
||||
a[1] = bitselect(b[1] ^ b[3], b[1], b[2]);
|
||||
a[2] = bitselect(b[2] ^ b[4], b[2], b[3]);
|
||||
a[3] = bitselect(b[3] ^ b[0], b[3], b[4]);
|
||||
if (out_size >= 4)
|
||||
{
|
||||
a[4] = bitselect(b[4] ^ b[1], b[4], b[0]);
|
||||
a[5] = bitselect(b[5] ^ b[7], b[5], b[6]);
|
||||
a[6] = bitselect(b[6] ^ b[8], b[6], b[7]);
|
||||
a[7] = bitselect(b[7] ^ b[9], b[7], b[8]);
|
||||
a[8] = bitselect(b[8] ^ b[5], b[8], b[9]);
|
||||
if (out_size >= 8)
|
||||
{
|
||||
a[9] = bitselect(b[9] ^ b[6], b[9], b[5]);
|
||||
a[10] = bitselect(b[10] ^ b[12], b[10], b[11]);
|
||||
a[11] = bitselect(b[11] ^ b[13], b[11], b[12]);
|
||||
a[12] = bitselect(b[12] ^ b[14], b[12], b[13]);
|
||||
a[13] = bitselect(b[13] ^ b[10], b[13], b[14]);
|
||||
a[14] = bitselect(b[14] ^ b[11], b[14], b[10]);
|
||||
a[15] = bitselect(b[15] ^ b[17], b[15], b[16]);
|
||||
a[16] = bitselect(b[16] ^ b[18], b[16], b[17]);
|
||||
a[17] = bitselect(b[17] ^ b[19], b[17], b[18]);
|
||||
a[18] = bitselect(b[18] ^ b[15], b[18], b[19]);
|
||||
a[19] = bitselect(b[19] ^ b[16], b[19], b[15]);
|
||||
a[20] = bitselect(b[20] ^ b[22], b[20], b[21]);
|
||||
a[21] = bitselect(b[21] ^ b[23], b[21], b[22]);
|
||||
a[22] = bitselect(b[22] ^ b[24], b[22], b[23]);
|
||||
a[23] = bitselect(b[23] ^ b[20], b[23], b[24]);
|
||||
a[24] = bitselect(b[24] ^ b[21], b[24], b[20]);
|
||||
}
|
||||
}
|
||||
|
||||
// Iota
|
||||
a[0] ^= Keccak_f1600_RC[r];
|
||||
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
}
|
||||
|
||||
void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
|
||||
{
|
||||
for (uint i = in_size; i != 25; ++i)
|
||||
{
|
||||
a[i] = 0;
|
||||
}
|
||||
#if __ENDIAN_LITTLE__
|
||||
a[in_size] ^= 0x0000000000000001;
|
||||
a[24-out_size*2] ^= 0x8000000000000000;
|
||||
#else
|
||||
a[in_size] ^= 0x0100000000000000;
|
||||
a[24-out_size*2] ^= 0x0000000000000080;
|
||||
#endif
|
||||
|
||||
// Originally I unrolled the first and last rounds to interface
|
||||
// better with surrounding code, however I haven't done this
|
||||
// without causing the AMD compiler to blow up the VGPR usage.
|
||||
uint r = 0;
|
||||
do
|
||||
{
|
||||
// This dynamic branch stops the AMD compiler unrolling the loop
|
||||
// and additionally saves about 33% of the VGPRs, enough to gain another
|
||||
// wavefront. Ideally we'd get 4 in flight, but 3 is the best I can
|
||||
// massage out of the compiler. It doesn't really seem to matter how
|
||||
// much we try and help the compiler save VGPRs because it seems to throw
|
||||
// that information away, hence the implementation of keccak here
|
||||
// doesn't bother.
|
||||
if (isolate)
|
||||
{
|
||||
keccak_f1600_round((uint2*)a, r++, 25);
|
||||
}
|
||||
}
|
||||
while (r < 23);
|
||||
|
||||
// final round optimised for digest size
|
||||
keccak_f1600_round((uint2*)a, r++, out_size);
|
||||
}
|
||||
|
||||
#define copy(dst, src, count) for (uint i = 0; i != count; ++i) { (dst)[i] = (src)[i]; }
|
||||
|
||||
#define countof(x) (sizeof(x) / sizeof(x[0]))
|
||||
|
||||
uint fnv(uint x, uint y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint4 fnv4(uint4 x, uint4 y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint fnv_reduce(uint4 v)
|
||||
{
|
||||
return fnv(fnv(fnv(v.x, v.y), v.z), v.w);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[32 / sizeof(ulong)];
|
||||
uint uints[32 / sizeof(uint)];
|
||||
} hash32_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[64 / sizeof(ulong)];
|
||||
uint4 uint4s[64 / sizeof(uint4)];
|
||||
} hash64_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
uint uints[128 / sizeof(uint)];
|
||||
uint4 uint4s[128 / sizeof(uint4)];
|
||||
} hash128_t;
|
||||
|
||||
hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
|
||||
{
|
||||
hash64_t init;
|
||||
uint const init_size = countof(init.ulongs);
|
||||
uint const hash_size = countof(header->ulongs);
|
||||
|
||||
// sha3_512(header .. nonce)
|
||||
ulong state[25];
|
||||
copy(state, header->ulongs, hash_size);
|
||||
state[hash_size] = nonce;
|
||||
keccak_f1600_no_absorb(state, hash_size + 1, init_size, isolate);
|
||||
|
||||
copy(init.ulongs, state, init_size);
|
||||
return init;
|
||||
}
|
||||
|
||||
uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, __global hash128_t const* g_dag1, __global hash128_t const* g_dag2, __global hash128_t const* g_dag3, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, *share>=3 * DAG_SIZE / 4 ? g_dag3[*share - 3 * DAG_SIZE / 4].uint4s[thread_id] : *share>=DAG_SIZE / 2 ? g_dag2[*share - DAG_SIZE / 2].uint4s[thread_id] : *share>=DAG_SIZE / 4 ? g_dag1[*share - DAG_SIZE / 4].uint4s[thread_id]:g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
} while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
|
||||
uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
}
|
||||
while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
|
||||
{
|
||||
ulong state[25];
|
||||
|
||||
hash32_t hash;
|
||||
uint const hash_size = countof(hash.ulongs);
|
||||
uint const init_size = countof(init->ulongs);
|
||||
uint const mix_size = countof(mix->ulongs);
|
||||
|
||||
// keccak_256(keccak_512(header..nonce) .. mix);
|
||||
copy(state, init->ulongs, init_size);
|
||||
copy(state + init_size, mix->ulongs, mix_size);
|
||||
keccak_f1600_no_absorb(state, init_size+mix_size, hash_size, isolate);
|
||||
|
||||
// copy out
|
||||
copy(hash.ulongs, state, hash_size);
|
||||
return hash;
|
||||
}
|
||||
|
||||
hash32_t compute_hash_simple(
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
hash128_t mix;
|
||||
for (uint i = 0; i != countof(mix.uint4s); ++i)
|
||||
{
|
||||
mix.uint4s[i] = init.uint4s[i % countof(init.uint4s)];
|
||||
}
|
||||
|
||||
uint mix_val = mix.uints[0];
|
||||
uint init0 = mix.uints[0];
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
uint pi = fnv(init0 ^ a, mix_val) % DAG_SIZE;
|
||||
uint n = (a+1) % countof(mix.uints);
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != countof(mix.uints); ++i)
|
||||
{
|
||||
mix.uints[i] = fnv(mix.uints[i], g_dag[pi].uints[i]);
|
||||
mix_val = i == n ? mix.uints[i] : mix_val;
|
||||
}
|
||||
}
|
||||
while (++a != (ACCESSES & isolate));
|
||||
|
||||
// reduce to output
|
||||
hash32_t fnv_mix;
|
||||
for (uint i = 0; i != countof(fnv_mix.uints); ++i)
|
||||
{
|
||||
fnv_mix.uints[i] = fnv_reduce(mix.uint4s[i]);
|
||||
}
|
||||
|
||||
return final_hash(&init, &fnv_mix, isolate);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
struct
|
||||
{
|
||||
hash64_t init;
|
||||
uint pad; // avoid lds bank conflicts
|
||||
};
|
||||
hash32_t mix;
|
||||
} compute_hash_share;
|
||||
|
||||
|
||||
hash32_t compute_hash(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop(thread_init, thread_id, share[hash_id].mix.uints, g_dag, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
|
||||
hash32_t compute_hash_chunks(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop_chunks(thread_init, thread_id, share[hash_id].mix.uints, g_dag, g_dag1, g_dag2, g_dag3, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_simple(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_simple(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (hash.ulongs[countof(hash.ulongs)-1] < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min((uint)MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_chunks(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3,start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_chunks(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
`
|
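The search kernels above all share one reporting convention: g_output[0] is an atomic hit counter, and each hit writes its global work-item id (the nonce offset from start_nonce) into slots 1..MAX_OUTPUTS, with the slot index clamped so overflow hits are simply dropped. The chunked and shared-memory variants also byte-reverse the first 64 bits of the hash (the .s76543210 swizzle) so the comparison against target treats the hash as a big-endian number. Below is a minimal host-side sketch in Go of draining that output buffer; the results slice and maxOutputs value are assumptions standing in for whatever the OpenCL host code copies back, not go-ethereum's actual miner code.

package minerhost

// collectNonces interprets the kernel output buffer after a search run.
// results is assumed to hold the g_output contents copied from the device;
// maxOutputs mirrors the kernel's MAX_OUTPUTS constant.
func collectNonces(results []uint32, startNonce uint64, maxOutputs uint32) []uint64 {
	count := results[0] // slot 0: atomic counter, bumped once per found hash
	if count > maxOutputs {
		count = maxOutputs // the kernel clamps the slot index, so extra hits are lost
	}
	nonces := make([]uint64, 0, count)
	for i := uint32(1); i <= count; i++ {
		// each used slot holds the global work-item id, i.e. the nonce offset
		nonces = append(nonces, startNonce+uint64(results[i]))
	}
	return nonces
}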
204
Godeps/_workspace/src/github.com/ethereum/ethash/ethash_test.go
generated
vendored
Normal file
@ -0,0 +1,204 @@
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"log"
|
||||
"math/big"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// glog.SetV(6)
|
||||
// glog.SetToStderr(true)
|
||||
}
|
||||
|
||||
type testBlock struct {
|
||||
difficulty *big.Int
|
||||
hashNoNonce common.Hash
|
||||
nonce uint64
|
||||
mixDigest common.Hash
|
||||
number uint64
|
||||
}
|
||||
|
||||
func (b *testBlock) Difficulty() *big.Int { return b.difficulty }
|
||||
func (b *testBlock) HashNoNonce() common.Hash { return b.hashNoNonce }
|
||||
func (b *testBlock) Nonce() uint64 { return b.nonce }
|
||||
func (b *testBlock) MixDigest() common.Hash { return b.mixDigest }
|
||||
func (b *testBlock) NumberU64() uint64 { return b.number }
|
||||
|
||||
var validBlocks = []*testBlock{
|
||||
// from proof of concept nine testnet, epoch 0
|
||||
{
|
||||
number: 22,
|
||||
hashNoNonce: common.HexToHash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d"),
|
||||
difficulty: big.NewInt(132416),
|
||||
nonce: 0x495732e0ed7a801c,
|
||||
mixDigest: common.HexToHash("2f74cdeb198af0b9abe65d22d372e22fb2d474371774a9583c1cc427a07939f5"),
|
||||
},
|
||||
// from proof of concept nine testnet, epoch 1
|
||||
{
|
||||
number: 30001,
|
||||
hashNoNonce: common.HexToHash("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34"),
|
||||
difficulty: big.NewInt(1532671),
|
||||
nonce: 0x318df1c8adef7e5e,
|
||||
mixDigest: common.HexToHash("144b180aad09ae3c81fb07be92c8e6351b5646dda80e6844ae1b697e55ddde84"),
|
||||
},
|
||||
// from proof of concept nine testnet, epoch 2
|
||||
{
|
||||
number: 60000,
|
||||
hashNoNonce: common.HexToHash("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0"),
|
||||
difficulty: big.NewInt(2467358),
|
||||
nonce: 0x50377003e5d830ca,
|
||||
mixDigest: common.HexToHash("ab546a5b73c452ae86dadd36f0ed83a6745226717d3798832d1b20b489e82063"),
|
||||
},
|
||||
}
|
||||
|
||||
var invalidZeroDiffBlock = testBlock{
|
||||
number: 61440000,
|
||||
hashNoNonce: crypto.Sha3Hash([]byte("foo")),
|
||||
difficulty: big.NewInt(0),
|
||||
nonce: 0xcafebabec00000fe,
|
||||
mixDigest: crypto.Sha3Hash([]byte("bar")),
|
||||
}
|
||||
|
||||
func TestEthashVerifyValid(t *testing.T) {
|
||||
eth := New()
|
||||
for i, block := range validBlocks {
|
||||
if !eth.Verify(block) {
|
||||
t.Errorf("block %d (%x) did not validate.", i, block.hashNoNonce[:6])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestEthashVerifyInvalid(t *testing.T) {
|
||||
eth := New()
|
||||
if eth.Verify(&invalidZeroDiffBlock) {
|
||||
t.Errorf("should not validate - we just ensure it does not panic on this block")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEthashConcurrentVerify(t *testing.T) {
|
||||
eth, err := NewForTesting()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(eth.Full.Dir)
|
||||
|
||||
block := &testBlock{difficulty: big.NewInt(10)}
|
||||
nonce, md := eth.Search(block, nil)
|
||||
block.nonce = nonce
|
||||
block.mixDigest = common.BytesToHash(md)
|
||||
|
||||
// Verify the block concurrently to check for data races.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(100)
|
||||
for i := 0; i < 100; i++ {
|
||||
go func() {
|
||||
if !eth.Verify(block) {
|
||||
t.Error("Block could not be verified")
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func TestEthashConcurrentSearch(t *testing.T) {
|
||||
eth, err := NewForTesting()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
eth.Turbo(true)
|
||||
defer os.RemoveAll(eth.Full.Dir)
|
||||
|
||||
type searchRes struct {
|
||||
n uint64
|
||||
md []byte
|
||||
}
|
||||
|
||||
var (
|
||||
block = &testBlock{difficulty: big.NewInt(35000)}
|
||||
nsearch = 10
|
||||
wg = new(sync.WaitGroup)
|
||||
found = make(chan searchRes)
|
||||
stop = make(chan struct{})
|
||||
)
|
||||
rand.Read(block.hashNoNonce[:])
|
||||
wg.Add(nsearch)
|
||||
// launch n searches concurrently.
|
||||
for i := 0; i < nsearch; i++ {
|
||||
go func() {
|
||||
nonce, md := eth.Search(block, stop)
|
||||
select {
|
||||
case found <- searchRes{n: nonce, md: md}:
|
||||
case <-stop:
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// wait for one of them to find the nonce
|
||||
res := <-found
|
||||
// stop the others
|
||||
close(stop)
|
||||
wg.Wait()
|
||||
|
||||
block.nonce = res.n
|
||||
block.mixDigest = common.BytesToHash(res.md)
|
||||
if !eth.Verify(block) {
|
||||
t.Error("Block could not be verified")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEthashSearchAcrossEpoch(t *testing.T) {
|
||||
eth, err := NewForTesting()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(eth.Full.Dir)
|
||||
|
||||
for i := epochLength - 40; i < epochLength+40; i++ {
|
||||
block := &testBlock{number: i, difficulty: big.NewInt(90)}
|
||||
rand.Read(block.hashNoNonce[:])
|
||||
nonce, md := eth.Search(block, nil)
|
||||
block.nonce = nonce
|
||||
block.mixDigest = common.BytesToHash(md)
|
||||
if !eth.Verify(block) {
|
||||
t.Fatalf("Block could not be verified")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSeedHash(t *testing.T) {
|
||||
seed0, err := GetSeedHash(0)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get seedHash for block 0: %v", err)
|
||||
}
|
||||
if bytes.Compare(seed0, make([]byte, 32)) != 0 {
|
||||
log.Printf("seedHash for block 0 should be 0s, was: %v\n", seed0)
|
||||
}
|
||||
seed1, err := GetSeedHash(30000)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// From python:
|
||||
// > from pyethash import get_seedhash
|
||||
// > get_seedhash(30000)
|
||||
expectedSeed1, err := hex.DecodeString("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if bytes.Compare(seed1, expectedSeed1) != 0 {
|
||||
log.Printf("seedHash for block 1 should be: %v,\nactual value: %v\n", expectedSeed1, seed1)
|
||||
}
|
||||
|
||||
}
|
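The expected value in TestGetSeedHash is easy to re-derive: the ethash seed for epoch N is a 32-byte zero value hashed N times with Keccak-256, and block 30000 sits in epoch 1, so the expected 290decd9... constant is just Keccak-256 of 32 zero bytes. A small stand-alone sketch (not the package's own implementation; it assumes golang.org/x/crypto/sha3 for the legacy Keccak padding):

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// seedHash applies Keccak-256 to a 32-byte zero seed once per elapsed epoch.
func seedHash(blockNumber, epochLength uint64) []byte {
	seed := make([]byte, 32)
	for i := uint64(0); i < blockNumber/epochLength; i++ {
		h := sha3.NewLegacyKeccak256()
		h.Write(seed)
		seed = h.Sum(nil)
	}
	return seed
}

func main() {
	// expect 290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563
	fmt.Println(hex.EncodeToString(seedHash(30000, 30000)))
}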
16
Godeps/_workspace/src/github.com/ethereum/ethash/ethashc.go
generated
vendored
@ -1,19 +1,3 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
/*
|
||||
|
@ -1,6 +1,6 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Microsoft
|
||||
Copyright (c) 2015 Tim Hughes
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
190
Godeps/_workspace/src/github.com/ethereum/ethash/js/ethash.js
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
// ethash.js
|
||||
// Tim Hughes <tim@twistedfury.com>
|
||||
// Revision 19
|
||||
|
||||
/*jslint node: true, shadow:true */
|
||||
"use strict";
|
||||
|
||||
var Keccak = require('./keccak');
|
||||
var util = require('./util');
|
||||
|
||||
// 32-bit unsigned modulo
|
||||
function mod32(x, n)
|
||||
{
|
||||
return (x>>>0) % (n>>>0);
|
||||
}
|
||||
|
||||
function fnv(x, y)
|
||||
{
|
||||
// js integer multiply by 0x01000193 will lose precision
|
||||
return ((x*0x01000000 | 0) + (x*0x193 | 0)) ^ y;
|
||||
}
|
||||
|
||||
function computeCache(params, seedWords)
|
||||
{
|
||||
var cache = new Uint32Array(params.cacheSize >> 2);
|
||||
var cacheNodeCount = params.cacheSize >> 6;
|
||||
|
||||
// Initialize cache
|
||||
var keccak = new Keccak();
|
||||
keccak.digestWords(cache, 0, 16, seedWords, 0, seedWords.length);
|
||||
for (var n = 1; n < cacheNodeCount; ++n)
|
||||
{
|
||||
keccak.digestWords(cache, n<<4, 16, cache, (n-1)<<4, 16);
|
||||
}
|
||||
|
||||
var tmp = new Uint32Array(16);
|
||||
|
||||
// Do randmemohash passes
|
||||
for (var r = 0; r < params.cacheRounds; ++r)
|
||||
{
|
||||
for (var n = 0; n < cacheNodeCount; ++n)
|
||||
{
|
||||
var p0 = mod32(n + cacheNodeCount - 1, cacheNodeCount) << 4;
|
||||
var p1 = mod32(cache[n<<4|0], cacheNodeCount) << 4;
|
||||
|
||||
for (var w = 0; w < 16; w=(w+1)|0)
|
||||
{
|
||||
tmp[w] = cache[p0 | w] ^ cache[p1 | w];
|
||||
}
|
||||
|
||||
keccak.digestWords(cache, n<<4, 16, tmp, 0, tmp.length);
|
||||
}
|
||||
}
|
||||
return cache;
|
||||
}
|
||||
|
||||
function computeDagNode(o_node, params, cache, keccak, nodeIndex)
|
||||
{
|
||||
var cacheNodeCount = params.cacheSize >> 6;
|
||||
var dagParents = params.dagParents;
|
||||
|
||||
var c = (nodeIndex % cacheNodeCount) << 4;
|
||||
var mix = o_node;
|
||||
for (var w = 0; w < 16; ++w)
|
||||
{
|
||||
mix[w] = cache[c|w];
|
||||
}
|
||||
mix[0] ^= nodeIndex;
|
||||
keccak.digestWords(mix, 0, 16, mix, 0, 16);
|
||||
|
||||
for (var p = 0; p < dagParents; ++p)
|
||||
{
|
||||
// compute cache node (word) index
|
||||
c = mod32(fnv(nodeIndex ^ p, mix[p&15]), cacheNodeCount) << 4;
|
||||
|
||||
for (var w = 0; w < 16; ++w)
|
||||
{
|
||||
mix[w] = fnv(mix[w], cache[c|w]);
|
||||
}
|
||||
}
|
||||
|
||||
keccak.digestWords(mix, 0, 16, mix, 0, 16);
|
||||
}
|
||||
|
||||
function computeHashInner(mix, params, cache, keccak, tempNode)
|
||||
{
|
||||
var mixParents = params.mixParents|0;
|
||||
var mixWordCount = params.mixSize >> 2;
|
||||
var mixNodeCount = mixWordCount >> 4;
|
||||
var dagPageCount = (params.dagSize / params.mixSize) >> 0;
|
||||
|
||||
// grab initial first word
|
||||
var s0 = mix[0];
|
||||
|
||||
// initialise mix from initial 64 bytes
|
||||
for (var w = 16; w < mixWordCount; ++w)
|
||||
{
|
||||
mix[w] = mix[w & 15];
|
||||
}
|
||||
|
||||
for (var a = 0; a < mixParents; ++a)
|
||||
{
|
||||
var p = mod32(fnv(s0 ^ a, mix[a & (mixWordCount-1)]), dagPageCount);
|
||||
var d = (p * mixNodeCount)|0;
|
||||
|
||||
for (var n = 0, w = 0; n < mixNodeCount; ++n, w += 16)
|
||||
{
|
||||
computeDagNode(tempNode, params, cache, keccak, (d + n)|0);
|
||||
|
||||
for (var v = 0; v < 16; ++v)
|
||||
{
|
||||
mix[w|v] = fnv(mix[w|v], tempNode[v]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function convertSeed(seed)
|
||||
{
|
||||
// todo, reconcile with spec, byte ordering?
|
||||
// todo, big-endian conversion
|
||||
var newSeed = util.toWords(seed);
|
||||
if (newSeed === null)
|
||||
throw Error("Invalid seed '" + seed + "'");
|
||||
return newSeed;
|
||||
}
|
||||
|
||||
exports.defaultParams = function()
|
||||
{
|
||||
return {
|
||||
cacheSize: 1048384,
|
||||
cacheRounds: 3,
|
||||
dagSize: 1073739904,
|
||||
dagParents: 256,
|
||||
mixSize: 128,
|
||||
mixParents: 64,
|
||||
};
|
||||
};
|
||||
|
||||
exports.Ethash = function(params, seed)
|
||||
{
|
||||
// precompute cache and related values
|
||||
seed = convertSeed(seed);
|
||||
var cache = computeCache(params, seed);
|
||||
|
||||
// preallocate buffers/etc
|
||||
var initBuf = new ArrayBuffer(96);
|
||||
var initBytes = new Uint8Array(initBuf);
|
||||
var initWords = new Uint32Array(initBuf);
|
||||
var mixWords = new Uint32Array(params.mixSize / 4);
|
||||
var tempNode = new Uint32Array(16);
|
||||
var keccak = new Keccak();
|
||||
|
||||
var retWords = new Uint32Array(8);
|
||||
var retBytes = new Uint8Array(retWords.buffer); // supposedly read-only
|
||||
|
||||
this.hash = function(header, nonce)
|
||||
{
|
||||
// compute initial hash
|
||||
initBytes.set(header, 0);
|
||||
initBytes.set(nonce, 32);
|
||||
keccak.digestWords(initWords, 0, 16, initWords, 0, 8 + nonce.length/4);
|
||||
|
||||
// compute mix
|
||||
for (var i = 0; i != 16; ++i)
|
||||
{
|
||||
mixWords[i] = initWords[i];
|
||||
}
|
||||
computeHashInner(mixWords, params, cache, keccak, tempNode);
|
||||
|
||||
// compress mix and append to initWords
|
||||
for (var i = 0; i != mixWords.length; i += 4)
|
||||
{
|
||||
initWords[16 + i/4] = fnv(fnv(fnv(mixWords[i], mixWords[i+1]), mixWords[i+2]), mixWords[i+3]);
|
||||
}
|
||||
|
||||
// final Keccak hashes
|
||||
keccak.digestWords(retWords, 0, 8, initWords, 0, 24); // Keccak-256(s + cmix)
|
||||
return retBytes;
|
||||
};
|
||||
|
||||
this.cacheDigest = function()
|
||||
{
|
||||
return keccak.digest(32, new Uint8Array(cache.buffer));
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
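Two details of ethash.js are worth pulling out: fnv() has to split the multiply because JavaScript numbers lose integer precision, and hash() finishes by folding the 128-byte mix into a 32-byte cmix with nested FNV before the final Keccak-256. In Go the same fold is direct, since uint32 arithmetic wraps exactly; a sketch, not library code:

package ethashsketch

// fnv is the ethash variant of FNV-1: multiply the whole 32-bit word, then XOR.
func fnv(x, y uint32) uint32 { return x*0x01000193 ^ y }

// compressMix folds every four mix words into one, turning the 32-word
// (128-byte) mix into the 8-word (32-byte) cmix appended to the init state.
func compressMix(mix [32]uint32) (cmix [8]uint32) {
	for i := 0; i < len(mix); i += 4 {
		cmix[i/4] = fnv(fnv(fnv(mix[i], mix[i+1]), mix[i+2]), mix[i+3])
	}
	return cmix
}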
404
Godeps/_workspace/src/github.com/ethereum/ethash/js/keccak.js
generated
vendored
Normal file
@ -0,0 +1,404 @@
|
||||
// keccak.js
|
||||
// Tim Hughes <tim@twistedfury.com>
|
||||
// derived from Markku-Juhani O. Saarinen's C code (http://keccak.noekeon.org/readable_code.html)
|
||||
|
||||
/*jslint node: true, shadow:true */
|
||||
"use strict";
|
||||
|
||||
var Keccak_f1600_RC = new Uint32Array([
|
||||
0x00000001, 0x00000000,
|
||||
0x00008082, 0x00000000,
|
||||
0x0000808a, 0x80000000,
|
||||
0x80008000, 0x80000000,
|
||||
0x0000808b, 0x00000000,
|
||||
0x80000001, 0x00000000,
|
||||
0x80008081, 0x80000000,
|
||||
0x00008009, 0x80000000,
|
||||
0x0000008a, 0x00000000,
|
||||
0x00000088, 0x00000000,
|
||||
0x80008009, 0x00000000,
|
||||
0x8000000a, 0x00000000,
|
||||
0x8000808b, 0x00000000,
|
||||
0x0000008b, 0x80000000,
|
||||
0x00008089, 0x80000000,
|
||||
0x00008003, 0x80000000,
|
||||
0x00008002, 0x80000000,
|
||||
0x00000080, 0x80000000,
|
||||
0x0000800a, 0x00000000,
|
||||
0x8000000a, 0x80000000,
|
||||
0x80008081, 0x80000000,
|
||||
0x00008080, 0x80000000,
|
||||
0x80000001, 0x00000000,
|
||||
0x80008008, 0x80000000
|
||||
]);
|
||||
|
||||
function keccak_f1600(outState, outOffset, outSize, inState)
|
||||
{
|
||||
// todo, handle big endian loads
|
||||
var a00l = inState[0]|0;
|
||||
var a00h = inState[1]|0;
|
||||
var a01l = inState[2]|0;
|
||||
var a01h = inState[3]|0;
|
||||
var a02l = inState[4]|0;
|
||||
var a02h = inState[5]|0;
|
||||
var a03l = inState[6]|0;
|
||||
var a03h = inState[7]|0;
|
||||
var a04l = inState[8]|0;
|
||||
var a04h = inState[9]|0;
|
||||
var a05l = inState[10]|0;
|
||||
var a05h = inState[11]|0;
|
||||
var a06l = inState[12]|0;
|
||||
var a06h = inState[13]|0;
|
||||
var a07l = inState[14]|0;
|
||||
var a07h = inState[15]|0;
|
||||
var a08l = inState[16]|0;
|
||||
var a08h = inState[17]|0;
|
||||
var a09l = inState[18]|0;
|
||||
var a09h = inState[19]|0;
|
||||
var a10l = inState[20]|0;
|
||||
var a10h = inState[21]|0;
|
||||
var a11l = inState[22]|0;
|
||||
var a11h = inState[23]|0;
|
||||
var a12l = inState[24]|0;
|
||||
var a12h = inState[25]|0;
|
||||
var a13l = inState[26]|0;
|
||||
var a13h = inState[27]|0;
|
||||
var a14l = inState[28]|0;
|
||||
var a14h = inState[29]|0;
|
||||
var a15l = inState[30]|0;
|
||||
var a15h = inState[31]|0;
|
||||
var a16l = inState[32]|0;
|
||||
var a16h = inState[33]|0;
|
||||
var a17l = inState[34]|0;
|
||||
var a17h = inState[35]|0;
|
||||
var a18l = inState[36]|0;
|
||||
var a18h = inState[37]|0;
|
||||
var a19l = inState[38]|0;
|
||||
var a19h = inState[39]|0;
|
||||
var a20l = inState[40]|0;
|
||||
var a20h = inState[41]|0;
|
||||
var a21l = inState[42]|0;
|
||||
var a21h = inState[43]|0;
|
||||
var a22l = inState[44]|0;
|
||||
var a22h = inState[45]|0;
|
||||
var a23l = inState[46]|0;
|
||||
var a23h = inState[47]|0;
|
||||
var a24l = inState[48]|0;
|
||||
var a24h = inState[49]|0;
|
||||
var b00l, b00h, b01l, b01h, b02l, b02h, b03l, b03h, b04l, b04h;
|
||||
var b05l, b05h, b06l, b06h, b07l, b07h, b08l, b08h, b09l, b09h;
|
||||
var b10l, b10h, b11l, b11h, b12l, b12h, b13l, b13h, b14l, b14h;
|
||||
var b15l, b15h, b16l, b16h, b17l, b17h, b18l, b18h, b19l, b19h;
|
||||
var b20l, b20h, b21l, b21h, b22l, b22h, b23l, b23h, b24l, b24h;
|
||||
var tl, nl;
|
||||
var th, nh;
|
||||
|
||||
for (var r = 0; r < 48; r = (r+2)|0)
|
||||
{
|
||||
// Theta
|
||||
b00l = a00l ^ a05l ^ a10l ^ a15l ^ a20l;
|
||||
b00h = a00h ^ a05h ^ a10h ^ a15h ^ a20h;
|
||||
b01l = a01l ^ a06l ^ a11l ^ a16l ^ a21l;
|
||||
b01h = a01h ^ a06h ^ a11h ^ a16h ^ a21h;
|
||||
b02l = a02l ^ a07l ^ a12l ^ a17l ^ a22l;
|
||||
b02h = a02h ^ a07h ^ a12h ^ a17h ^ a22h;
|
||||
b03l = a03l ^ a08l ^ a13l ^ a18l ^ a23l;
|
||||
b03h = a03h ^ a08h ^ a13h ^ a18h ^ a23h;
|
||||
b04l = a04l ^ a09l ^ a14l ^ a19l ^ a24l;
|
||||
b04h = a04h ^ a09h ^ a14h ^ a19h ^ a24h;
|
||||
tl = b04l ^ (b01l << 1 | b01h >>> 31);
|
||||
th = b04h ^ (b01h << 1 | b01l >>> 31);
|
||||
a00l ^= tl;
|
||||
a00h ^= th;
|
||||
a05l ^= tl;
|
||||
a05h ^= th;
|
||||
a10l ^= tl;
|
||||
a10h ^= th;
|
||||
a15l ^= tl;
|
||||
a15h ^= th;
|
||||
a20l ^= tl;
|
||||
a20h ^= th;
|
||||
tl = b00l ^ (b02l << 1 | b02h >>> 31);
|
||||
th = b00h ^ (b02h << 1 | b02l >>> 31);
|
||||
a01l ^= tl;
|
||||
a01h ^= th;
|
||||
a06l ^= tl;
|
||||
a06h ^= th;
|
||||
a11l ^= tl;
|
||||
a11h ^= th;
|
||||
a16l ^= tl;
|
||||
a16h ^= th;
|
||||
a21l ^= tl;
|
||||
a21h ^= th;
|
||||
tl = b01l ^ (b03l << 1 | b03h >>> 31);
|
||||
th = b01h ^ (b03h << 1 | b03l >>> 31);
|
||||
a02l ^= tl;
|
||||
a02h ^= th;
|
||||
a07l ^= tl;
|
||||
a07h ^= th;
|
||||
a12l ^= tl;
|
||||
a12h ^= th;
|
||||
a17l ^= tl;
|
||||
a17h ^= th;
|
||||
a22l ^= tl;
|
||||
a22h ^= th;
|
||||
tl = b02l ^ (b04l << 1 | b04h >>> 31);
|
||||
th = b02h ^ (b04h << 1 | b04l >>> 31);
|
||||
a03l ^= tl;
|
||||
a03h ^= th;
|
||||
a08l ^= tl;
|
||||
a08h ^= th;
|
||||
a13l ^= tl;
|
||||
a13h ^= th;
|
||||
a18l ^= tl;
|
||||
a18h ^= th;
|
||||
a23l ^= tl;
|
||||
a23h ^= th;
|
||||
tl = b03l ^ (b00l << 1 | b00h >>> 31);
|
||||
th = b03h ^ (b00h << 1 | b00l >>> 31);
|
||||
a04l ^= tl;
|
||||
a04h ^= th;
|
||||
a09l ^= tl;
|
||||
a09h ^= th;
|
||||
a14l ^= tl;
|
||||
a14h ^= th;
|
||||
a19l ^= tl;
|
||||
a19h ^= th;
|
||||
a24l ^= tl;
|
||||
a24h ^= th;
|
||||
|
||||
// Rho Pi
|
||||
b00l = a00l;
|
||||
b00h = a00h;
|
||||
b10l = a01l << 1 | a01h >>> 31;
|
||||
b10h = a01h << 1 | a01l >>> 31;
|
||||
b07l = a10l << 3 | a10h >>> 29;
|
||||
b07h = a10h << 3 | a10l >>> 29;
|
||||
b11l = a07l << 6 | a07h >>> 26;
|
||||
b11h = a07h << 6 | a07l >>> 26;
|
||||
b17l = a11l << 10 | a11h >>> 22;
|
||||
b17h = a11h << 10 | a11l >>> 22;
|
||||
b18l = a17l << 15 | a17h >>> 17;
|
||||
b18h = a17h << 15 | a17l >>> 17;
|
||||
b03l = a18l << 21 | a18h >>> 11;
|
||||
b03h = a18h << 21 | a18l >>> 11;
|
||||
b05l = a03l << 28 | a03h >>> 4;
|
||||
b05h = a03h << 28 | a03l >>> 4;
|
||||
b16l = a05h << 4 | a05l >>> 28;
|
||||
b16h = a05l << 4 | a05h >>> 28;
|
||||
b08l = a16h << 13 | a16l >>> 19;
|
||||
b08h = a16l << 13 | a16h >>> 19;
|
||||
b21l = a08h << 23 | a08l >>> 9;
|
||||
b21h = a08l << 23 | a08h >>> 9;
|
||||
b24l = a21l << 2 | a21h >>> 30;
|
||||
b24h = a21h << 2 | a21l >>> 30;
|
||||
b04l = a24l << 14 | a24h >>> 18;
|
||||
b04h = a24h << 14 | a24l >>> 18;
|
||||
b15l = a04l << 27 | a04h >>> 5;
|
||||
b15h = a04h << 27 | a04l >>> 5;
|
||||
b23l = a15h << 9 | a15l >>> 23;
|
||||
b23h = a15l << 9 | a15h >>> 23;
|
||||
b19l = a23h << 24 | a23l >>> 8;
|
||||
b19h = a23l << 24 | a23h >>> 8;
|
||||
b13l = a19l << 8 | a19h >>> 24;
|
||||
b13h = a19h << 8 | a19l >>> 24;
|
||||
b12l = a13l << 25 | a13h >>> 7;
|
||||
b12h = a13h << 25 | a13l >>> 7;
|
||||
b02l = a12h << 11 | a12l >>> 21;
|
||||
b02h = a12l << 11 | a12h >>> 21;
|
||||
b20l = a02h << 30 | a02l >>> 2;
|
||||
b20h = a02l << 30 | a02h >>> 2;
|
||||
b14l = a20l << 18 | a20h >>> 14;
|
||||
b14h = a20h << 18 | a20l >>> 14;
|
||||
b22l = a14h << 7 | a14l >>> 25;
|
||||
b22h = a14l << 7 | a14h >>> 25;
|
||||
b09l = a22h << 29 | a22l >>> 3;
|
||||
b09h = a22l << 29 | a22h >>> 3;
|
||||
b06l = a09l << 20 | a09h >>> 12;
|
||||
b06h = a09h << 20 | a09l >>> 12;
|
||||
b01l = a06h << 12 | a06l >>> 20;
|
||||
b01h = a06l << 12 | a06h >>> 20;
|
||||
|
||||
// Chi
|
||||
a00l = b00l ^ ~b01l & b02l;
|
||||
a00h = b00h ^ ~b01h & b02h;
|
||||
a01l = b01l ^ ~b02l & b03l;
|
||||
a01h = b01h ^ ~b02h & b03h;
|
||||
a02l = b02l ^ ~b03l & b04l;
|
||||
a02h = b02h ^ ~b03h & b04h;
|
||||
a03l = b03l ^ ~b04l & b00l;
|
||||
a03h = b03h ^ ~b04h & b00h;
|
||||
a04l = b04l ^ ~b00l & b01l;
|
||||
a04h = b04h ^ ~b00h & b01h;
|
||||
a05l = b05l ^ ~b06l & b07l;
|
||||
a05h = b05h ^ ~b06h & b07h;
|
||||
a06l = b06l ^ ~b07l & b08l;
|
||||
a06h = b06h ^ ~b07h & b08h;
|
||||
a07l = b07l ^ ~b08l & b09l;
|
||||
a07h = b07h ^ ~b08h & b09h;
|
||||
a08l = b08l ^ ~b09l & b05l;
|
||||
a08h = b08h ^ ~b09h & b05h;
|
||||
a09l = b09l ^ ~b05l & b06l;
|
||||
a09h = b09h ^ ~b05h & b06h;
|
||||
a10l = b10l ^ ~b11l & b12l;
|
||||
a10h = b10h ^ ~b11h & b12h;
|
||||
a11l = b11l ^ ~b12l & b13l;
|
||||
a11h = b11h ^ ~b12h & b13h;
|
||||
a12l = b12l ^ ~b13l & b14l;
|
||||
a12h = b12h ^ ~b13h & b14h;
|
||||
a13l = b13l ^ ~b14l & b10l;
|
||||
a13h = b13h ^ ~b14h & b10h;
|
||||
a14l = b14l ^ ~b10l & b11l;
|
||||
a14h = b14h ^ ~b10h & b11h;
|
||||
a15l = b15l ^ ~b16l & b17l;
|
||||
a15h = b15h ^ ~b16h & b17h;
|
||||
a16l = b16l ^ ~b17l & b18l;
|
||||
a16h = b16h ^ ~b17h & b18h;
|
||||
a17l = b17l ^ ~b18l & b19l;
|
||||
a17h = b17h ^ ~b18h & b19h;
|
||||
a18l = b18l ^ ~b19l & b15l;
|
||||
a18h = b18h ^ ~b19h & b15h;
|
||||
a19l = b19l ^ ~b15l & b16l;
|
||||
a19h = b19h ^ ~b15h & b16h;
|
||||
a20l = b20l ^ ~b21l & b22l;
|
||||
a20h = b20h ^ ~b21h & b22h;
|
||||
a21l = b21l ^ ~b22l & b23l;
|
||||
a21h = b21h ^ ~b22h & b23h;
|
||||
a22l = b22l ^ ~b23l & b24l;
|
||||
a22h = b22h ^ ~b23h & b24h;
|
||||
a23l = b23l ^ ~b24l & b20l;
|
||||
a23h = b23h ^ ~b24h & b20h;
|
||||
a24l = b24l ^ ~b20l & b21l;
|
||||
a24h = b24h ^ ~b20h & b21h;
|
||||
|
||||
// Iota
|
||||
a00l ^= Keccak_f1600_RC[r|0];
|
||||
a00h ^= Keccak_f1600_RC[r|1];
|
||||
}
|
||||
|
||||
// todo, handle big-endian stores
|
||||
outState[outOffset|0] = a00l;
|
||||
outState[outOffset|1] = a00h;
|
||||
outState[outOffset|2] = a01l;
|
||||
outState[outOffset|3] = a01h;
|
||||
outState[outOffset|4] = a02l;
|
||||
outState[outOffset|5] = a02h;
|
||||
outState[outOffset|6] = a03l;
|
||||
outState[outOffset|7] = a03h;
|
||||
if (outSize == 8)
|
||||
return;
|
||||
outState[outOffset|8] = a04l;
|
||||
outState[outOffset|9] = a04h;
|
||||
outState[outOffset|10] = a05l;
|
||||
outState[outOffset|11] = a05h;
|
||||
outState[outOffset|12] = a06l;
|
||||
outState[outOffset|13] = a06h;
|
||||
outState[outOffset|14] = a07l;
|
||||
outState[outOffset|15] = a07h;
|
||||
if (outSize == 16)
|
||||
return;
|
||||
outState[outOffset|16] = a08l;
|
||||
outState[outOffset|17] = a08h;
|
||||
outState[outOffset|18] = a09l;
|
||||
outState[outOffset|19] = a09h;
|
||||
outState[outOffset|20] = a10l;
|
||||
outState[outOffset|21] = a10h;
|
||||
outState[outOffset|22] = a11l;
|
||||
outState[outOffset|23] = a11h;
|
||||
outState[outOffset|24] = a12l;
|
||||
outState[outOffset|25] = a12h;
|
||||
outState[outOffset|26] = a13l;
|
||||
outState[outOffset|27] = a13h;
|
||||
outState[outOffset|28] = a14l;
|
||||
outState[outOffset|29] = a14h;
|
||||
outState[outOffset|30] = a15l;
|
||||
outState[outOffset|31] = a15h;
|
||||
outState[outOffset|32] = a16l;
|
||||
outState[outOffset|33] = a16h;
|
||||
outState[outOffset|34] = a17l;
|
||||
outState[outOffset|35] = a17h;
|
||||
outState[outOffset|36] = a18l;
|
||||
outState[outOffset|37] = a18h;
|
||||
outState[outOffset|38] = a19l;
|
||||
outState[outOffset|39] = a19h;
|
||||
outState[outOffset|40] = a20l;
|
||||
outState[outOffset|41] = a20h;
|
||||
outState[outOffset|42] = a21l;
|
||||
outState[outOffset|43] = a21h;
|
||||
outState[outOffset|44] = a22l;
|
||||
outState[outOffset|45] = a22h;
|
||||
outState[outOffset|46] = a23l;
|
||||
outState[outOffset|47] = a23h;
|
||||
outState[outOffset|48] = a24l;
|
||||
outState[outOffset|49] = a24h;
|
||||
}
|
||||
|
||||
var Keccak = function()
|
||||
{
|
||||
var stateBuf = new ArrayBuffer(200);
|
||||
var stateBytes = new Uint8Array(stateBuf);
|
||||
var stateWords = new Uint32Array(stateBuf);
|
||||
|
||||
this.digest = function(oSize, iBytes)
|
||||
{
|
||||
for (var i = 0; i < 50; ++i)
|
||||
{
|
||||
stateWords[i] = 0;
|
||||
}
|
||||
|
||||
var r = 200 - oSize*2;
|
||||
var iLength = iBytes.length;
|
||||
var iOffset = 0;
|
||||
for ( ; ;)
|
||||
{
|
||||
var len = iLength < r ? iLength : r;
|
||||
for (i = 0; i < len; ++i, ++iOffset)
|
||||
{
|
||||
stateBytes[i] ^= iBytes[iOffset];
|
||||
}
|
||||
|
||||
if (iLength < r)
|
||||
break;
|
||||
iLength -= len;
|
||||
|
||||
keccak_f1600(stateWords, 0, 50, stateWords);
|
||||
}
|
||||
|
||||
stateBytes[iLength] ^= 1;
|
||||
stateBytes[r-1] ^= 0x80;
|
||||
keccak_f1600(stateWords, 0, 50, stateWords);
|
||||
return stateBytes.subarray(0, oSize);
|
||||
};
|
||||
|
||||
this.digestWords = function(oWords, oOffset, oLength, iWords, iOffset, iLength)
|
||||
{
|
||||
for (var i = 0; i < 50; ++i)
|
||||
{
|
||||
stateWords[i] = 0;
|
||||
}
|
||||
|
||||
var r = 50 - oLength*2;
|
||||
for (; ; )
|
||||
{
|
||||
var len = iLength < r ? iLength : r;
|
||||
for (i = 0; i < len; ++i, ++iOffset)
|
||||
{
|
||||
stateWords[i] ^= iWords[iOffset];
|
||||
}
|
||||
|
||||
if (iLength < r)
|
||||
break;
|
||||
iLength -= len;
|
||||
|
||||
keccak_f1600(stateWords, 0, 50, stateWords);
|
||||
}
|
||||
|
||||
stateBytes[iLength<<2] ^= 1;
|
||||
stateBytes[(r<<2) - 1] ^= 0x80;
|
||||
keccak_f1600(oWords, oOffset, oLength, stateWords);
|
||||
};
|
||||
};
|
||||
|
||||
module.exports = Keccak;
|
||||
|
||||
|
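The long runs of paired lo/hi expressions above (for example b10l = a01l << 1 | a01h >>> 31) exist because JavaScript has no 64-bit integers: each Keccak lane is kept as two 32-bit halves, and a 64-bit rotate becomes shifts and ORs across the halves, with the halves swapped when the rotation count reaches 32. For comparison, a Go sketch of the same lane rotation collapses to a single call:

package keccaksketch

import "math/bits"

// rotl64 rotates a lane stored as (lo, hi) 32-bit halves left by n bits,
// mirroring what the unrolled lo/hi shift pairs in keccak.js compute.
func rotl64(lo, hi uint32, n int) (uint32, uint32) {
	v := bits.RotateLeft64(uint64(hi)<<32|uint64(lo), n)
	return uint32(v), uint32(v >> 32)
}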
201
Godeps/_workspace/src/github.com/ethereum/ethash/js/makekeccak.js
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
#!/usr/bin/env node
|
||||
// makekeccak.js
|
||||
// Tim Hughes <tim@twistedfury.com>
|
||||
|
||||
/*jslint node: true, shadow:true */
|
||||
"use strict";
|
||||
|
||||
var Keccak_f1600_Rho = [
|
||||
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14,
|
||||
27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44
|
||||
];
|
||||
|
||||
var Keccak_f1600_Pi= [
|
||||
10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4,
|
||||
15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1
|
||||
];
|
||||
|
||||
var Keccak_f1600_RC = [
|
||||
0x00000001, 0x00000000,
|
||||
0x00008082, 0x00000000,
|
||||
0x0000808a, 0x80000000,
|
||||
0x80008000, 0x80000000,
|
||||
0x0000808b, 0x00000000,
|
||||
0x80000001, 0x00000000,
|
||||
0x80008081, 0x80000000,
|
||||
0x00008009, 0x80000000,
|
||||
0x0000008a, 0x00000000,
|
||||
0x00000088, 0x00000000,
|
||||
0x80008009, 0x00000000,
|
||||
0x8000000a, 0x00000000,
|
||||
0x8000808b, 0x00000000,
|
||||
0x0000008b, 0x80000000,
|
||||
0x00008089, 0x80000000,
|
||||
0x00008003, 0x80000000,
|
||||
0x00008002, 0x80000000,
|
||||
0x00000080, 0x80000000,
|
||||
0x0000800a, 0x00000000,
|
||||
0x8000000a, 0x80000000,
|
||||
0x80008081, 0x80000000,
|
||||
0x00008080, 0x80000000,
|
||||
0x80000001, 0x00000000,
|
||||
0x80008008, 0x80000000,
|
||||
];
|
||||
|
||||
function makeRotLow(lo, hi, n)
|
||||
{
|
||||
if (n === 0 || n === 32) throw Error("unsupported");
|
||||
if ((n & 0x20) !== 0)
|
||||
{
|
||||
n &= ~0x20;
|
||||
var t = hi;
|
||||
hi = lo;
|
||||
lo = t;
|
||||
}
|
||||
var hir = hi + " >>> " + (32 - n);
|
||||
var los = lo + " << " + n;
|
||||
return los + " | " + hir;
|
||||
}
|
||||
|
||||
function makeRotHigh(lo, hi, n)
|
||||
{
|
||||
if (n === 0 || n === 32) throw Error("unsupported");
|
||||
if ((n & 0x20) !== 0)
|
||||
{
|
||||
n &= ~0x20;
|
||||
var t = hi;
|
||||
hi = lo;
|
||||
lo = t;
|
||||
}
|
||||
var his = hi + " << " + n;
|
||||
var lor = lo + " >>> " + (32 - n);
|
||||
return his + " | " + lor;
|
||||
}
|
||||
|
||||
function makeKeccak_f1600()
|
||||
{
|
||||
var format = function(n)
|
||||
{
|
||||
return n < 10 ? "0"+n : ""+n;
|
||||
};
|
||||
|
||||
var a = function(n, w)
|
||||
{
|
||||
return "a" + format(n) + (w !== 0?'h':'l');
|
||||
};
|
||||
|
||||
var b = function(n, w)
|
||||
{
|
||||
return "b" + format(n) + (w !== 0?'h':'l');
|
||||
};
|
||||
|
||||
var str = "";
|
||||
str += "function keccak_f1600(outState, outOffset, outSize, inState)\n";
|
||||
str += "{\n";
|
||||
|
||||
for (var i = 0; i < 25; ++i)
|
||||
{
|
||||
for (var w = 0; w <= 1; ++w)
|
||||
{
|
||||
str += "\tvar " + a(i,w) + " = inState["+(i<<1|w)+"]|0;\n";
|
||||
}
|
||||
}
|
||||
|
||||
for (var j = 0; j < 5; ++j)
|
||||
{
|
||||
str += "\tvar ";
|
||||
for (var i = 0; i < 5; ++i)
|
||||
{
|
||||
if (i !== 0)
|
||||
str += ", ";
|
||||
str += b(j*5+i,0) + ", " + b(j*5+i,1);
|
||||
}
|
||||
str += ";\n";
|
||||
}
|
||||
|
||||
str += "\tvar tl, th;\n";
|
||||
str += "\n";
|
||||
str += "\tfor (var r = 0; r < 48; r = (r+2)|0)\n";
|
||||
str += "\t{\n";
|
||||
|
||||
|
||||
// Theta
|
||||
str += "\t\t// Theta\n";
|
||||
for (var i = 0; i < 5; ++i)
|
||||
{
|
||||
for (var w = 0; w <= 1; ++w)
|
||||
{
|
||||
str += "\t\t" + b(i,w) + " = " + a(i,w) + " ^ " + a(i+5,w) + " ^ " + a(i+10,w) + " ^ " + a(i+15,w) + " ^ " + a(i+20,w) + ";\n";
|
||||
}
|
||||
}
|
||||
|
||||
for (var i = 0; i < 5; ++i)
|
||||
{
|
||||
var i4 = (i + 4) % 5;
|
||||
var i1 = (i + 1) % 5;
|
||||
str += "\t\ttl = " + b(i4,0) + " ^ (" + b(i1,0) + " << 1 | " + b(i1,1) + " >>> 31);\n";
|
||||
str += "\t\tth = " + b(i4,1) + " ^ (" + b(i1,1) + " << 1 | " + b(i1,0) + " >>> 31);\n";
|
||||
|
||||
for (var j = 0; j < 25; j = (j+5)|0)
|
||||
{
|
||||
str += "\t\t" + a((j+i),0) + " ^= tl;\n";
|
||||
str += "\t\t" + a((j+i),1) + " ^= th;\n";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Rho Pi
|
||||
str += "\n\t\t// Rho Pi\n";
|
||||
for (var w = 0; w <= 1; ++w)
|
||||
{
|
||||
str += "\t\t" + b(0,w) + " = " + a(0,w) + ";\n";
|
||||
}
|
||||
var opi = 1;
|
||||
for (var i = 0; i < 24; ++i)
|
||||
{
|
||||
var pi = Keccak_f1600_Pi[i];
|
||||
str += "\t\t" + b(pi,0) + " = " + makeRotLow(a(opi,0), a(opi,1), Keccak_f1600_Rho[i]) + ";\n";
|
||||
str += "\t\t" + b(pi,1) + " = " + makeRotHigh(a(opi,0), a(opi,1), Keccak_f1600_Rho[i]) + ";\n";
|
||||
opi = pi;
|
||||
}
|
||||
|
||||
// Chi
|
||||
str += "\n\t\t// Chi\n";
|
||||
for (var j = 0; j < 25; j += 5)
|
||||
{
|
||||
for (var i = 0; i < 5; ++i)
|
||||
{
|
||||
for (var w = 0; w <= 1; ++w)
|
||||
{
|
||||
str += "\t\t" + a(j+i,w) + " = " + b(j+i,w) + " ^ ~" + b(j+(i+1)%5,w) + " & " + b(j+(i+2)%5,w) + ";\n";
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Iota
|
||||
str += "\n\t\t// Iota\n";
|
||||
for (var w = 0; w <= 1; ++w)
|
||||
{
|
||||
str += "\t\t" + a(0,w) + " ^= Keccak_f1600_RC[r|" + w + "];\n";
|
||||
}
|
||||
|
||||
|
||||
str += "\t}\n";
|
||||
|
||||
for (var i = 0; i < 25; ++i)
|
||||
{
|
||||
if (i == 4 || i == 8)
|
||||
{
|
||||
str += "\tif (outSize == " + i*2 + ")\n\t\treturn;\n";
|
||||
}
|
||||
for (var w = 0; w <= 1; ++w)
|
||||
{
|
||||
str += "\toutState[outOffset|"+(i<<1|w)+"] = " + a(i,w) + ";\n";
|
||||
}
|
||||
}
|
||||
str += "}\n";
|
||||
|
||||
return str;
|
||||
}
|
||||
|
||||
console.log(makeKeccak_f1600());
|
53
Godeps/_workspace/src/github.com/ethereum/ethash/js/test.js
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// test.js
|
||||
// Tim Hughes <tim@twistedfury.com>
|
||||
|
||||
/*jslint node: true, shadow:true */
|
||||
"use strict";
|
||||
|
||||
var ethash = require('./ethash');
|
||||
var util = require('./util');
|
||||
var Keccak = require('./keccak');
|
||||
|
||||
// sanity check hash functions
|
||||
var src = util.stringToBytes("");
|
||||
if (util.bytesToHexString(new Keccak().digest(32, src)) != "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") throw Error("Keccak-256 failed");
|
||||
if (util.bytesToHexString(new Keccak().digest(64, src)) != "0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e") throw Error("Keccak-512 failed");
|
||||
|
||||
src = new Uint32Array(src.buffer);
|
||||
var dst = new Uint32Array(8);
|
||||
new Keccak().digestWords(dst, 0, dst.length, src, 0, src.length);
|
||||
if (util.wordsToHexString(dst) != "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470") throw Error("Keccak-256 Fast failed");
|
||||
|
||||
var dst = new Uint32Array(16);
|
||||
new Keccak().digestWords(dst, 0, dst.length, src, 0, src.length);
|
||||
if (util.wordsToHexString(dst) != "0eab42de4c3ceb9235fc91acffe746b29c29a8c366b7c60e4e67c466f36a4304c00fa9caf9d87976ba469bcbe06713b435f091ef2769fb160cdab33d3670680e") throw Error("Keccak-512 Fast failed");
|
||||
|
||||
|
||||
// init params
|
||||
var ethashParams = ethash.defaultParams();
|
||||
//ethashParams.cacheRounds = 0;
|
||||
|
||||
// create hasher
|
||||
var seed = util.hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466")
|
||||
var startTime = new Date().getTime();
|
||||
var hasher = new ethash.Ethash(ethashParams, seed);
|
||||
console.log('Ethash startup took: '+(new Date().getTime() - startTime) + "ms");
|
||||
console.log('Ethash cache hash: ' + util.bytesToHexString(hasher.cacheDigest()));
|
||||
|
||||
var testHexString = "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470";
|
||||
if (testHexString != util.bytesToHexString(util.hexStringToBytes(testHexString)))
|
||||
throw Error("bytesToHexString or hexStringToBytes broken");
|
||||
|
||||
|
||||
var header = util.hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
|
||||
var nonce = util.hexStringToBytes("0000000000000000");
|
||||
var hash;
|
||||
|
||||
startTime = new Date().getTime();
|
||||
var trials = 10;
|
||||
for (var i = 0; i < trials; ++i)
|
||||
{
|
||||
hash = hasher.hash(header, nonce);
|
||||
}
|
||||
console.log("Light client hashes averaged: " + (new Date().getTime() - startTime)/trials + "ms");
|
||||
console.log("Hash = " + util.bytesToHexString(hash));
|
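The two hex constants test.js checks against are the original-Keccak digests of the empty string, which is why keccak.js pads with 0x01 (stateBytes[iLength] ^= 1) rather than the NIST SHA-3 0x06 byte. A quick cross-check in Go (a sketch, assuming golang.org/x/crypto/sha3 is available):

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Legacy Keccak-256 of "" matches the c5d24601... vector used above.
	fmt.Println(hex.EncodeToString(sha3.NewLegacyKeccak256().Sum(nil)))
	// NIST SHA3-256 of "" does not (a7ffc6f8...), because of the padding change.
	fmt.Println(hex.EncodeToString(sha3.New256().Sum(nil)))
}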
100
Godeps/_workspace/src/github.com/ethereum/ethash/js/util.js
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
// util.js
|
||||
// Tim Hughes <tim@twistedfury.com>
|
||||
|
||||
/*jslint node: true, shadow:true */
|
||||
"use strict";
|
||||
|
||||
function nibbleToChar(nibble)
|
||||
{
|
||||
return String.fromCharCode((nibble < 10 ? 48 : 87) + nibble);
|
||||
}
|
||||
|
||||
function charToNibble(chr)
|
||||
{
|
||||
if (chr >= 48 && chr <= 57)
|
||||
{
|
||||
return chr - 48;
|
||||
}
|
||||
if (chr >= 65 && chr <= 70)
|
||||
{
|
||||
return chr - 65 + 10;
|
||||
}
|
||||
if (chr >= 97 && chr <= 102)
|
||||
{
|
||||
return chr - 97 + 10;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
function stringToBytes(str)
|
||||
{
|
||||
var bytes = new Uint8Array(str.length);
|
||||
for (var i = 0; i != str.length; ++i)
|
||||
{
|
||||
bytes[i] = str.charCodeAt(i);
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
function hexStringToBytes(str)
|
||||
{
|
||||
var bytes = new Uint8Array(str.length>>>1);
|
||||
for (var i = 0; i != bytes.length; ++i)
|
||||
{
|
||||
bytes[i] = charToNibble(str.charCodeAt(i<<1 | 0)) << 4;
|
||||
bytes[i] |= charToNibble(str.charCodeAt(i<<1 | 1));
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
function bytesToHexString(bytes)
|
||||
{
|
||||
var str = "";
|
||||
for (var i = 0; i != bytes.length; ++i)
|
||||
{
|
||||
str += nibbleToChar(bytes[i] >>> 4);
|
||||
str += nibbleToChar(bytes[i] & 0xf);
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
function wordsToHexString(words)
|
||||
{
|
||||
return bytesToHexString(new Uint8Array(words.buffer));
|
||||
}
|
||||
|
||||
function uint32ToHexString(num)
|
||||
{
|
||||
var buf = new Uint8Array(4);
|
||||
buf[0] = (num >> 24) & 0xff;
|
||||
buf[1] = (num >> 16) & 0xff;
|
||||
buf[2] = (num >> 8) & 0xff;
|
||||
buf[3] = (num >> 0) & 0xff;
|
||||
return bytesToHexString(buf);
|
||||
}
|
||||
|
||||
function toWords(input)
|
||||
{
|
||||
if (input instanceof Uint32Array)
|
||||
{
|
||||
return input;
|
||||
}
|
||||
else if (input instanceof Uint8Array)
|
||||
{
|
||||
var tmp = new Uint8Array((input.length + 3) & ~3);
|
||||
tmp.set(input);
|
||||
return new Uint32Array(tmp.buffer);
|
||||
}
|
||||
else if (typeof input === typeof "")
|
||||
{
|
||||
return toWords(stringToBytes(input));
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
exports.stringToBytes = stringToBytes;
|
||||
exports.hexStringToBytes = hexStringToBytes;
|
||||
exports.bytesToHexString = bytesToHexString;
|
||||
exports.wordsToHexString = wordsToHexString;
|
||||
exports.uint32ToHexString = uint32ToHexString;
|
||||
exports.toWords = toWords;
|
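util.js hand-rolls its hex conversions, and charToNibble silently maps any non-hex character to 0; a Go host doing the same job would usually reach for encoding/hex, which reports malformed input instead. A minimal sketch:

package main

import (
	"encoding/hex"
	"fmt"
	"log"
)

func main() {
	// same seed string as js/test.js; DecodeString errors on bad characters
	seed, err := hex.DecodeString("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(seed), hex.EncodeToString(seed)) // 32 and the same string back
}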
58
Godeps/_workspace/src/github.com/ethereum/ethash/src/benchmark/CMakeLists.txt
generated
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
include_directories(..)
|
||||
|
||||
set(CMAKE_BUILD_TYPE Release)
|
||||
|
||||
if (MSVC)
|
||||
add_definitions("/openmp")
|
||||
endif()
|
||||
|
||||
# enable C++11, should probably be a bit more specific about compiler
|
||||
if (NOT MSVC)
|
||||
SET(CMAKE_CXX_FLAGS "-std=c++11")
|
||||
endif()
|
||||
|
||||
if (NOT MPI_FOUND)
|
||||
find_package(MPI)
|
||||
endif()
|
||||
|
||||
if (NOT CRYPTOPP_FOUND)
|
||||
find_package(CryptoPP 5.6.2)
|
||||
endif()
|
||||
|
||||
if (CRYPTOPP_FOUND)
|
||||
add_definitions(-DWITH_CRYPTOPP)
|
||||
find_package (Threads REQUIRED)
|
||||
endif()
|
||||
|
||||
if (NOT OpenCL_FOUND)
|
||||
find_package(OpenCL)
|
||||
endif()
|
||||
if (OpenCL_FOUND)
|
||||
add_definitions(-DWITH_OPENCL)
|
||||
include_directories(${OpenCL_INCLUDE_DIRS})
|
||||
list(APPEND FILES ethash_cl_miner.cpp ethash_cl_miner.h)
|
||||
endif()
|
||||
|
||||
if (MPI_FOUND)
|
||||
include_directories(${MPI_INCLUDE_PATH})
|
||||
add_executable (Benchmark_MPI_FULL benchmark.cpp)
|
||||
target_link_libraries (Benchmark_MPI_FULL ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
||||
SET_TARGET_PROPERTIES(Benchmark_MPI_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DFULL -DMPI")
|
||||
|
||||
add_executable (Benchmark_MPI_LIGHT benchmark.cpp)
|
||||
target_link_libraries (Benchmark_MPI_LIGHT ${ETHHASH_LIBS} ${MPI_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
|
||||
SET_TARGET_PROPERTIES(Benchmark_MPI_LIGHT PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} ${MPI_COMPILE_FLAGS} -DMPI")
|
||||
endif()
|
||||
|
||||
add_executable (Benchmark_FULL benchmark.cpp)
|
||||
target_link_libraries (Benchmark_FULL ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})
|
||||
SET_TARGET_PROPERTIES(Benchmark_FULL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DFULL")
|
||||
|
||||
add_executable (Benchmark_LIGHT benchmark.cpp)
|
||||
target_link_libraries (Benchmark_LIGHT ${ETHHASH_LIBS} ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
||||
if (OpenCL_FOUND)
|
||||
add_executable (Benchmark_CL benchmark.cpp)
|
||||
target_link_libraries (Benchmark_CL ${ETHHASH_LIBS} ethash-cl ${CMAKE_THREAD_LIBS_INIT})
|
||||
SET_TARGET_PROPERTIES(Benchmark_CL PROPERTIES COMPILE_FLAGS "${COMPILE_FLAGS} -DOPENCL")
|
||||
endif()
|
278
Godeps/_workspace/src/github.com/ethereum/ethash/src/benchmark/benchmark.cpp
generated
vendored
Normal file
@ -0,0 +1,278 @@
|
||||
/*
|
||||
This file is part of cpp-ethereum.
|
||||
|
||||
cpp-ethereum is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
cpp-ethereum is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with cpp-ethereum. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
/** @file benchmark.cpp
|
||||
* @author Tim Hughes <tim@twistedfury.com>
|
||||
* @date 2015
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <chrono>
|
||||
#include <libethash/ethash.h>
|
||||
#include <libethash/util.h>
|
||||
#ifdef OPENCL
|
||||
#include <libethash-cl/ethash_cl_miner.h>
|
||||
#endif
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
|
||||
#ifdef WITH_CRYPTOPP
|
||||
#include <libethash/sha3_cryptopp.h>
|
||||
#include <string>
|
||||
|
||||
#else
|
||||
#include "libethash/sha3.h"
|
||||
#endif // WITH_CRYPTOPP
|
||||
|
||||
#undef min
|
||||
#undef max
|
||||
|
||||
using std::chrono::high_resolution_clock;
|
||||
|
||||
#if defined(OPENCL)
|
||||
const unsigned trials = 1024*1024*32;
|
||||
#elif defined(FULL)
|
||||
const unsigned trials = 1024*1024/8;
|
||||
#else
|
||||
const unsigned trials = 1024*1024/1024;
|
||||
#endif
|
||||
uint8_t g_hashes[1024*32];
|
||||
|
||||
static char nibbleToChar(unsigned nibble)
|
||||
{
|
||||
return (char) ((nibble >= 10 ? 'a'-10 : '0') + nibble);
|
||||
}
|
||||
|
||||
static uint8_t charToNibble(char chr)
|
||||
{
|
||||
if (chr >= '0' && chr <= '9')
|
||||
{
|
||||
return (uint8_t) (chr - '0');
|
||||
}
|
||||
if (chr >= 'a' && chr <= 'z')
|
||||
{
|
||||
return (uint8_t) (chr - 'a' + 10);
|
||||
}
|
||||
if (chr >= 'A' && chr <= 'Z')
|
||||
{
|
||||
return (uint8_t) (chr - 'A' + 10);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static std::vector<uint8_t> hexStringToBytes(char const* str)
|
||||
{
|
||||
std::vector<uint8_t> bytes(strlen(str) >> 1);
|
||||
for (unsigned i = 0; i != bytes.size(); ++i)
|
||||
{
|
||||
bytes[i] = charToNibble(str[i*2 | 0]) << 4;
|
||||
bytes[i] |= charToNibble(str[i*2 | 1]);
|
||||
}
|
||||
return bytes;
|
||||
}
|
||||
|
||||
static std::string bytesToHexString(uint8_t const* bytes, unsigned size)
|
||||
{
|
||||
std::string str;
|
||||
for (unsigned i = 0; i != size; ++i)
|
||||
{
|
||||
str += nibbleToChar(bytes[i] >> 4);
|
||||
str += nibbleToChar(bytes[i] & 0xf);
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
static std::string bytesToHexString(ethash_h256_t const *hash, unsigned size)
|
||||
{
|
||||
return bytesToHexString((uint8_t*)hash, size);
|
||||
}
|
||||
|
||||
extern "C" int main(void)
|
||||
{
|
||||
// params for ethash
|
||||
ethash_params params;
|
||||
ethash_params_init(&params, 0);
|
||||
//params.full_size = 262147 * 4096; // 1GBish;
|
||||
//params.full_size = 32771 * 4096; // 128MBish;
|
||||
//params.full_size = 8209 * 4096; // 8MBish;
|
||||
//params.cache_size = 8209*4096;
|
||||
//params.cache_size = 2053*4096;
|
||||
ethash_h256_t seed;
|
||||
ethash_h256_t previous_hash;
|
||||
|
||||
memcpy(&seed, hexStringToBytes("9410b944535a83d9adf6bbdcc80e051f30676173c16ca0d32d6f1263fc246466").data(), 32);
|
||||
memcpy(&previous_hash, hexStringToBytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470").data(), 32);
|
||||
|
||||
// allocate page aligned buffer for dataset
|
||||
#ifdef FULL
|
||||
void* full_mem_buf = malloc(params.full_size + 4095);
|
||||
void* full_mem = (void*)((uintptr_t(full_mem_buf) + 4095) & ~4095);
|
||||
#endif
|
||||
void* cache_mem_buf = malloc(params.cache_size + 63);
|
||||
void* cache_mem = (void*)((uintptr_t(cache_mem_buf) + 63) & ~63);
|
||||
|
||||
ethash_cache cache;
|
||||
cache.mem = cache_mem;
|
||||
|
||||
// compute cache or full data
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
ethash_mkcache(&cache, &params, &seed);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
|
||||
ethash_h256_t cache_hash;
|
||||
SHA3_256(&cache_hash, (uint8_t const*)cache_mem, params.cache_size);
|
||||
debugf("ethash_mkcache: %ums, sha3: %s\n", (unsigned)((time*1000)/CLOCKS_PER_SEC), bytesToHexString(&cache_hash, sizeof(cache_hash)).data());
|
||||
|
||||
// print a couple of test hashes
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
ethash_return_value hash;
|
||||
ethash_light(&hash, &cache, &params, &previous_hash, 0);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_light test: %ums, %s\n", (unsigned)time, bytesToHexString(&hash.result, 32).data());
|
||||
}
|
||||
|
||||
#ifdef FULL
|
||||
startTime = high_resolution_clock::now();
|
||||
ethash_compute_full_data(full_mem, &params, &cache);
|
||||
time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_compute_full_data: %ums\n", (unsigned)time);
|
||||
#endif // FULL
|
||||
}
|
||||
|
||||
#ifdef OPENCL
|
||||
ethash_cl_miner miner;
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
if (!miner.init(params, &seed))
|
||||
exit(-1);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_cl_miner init: %ums\n", (unsigned)time);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef FULL
|
||||
{
|
||||
auto startTime = high_resolution_clock::now();
|
||||
ethash_return_value hash;
|
||||
ethash_full(&hash, full_mem, &params, &previous_hash, 0);
|
||||
auto time = std::chrono::duration_cast<std::chrono::milliseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("ethash_full test: %uns\n", (unsigned)time);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef OPENCL
|
||||
// validate 1024 hashes against CPU
|
||||
miner.hash(g_hashes, (uint8_t*)&previous_hash, 0, 1024);
|
||||
for (unsigned i = 0; i != 1024; ++i)
|
||||
{
|
||||
ethash_return_value hash;
|
||||
ethash_light(&hash, &cache, &params, &previous_hash, i);
|
||||
if (memcmp(&hash.result, g_hashes + 32*i, 32) != 0)
|
||||
{
|
||||
debugf("nonce %u failed: %s %s\n", i, bytesToHexString(g_hashes + 32*i, 32).c_str(), bytesToHexString(&hash.result, 32).c_str());
|
||||
static unsigned c = 0;
|
||||
if (++c == 16)
|
||||
{
|
||||
exit(-1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ensure nothing else is going on
|
||||
miner.finish();
|
||||
#endif
|
||||
|
||||
auto startTime = high_resolution_clock::now();
|
||||
unsigned hash_count = trials;
|
||||
|
||||
#ifdef OPENCL
|
||||
{
|
||||
struct search_hook : ethash_cl_miner::search_hook
|
||||
{
|
||||
unsigned hash_count;
|
||||
std::vector<uint64_t> nonce_vec;
|
||||
|
||||
virtual bool found(uint64_t const* nonces, uint32_t count)
|
||||
{
|
||||
nonce_vec.insert(nonce_vec.end(), nonces, nonces + count);
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual bool searched(uint64_t start_nonce, uint32_t count)
|
||||
{
|
||||
// do nothing
|
||||
hash_count += count;
|
||||
return hash_count >= trials;
|
||||
}
|
||||
};
|
||||
search_hook hook;
|
||||
hook.hash_count = 0;
|
||||
|
||||
miner.search((uint8_t*)&previous_hash, 0x000000ffffffffff, hook);
|
||||
|
||||
for (unsigned i = 0; i != hook.nonce_vec.size(); ++i)
|
||||
{
|
||||
uint64_t nonce = hook.nonce_vec[i];
|
||||
ethash_return_value hash;
|
||||
ethash_light(&hash, &cache, &params, &previous_hash, nonce);
|
||||
debugf("found: %.8x%.8x -> %s\n", unsigned(nonce>>32), unsigned(nonce), bytesToHexString(&hash.result, 32).c_str());
|
||||
}
|
||||
|
||||
hash_count = hook.hash_count;
|
||||
}
|
||||
#else
|
||||
{
|
||||
//#pragma omp parallel for
|
||||
for (int nonce = 0; nonce < trials; ++nonce)
|
||||
{
|
||||
ethash_return_value hash;
|
||||
#ifdef FULL
|
||||
ethash_full(&hash, full_mem, &params, &previous_hash, nonce);
|
||||
#else
|
||||
ethash_light(&hash, &cache, &params, &previous_hash, nonce);
|
||||
#endif // FULL
|
||||
}
|
||||
}
|
||||
#endif
|
||||
auto time = std::chrono::duration_cast<std::chrono::microseconds>(high_resolution_clock::now() - startTime).count();
|
||||
debugf("Search took: %ums\n", (unsigned)time/1000);
|
||||
|
||||
unsigned read_size = ETHASH_ACCESSES * ETHASH_MIX_BYTES;
|
||||
#if defined(OPENCL) || defined(FULL)
|
||||
debugf(
|
||||
"hashrate: %8.2f Mh/s, bw: %8.2f GB/s\n",
|
||||
(double)hash_count * (1000*1000)/time / (1000*1000),
|
||||
(double)hash_count*read_size * (1000*1000)/time / (1024*1024*1024)
|
||||
);
|
||||
#else
|
||||
debugf(
|
||||
"hashrate: %8.2f Kh/s, bw: %8.2f MB/s\n",
|
||||
(double)hash_count * (1000*1000)/time / (1000),
|
||||
(double)hash_count*read_size * (1000*1000)/time / (1024*1024)
|
||||
);
|
||||
#endif
|
||||
|
||||
free(cache_mem_buf);
|
||||
#ifdef FULL
|
||||
free(full_mem_buf);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
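The closing report in benchmark.cpp boils down to two divisions: hashes per microsecond is already Mh/s, and bandwidth is the hash count times the bytes each hash reads from the DAG (ETHASH_ACCESSES * ETHASH_MIX_BYTES). A worked sketch in Go with illustrative numbers; the 64 and 128 are the usual ethash parameters, assumed here rather than taken from this file:

package main

import "fmt"

func main() {
	const (
		hashCount    = 1024 * 1024 / 8 // the FULL benchmark's trial count
		elapsedMicro = 5000000         // example: 5 seconds of searching
		readPerHash  = 64 * 128        // ETHASH_ACCESSES * ETHASH_MIX_BYTES (assumed values)
	)
	mhs := float64(hashCount) / float64(elapsedMicro)                                 // hashes per microsecond == Mh/s
	gbs := float64(hashCount) * readPerHash / float64(elapsedMicro) * 1e6 / (1 << 30) // bytes/s -> GB/s
	fmt.Printf("hashrate: %.2f Mh/s, bw: %.2f GB/s\n", mhs, gbs)
}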
8
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/endian.h
generated
vendored
@ -35,14 +35,10 @@
|
||||
#elif defined(__FreeBSD__) || defined(__DragonFly__) || defined(__NetBSD__)
|
||||
#define ethash_swap_u32(input_) bswap32(input_)
|
||||
#define ethash_swap_u64(input_) bswap64(input_)
|
||||
#elif defined(__OpenBSD__)
|
||||
#include <endian.h>
|
||||
#define ethash_swap_u32(input_) swap32(input_)
|
||||
#define ethash_swap_u64(input_) swap64(input_)
|
||||
#else // posix
|
||||
#include <byteswap.h>
|
||||
#define ethash_swap_u32(input_) bswap_32(input_)
|
||||
#define ethash_swap_u64(input_) bswap_64(input_)
|
||||
#define ethash_swap_u32(input_) __bswap_32(input_)
|
||||
#define ethash_swap_u64(input_) __bswap_64(input_)
|
||||
#endif
|
||||
|
||||
|
||||
|
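Whichever platform branch above is taken, ethash_swap_u32 and ethash_swap_u64 are plain byte swaps used to fix up endianness; the Go equivalent lives in math/bits. A tiny sketch:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	fmt.Printf("%#x\n", bits.ReverseBytes32(0x11223344))         // 0x44332211
	fmt.Printf("%#x\n", bits.ReverseBytes64(0x1122334455667788)) // 0x8877665544332211
}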
4
Godeps/_workspace/src/github.com/ethereum/ethash/src/libethash/fnv.h
generated
vendored
@ -29,10 +29,6 @@ extern "C" {
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
/* The FNV-1 spec multiplies the prime with the input one byte (octet) in turn.
|
||||
We instead multiply it with the full 32-bit input.
|
||||
This gives a different result compared to a canonical FNV-1 implementation.
|
||||
*/
|
||||
static inline uint32_t fnv_hash(uint32_t const x, uint32_t const y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
|
267
Godeps/_workspace/src/github.com/ethereum/ethash/src/python/core.c
generated
vendored
Normal file
@ -0,0 +1,267 @@
|
||||
#include <Python.h>
|
||||
#include <alloca.h>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
#include "../libethash/ethash.h"
|
||||
#include "../libethash/internal.h"
|
||||
|
||||
#if PY_MAJOR_VERSION >= 3
|
||||
#define PY_STRING_FORMAT "y#"
|
||||
#define PY_CONST_STRING_FORMAT "y"
|
||||
#else
|
||||
#define PY_STRING_FORMAT "s#"
|
||||
#define PY_CONST_STRING_FORMAT "s"
|
||||
#endif
|
||||
|
||||
#define MIX_WORDS (ETHASH_MIX_BYTES/4)
|
||||
|
||||
static PyObject *
|
||||
mkcache_bytes(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
unsigned long cache_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
|
||||
ethash_light_t L = ethash_light_new(block_number);
|
||||
PyObject * val = Py_BuildValue(PY_STRING_FORMAT, L->cache, L->cache_size);
|
||||
free(L->cache);
|
||||
return val;
|
||||
}
|
||||
|
||||
/*
|
||||
static PyObject *
|
||||
calc_dataset_bytes(PyObject *self, PyObject *args) {
|
||||
char *cache_bytes;
|
||||
unsigned long full_size;
|
||||
int cache_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, "k" PY_STRING_FORMAT, &full_size, &cache_bytes, &cache_size))
|
||||
return 0;
|
||||
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %lu)", MIX_WORDS, full_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (cache_size % ETHASH_HASH_BYTES != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of the cache must be a multiple of %i bytes (was %i)", ETHASH_HASH_BYTES, cache_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ethash_params params;
|
||||
params.cache_size = (size_t) cache_size;
|
||||
params.full_size = (size_t) full_size;
|
||||
ethash_cache cache;
|
||||
cache.mem = (void *) cache_bytes;
|
||||
void *mem = malloc(params.full_size);
|
||||
ethash_compute_full_data(mem, &params, &cache);
|
||||
PyObject * val = Py_BuildValue(PY_STRING_FORMAT, (char *) mem, full_size);
|
||||
free(mem);
|
||||
return val;
|
||||
}*/
|
||||
|
||||
// hashimoto_light(full_size, cache, header, nonce)
|
||||
static PyObject *
|
||||
hashimoto_light(PyObject *self, PyObject *args) {
|
||||
char *cache_bytes;
|
||||
char *header;
|
||||
unsigned long block_number;
|
||||
unsigned long long nonce;
|
||||
int cache_size, header_size;
|
||||
if (!PyArg_ParseTuple(args, "k" PY_STRING_FORMAT PY_STRING_FORMAT "K", &block_number, &cache_bytes, &cache_size, &header, &header_size, &nonce))
|
||||
return 0;
|
||||
if (header_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Seed must be 32 bytes long (was %i)", header_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
struct ethash_light *s;
|
||||
s = calloc(sizeof(*s), 1);
|
||||
s->cache = cache_bytes;
|
||||
s->cache_size = cache_size;
|
||||
s->block_number = block_number;
|
||||
struct ethash_h256 *h;
|
||||
h = calloc(sizeof(*h), 1);
|
||||
for (int i = 0; i < 32; i++) h->b[i] = header[i];
|
||||
struct ethash_return_value out = ethash_light_compute(s, *h, nonce);
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "," PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32);
|
||||
}
|
||||
/*
|
||||
// hashimoto_full(dataset, header, nonce)
|
||||
static PyObject *
|
||||
hashimoto_full(PyObject *self, PyObject *args) {
|
||||
char *full_bytes;
|
||||
char *header;
|
||||
unsigned long long nonce;
|
||||
int full_size, header_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, PY_STRING_FORMAT PY_STRING_FORMAT "K", &full_bytes, &full_size, &header, &header_size, &nonce))
|
||||
return 0;
|
||||
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (header_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
ethash_return_value out;
|
||||
ethash_params params;
|
||||
params.full_size = (size_t) full_size;
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (ethash_h256_t *) header, nonce);
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT "}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32);
|
||||
}
|
||||
|
||||
// mine(dataset_bytes, header, difficulty_bytes)
|
||||
static PyObject *
|
||||
mine(PyObject *self, PyObject *args) {
|
||||
char *full_bytes;
|
||||
char *header;
|
||||
char *difficulty;
|
||||
srand(time(0));
|
||||
uint64_t nonce = ((uint64_t) rand()) << 32 | rand();
|
||||
int full_size, header_size, difficulty_size;
|
||||
|
||||
if (!PyArg_ParseTuple(args, PY_STRING_FORMAT PY_STRING_FORMAT PY_STRING_FORMAT, &full_bytes, &full_size, &header, &header_size, &difficulty, &difficulty_size))
|
||||
return 0;
|
||||
|
||||
if (full_size % MIX_WORDS != 0) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "The size of data set must be a multiple of %i bytes (was %i)", MIX_WORDS, full_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (header_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Header must be 32 bytes long (was %i)", header_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (difficulty_size != 32) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Difficulty must be an array of 32 bytes (only had %i)", difficulty_size);
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ethash_return_value out;
|
||||
ethash_params params;
|
||||
params.full_size = (size_t) full_size;
|
||||
|
||||
// TODO: Multi threading?
|
||||
do {
|
||||
ethash_full(&out, (void *) full_bytes, ¶ms, (const ethash_h256_t *) header, nonce++);
|
||||
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
|
||||
} while (!ethash_check_difficulty(&out.result, (const ethash_h256_t *) difficulty));
|
||||
|
||||
return Py_BuildValue("{" PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":" PY_STRING_FORMAT ", " PY_CONST_STRING_FORMAT ":K}",
|
||||
"mix digest", &out.mix_hash, 32,
|
||||
"result", &out.result, 32,
|
||||
"nonce", nonce);
|
||||
}
|
||||
*/
|
||||
|
||||
//get_seedhash(block_number)
|
||||
static PyObject *
|
||||
get_seedhash(PyObject *self, PyObject *args) {
|
||||
unsigned long block_number;
|
||||
if (!PyArg_ParseTuple(args, "k", &block_number))
|
||||
return 0;
|
||||
if (block_number >= ETHASH_EPOCH_LENGTH * 2048) {
|
||||
char error_message[1024];
|
||||
sprintf(error_message, "Block number must be less than %i (was %lu)", ETHASH_EPOCH_LENGTH * 2048, block_number);
|
||||
|
||||
PyErr_SetString(PyExc_ValueError, error_message);
|
||||
return 0;
|
||||
}
|
||||
ethash_h256_t seedhash = ethash_get_seedhash(block_number);
|
||||
return Py_BuildValue(PY_STRING_FORMAT, (char *) &seedhash, 32);
|
||||
}
|
||||
|
||||
static PyMethodDef PyethashMethods[] =
|
||||
{
|
||||
{"get_seedhash", get_seedhash, METH_VARARGS,
|
||||
"get_seedhash(block_number)\n\n"
|
||||
"Gets the seedhash for a block."},
|
||||
{"mkcache_bytes", mkcache_bytes, METH_VARARGS,
|
||||
"mkcache_bytes(block_number)\n\n"
|
||||
"Makes a byte array for the cache for given block number\n"},
|
||||
/*{"calc_dataset_bytes", calc_dataset_bytes, METH_VARARGS,
|
||||
"calc_dataset_bytes(full_size, cache_bytes)\n\n"
|
||||
"Makes a byte array for the dataset for a given size given cache bytes"},*/
|
||||
{"hashimoto_light", hashimoto_light, METH_VARARGS,
|
||||
"hashimoto_light(block_number, cache_bytes, header, nonce)\n\n"
|
||||
"Runs the hashimoto hashing function just using cache bytes. Takes an int (full_size), byte array (cache_bytes), another byte array (header), and an int (nonce). Returns an object containing the mix digest, and hash result."},
|
||||
/*{"hashimoto_full", hashimoto_full, METH_VARARGS,
|
||||
"hashimoto_full(dataset_bytes, header, nonce)\n\n"
|
||||
"Runs the hashimoto hashing function using the dataset bytes. Useful for testing. Returns an object containing the mix digest (byte array), and hash result (another byte array)."},
|
||||
{"mine", mine, METH_VARARGS,
|
||||
"mine(dataset_bytes, header, difficulty_bytes)\n\n"
|
||||
"Mine for an adequate header. Returns an object containing the mix digest (byte array), hash result (another byte array) and nonce (an int)."},*/
|
||||
{NULL, NULL, 0, NULL}
|
||||
};
|
||||
|
||||
#if PY_MAJOR_VERSION >= 3
|
||||
static struct PyModuleDef PyethashModule = {
|
||||
PyModuleDef_HEAD_INIT,
|
||||
"pyethash",
|
||||
"...",
|
||||
-1,
|
||||
PyethashMethods
|
||||
};
|
||||
|
||||
PyMODINIT_FUNC PyInit_pyethash(void) {
|
||||
PyObject *module = PyModule_Create(&PyethashModule);
|
||||
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
|
||||
return module;
|
||||
}
|
||||
#else
|
||||
PyMODINIT_FUNC
|
||||
initpyethash(void) {
|
||||
PyObject *module = Py_InitModule("pyethash", PyethashMethods);
|
||||
// Following Spec: https://github.com/ethereum/wiki/wiki/Ethash#definitions
|
||||
PyModule_AddIntConstant(module, "REVISION", (long) ETHASH_REVISION);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_INIT", (long) ETHASH_DATASET_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "DATASET_BYTES_GROWTH", (long) ETHASH_DATASET_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_INIT", (long) ETHASH_CACHE_BYTES_INIT);
|
||||
PyModule_AddIntConstant(module, "CACHE_BYTES_GROWTH", (long) ETHASH_CACHE_BYTES_GROWTH);
|
||||
PyModule_AddIntConstant(module, "EPOCH_LENGTH", (long) ETHASH_EPOCH_LENGTH);
|
||||
PyModule_AddIntConstant(module, "MIX_BYTES", (long) ETHASH_MIX_BYTES);
|
||||
PyModule_AddIntConstant(module, "HASH_BYTES", (long) ETHASH_HASH_BYTES);
|
||||
PyModule_AddIntConstant(module, "DATASET_PARENTS", (long) ETHASH_DATASET_PARENTS);
|
||||
PyModule_AddIntConstant(module, "CACHE_ROUNDS", (long) ETHASH_CACHE_ROUNDS);
|
||||
PyModule_AddIntConstant(module, "ACCESSES", (long) ETHASH_ACCESSES);
|
||||
}
|
||||
#endif
|
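The extension above gives Python just enough to act as an Ethash light client. A minimal usage sketch, assuming the extension is built and importable as pyethash and that hashimoto_light takes (block_number, cache_bytes, header, nonce) as its docstring states; header and nonce below are placeholder values, not a real test vector:

import pyethash

block_number = 30001                              # any block in epoch 1
seed = pyethash.get_seedhash(block_number)        # 32-byte seed for the block's epoch
cache = pyethash.mkcache_bytes(block_number)      # light-client cache for the epoch

header = b"\x00" * 32                             # placeholder 32-byte header hash
nonce = 0x0000000000000001                        # placeholder nonce
out = pyethash.hashimoto_light(block_number, cache, header, nonce)
print(out["mix digest"].hex(), out["result"].hex())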
66
Godeps/_workspace/src/github.com/ethereum/ethash/test/c/CMakeLists.txt
generated
vendored
Normal file
@ -0,0 +1,66 @@
if (MSVC)
    if (NOT BOOST_ROOT)
        set (BOOST_ROOT "$ENV{BOOST_ROOT}")
    endif()
    set (CMAKE_PREFIX_PATH BOOST_ROOT)
endif()

IF( NOT Boost_FOUND )
    # use multithreaded boost libraries, with -mt suffix
    set(Boost_USE_MULTITHREADED ON)

    if (MSVC)
        # TODO handle other msvc versions or it will fail find them
        set(Boost_COMPILER -vc120)
        # use static boost libraries *.lib
        set(Boost_USE_STATIC_LIBS ON)
    elseif (APPLE)
        # use static boost libraries *.a
        set(Boost_USE_STATIC_LIBS ON)
    elseif (UNIX)
        # use dynamic boost libraries .dll
        set(Boost_USE_STATIC_LIBS OFF)
    endif()
    find_package(Boost 1.48.0 COMPONENTS unit_test_framework system filesystem)
ENDIF()

IF (Boost_FOUND)
    message(STATUS "boost header: ${Boost_INCLUDE_DIRS}")
    message(STATUS "boost libs : ${Boost_LIBRARIES}")

    include_directories( ${Boost_INCLUDE_DIR} )
    include_directories(../../src)

    link_directories(${Boost_LIBRARY_DIRS})
    file(GLOB HEADERS "*.h")
    if ((NOT MSVC) AND (NOT APPLE))
        ADD_DEFINITIONS(-DBOOST_TEST_DYN_LINK)
    endif()
    if (NOT CRYPTOPP_FOUND)
        find_package (CryptoPP)
    endif()

    if (CRYPTOPP_FOUND)
        add_definitions(-DWITH_CRYPTOPP)
    endif()

    if (NOT MSVC)
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ")
    endif()

    add_executable (Test "./test.cpp" ${HEADERS})
    target_link_libraries(Test ${ETHHASH_LIBS})
    target_link_libraries(Test ${Boost_FILESYSTEM_LIBRARIES})
    target_link_libraries(Test ${Boost_SYSTEM_LIBRARIES})
    target_link_libraries(Test ${Boost_UNIT_TEST_FRAMEWORK_LIBRARIES})

    if (CRYPTOPP_FOUND)
        TARGET_LINK_LIBRARIES(Test ${CRYPTOPP_LIBRARIES})
    endif()

    enable_testing ()
    add_test(NAME ethash COMMAND Test)
ENDIF()
669
Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.cpp
generated
vendored
Normal file
@ -0,0 +1,669 @@
#include <iomanip>
#include <libethash/fnv.h>
#include <libethash/ethash.h>
#include <libethash/internal.h>
#include <libethash/io.h>

#ifdef WITH_CRYPTOPP

#include <libethash/sha3_cryptopp.h>

#else
#include <libethash/sha3.h>
#endif // WITH_CRYPTOPP

#ifdef _WIN32
#include <windows.h>
#include <Shlobj.h>
#endif

#define BOOST_TEST_MODULE Daggerhashimoto
#define BOOST_TEST_MAIN

#include <iostream>
#include <fstream>
#include <vector>
#include <boost/filesystem.hpp>
#include <boost/test/unit_test.hpp>

using namespace std;
using byte = uint8_t;
using bytes = std::vector<byte>;
namespace fs = boost::filesystem;

// Just an alloca "wrapper" to silence uint64_t to size_t conversion warnings in windows
// consider replacing alloca calls with something better though!
#define our_alloca(param__) alloca((size_t)(param__))

// some functions taken from eth::dev for convenience.
std::string bytesToHexString(const uint8_t *str, const uint64_t s)
{
    std::ostringstream ret;

    for (size_t i = 0; i < s; ++i)
        ret << std::hex << std::setfill('0') << std::setw(2) << std::nouppercase << (int) str[i];

    return ret.str();
}

std::string blockhashToHexString(ethash_h256_t* _hash)
{
    return bytesToHexString((uint8_t*)_hash, 32);
}

int fromHex(char _i)
{
    if (_i >= '0' && _i <= '9')
        return _i - '0';
    if (_i >= 'a' && _i <= 'f')
        return _i - 'a' + 10;
    if (_i >= 'A' && _i <= 'F')
        return _i - 'A' + 10;

    BOOST_REQUIRE_MESSAGE(false, "should never get here");
    return -1;
}

bytes hexStringToBytes(std::string const& _s)
{
    unsigned s = (_s[0] == '0' && _s[1] == 'x') ? 2 : 0;
    std::vector<uint8_t> ret;
    ret.reserve((_s.size() - s + 1) / 2);

    if (_s.size() % 2)
        try
        {
            ret.push_back(fromHex(_s[s++]));
        }
        catch (...)
        {
            ret.push_back(0);
        }
    for (unsigned i = s; i < _s.size(); i += 2)
        try
        {
            ret.push_back((byte)(fromHex(_s[i]) * 16 + fromHex(_s[i + 1])));
        }
        catch (...){
            ret.push_back(0);
        }
    return ret;
}

ethash_h256_t stringToBlockhash(std::string const& _s)
{
    ethash_h256_t ret;
    bytes b = hexStringToBytes(_s);
    memcpy(&ret, b.data(), b.size());
    return ret;
}

BOOST_AUTO_TEST_CASE(fnv_hash_check) {
    uint32_t x = 1235U;
    const uint32_t
        y = 9999999U,
        expected = (FNV_PRIME * x) ^ y;

    x = fnv_hash(x, y);

    BOOST_REQUIRE_MESSAGE(x == expected,
        "\nexpected: " << expected << "\n"
            << "actual: " << x << "\n");
}
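For reference, the FNV mix exercised by fnv_hash_check is a one-liner. A small Python sketch, assuming the usual 32-bit FNV prime 0x01000193 (the value of FNV_PRIME in libethash/fnv.h is not shown in this diff) and 32-bit wrap-around arithmetic:

FNV_PRIME = 0x01000193  # assumption: matches FNV_PRIME in libethash/fnv.h

def fnv_hash(x: int, y: int) -> int:
    # ethash-style FNV mix: multiply by the prime, XOR in y, keep 32 bits
    return ((x * FNV_PRIME) ^ y) & 0xFFFFFFFF

# mirrors the fnv_hash_check vector: x = 1235, y = 9999999
assert fnv_hash(1235, 9999999) == ((FNV_PRIME * 1235) ^ 9999999) & 0xFFFFFFFF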
BOOST_AUTO_TEST_CASE(SHA256_check) {
    ethash_h256_t input;
    ethash_h256_t out;
    memcpy(&input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    SHA3_256(&out, (uint8_t*)&input, 32);
    const std::string
        expected = "2b5ddf6f4d21c23de216f44d5e4bdc68e044b71897837ea74c83908be7037cd7",
        actual = bytesToHexString((uint8_t*)&out, 32);
    BOOST_REQUIRE_MESSAGE(expected == actual,
        "\nexpected: " << expected.c_str() << "\n"
            << "actual: " << actual.c_str() << "\n");
}

BOOST_AUTO_TEST_CASE(SHA512_check) {
    uint8_t input[64], out[64];
    memcpy(input, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 64);
    SHA3_512(out, input, 64);
    const std::string
        expected = "0be8a1d334b4655fe58c6b38789f984bb13225684e86b20517a55ab2386c7b61c306f25e0627c60064cecd6d80cd67a82b3890bd1289b7ceb473aad56a359405",
        actual = bytesToHexString(out, 64);
    BOOST_REQUIRE_MESSAGE(expected == actual,
        "\nexpected: " << expected.c_str() << "\n"
            << "actual: " << actual.c_str() << "\n");
}

BOOST_AUTO_TEST_CASE(test_swap_endian32) {
    uint32_t v32 = (uint32_t)0xBAADF00D;
    v32 = ethash_swap_u32(v32);
    BOOST_REQUIRE_EQUAL(v32, (uint32_t)0x0DF0ADBA);
}

BOOST_AUTO_TEST_CASE(test_swap_endian64) {
    uint64_t v64 = (uint64_t)0xFEE1DEADDEADBEEF;
    v64 = ethash_swap_u64(v64);
    BOOST_REQUIRE_EQUAL(v64, (uint64_t)0xEFBEADDEADDEE1FE);
}

BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_check) {
    uint64_t full_size = ethash_get_datasize(0);
    uint64_t cache_size = ethash_get_cachesize(0);
    BOOST_REQUIRE_MESSAGE(full_size < ETHASH_DATASET_BYTES_INIT,
        "\nfull size: " << full_size << "\n"
            << "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n");
    BOOST_REQUIRE_MESSAGE(full_size + 20 * ETHASH_MIX_BYTES >= ETHASH_DATASET_BYTES_INIT,
        "\nfull size + 20*MIX_BYTES: " << full_size + 20 * ETHASH_MIX_BYTES << "\n"
            << "should be greater than or equal to: " << ETHASH_DATASET_BYTES_INIT << "\n");
    BOOST_REQUIRE_MESSAGE(cache_size < ETHASH_DATASET_BYTES_INIT / 32,
        "\ncache size: " << cache_size << "\n"
            << "should be less than or equal to: " << ETHASH_DATASET_BYTES_INIT / 32 << "\n");
}

BOOST_AUTO_TEST_CASE(ethash_params_init_genesis_calcifide_check) {
    uint64_t full_size = ethash_get_datasize(0);
    uint64_t cache_size = ethash_get_cachesize(0);
    const uint32_t expected_full_size = 1073739904;
    const uint32_t expected_cache_size = 16776896;
    BOOST_REQUIRE_MESSAGE(full_size == expected_full_size,
        "\nexpected: " << expected_cache_size << "\n"
            << "actual: " << full_size << "\n");
    BOOST_REQUIRE_MESSAGE(cache_size == expected_cache_size,
        "\nexpected: " << expected_cache_size << "\n"
            << "actual: " << cache_size << "\n");
}
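The two "calcified" genesis values above follow from the Ethash size-selection rule in the spec linked earlier: grow the nominal size linearly per epoch, then step down until the size in mix/hash units is prime. A rough Python sketch under that assumption (constants taken from the spec, not from this diff) reproduces both expected numbers:

DATASET_BYTES_INIT = 2**30; DATASET_BYTES_GROWTH = 2**23
CACHE_BYTES_INIT = 2**24; CACHE_BYTES_GROWTH = 2**17
EPOCH_LENGTH = 30000; MIX_BYTES = 128; HASH_BYTES = 64

def _isprime(n: int) -> bool:
    return n > 1 and all(n % i for i in range(2, int(n ** 0.5) + 1))

def get_full_size(block_number: int) -> int:
    sz = DATASET_BYTES_INIT + DATASET_BYTES_GROWTH * (block_number // EPOCH_LENGTH) - MIX_BYTES
    while not _isprime(sz // MIX_BYTES):   # step down until sz/MIX_BYTES is prime
        sz -= 2 * MIX_BYTES
    return sz

def get_cache_size(block_number: int) -> int:
    sz = CACHE_BYTES_INIT + CACHE_BYTES_GROWTH * (block_number // EPOCH_LENGTH) - HASH_BYTES
    while not _isprime(sz // HASH_BYTES):  # step down until sz/HASH_BYTES is prime
        sz -= 2 * HASH_BYTES
    return sz

assert get_full_size(0) == 1073739904 and get_cache_size(0) == 16776896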
BOOST_AUTO_TEST_CASE(ethash_check_difficulty_check) {
    ethash_h256_t hash;
    ethash_h256_t target;
    memcpy(&hash, "11111111111111111111111111111111", 32);
    memcpy(&target, "22222222222222222222222222222222", 32);
    BOOST_REQUIRE_MESSAGE(
        ethash_check_difficulty(&hash, &target),
        "\nexpected \"" << std::string((char *) &hash, 32).c_str() << "\" to have the same or less difficulty than \"" << std::string((char *) &target, 32).c_str() << "\"\n");
    BOOST_REQUIRE_MESSAGE(
        ethash_check_difficulty(&hash, &hash), "");
        // "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << hash << "\"\n");
    memcpy(&target, "11111111111111111111111111111112", 32);
    BOOST_REQUIRE_MESSAGE(
        ethash_check_difficulty(&hash, &target), "");
        // "\nexpected \"" << hash << "\" to have the same or less difficulty than \"" << target << "\"\n");
    memcpy(&target, "11111111111111111111111111111110", 32);
    BOOST_REQUIRE_MESSAGE(
        !ethash_check_difficulty(&hash, &target), "");
        // "\nexpected \"" << hash << "\" to have more difficulty than \"" << target << "\"\n");
}
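As the four assertions above suggest, ethash_check_difficulty behaves like a big-endian comparison: the hash passes when, read as a 256-bit big-endian integer, it does not exceed the boundary. A small Python sketch of that check, an assumption inferred from these test cases rather than the library's exact code:

def check_difficulty(result: bytes, boundary: bytes) -> bool:
    # passes when the hash, read as a 256-bit big-endian integer,
    # does not exceed the boundary value
    return int.from_bytes(result, "big") <= int.from_bytes(boundary, "big")

h = b"1" * 32
assert check_difficulty(h, b"2" * 32)              # clearly below the boundary
assert check_difficulty(h, h)                      # equal values are accepted
assert check_difficulty(h, b"1" * 31 + b"2")       # boundary one step higher
assert not check_difficulty(h, b"1" * 31 + b"0")   # boundary one step lower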
BOOST_AUTO_TEST_CASE(test_ethash_io_mutable_name) {
    char mutable_name[DAG_MUTABLE_NAME_MAX_SIZE];
    // should have at least 8 bytes provided since this is what we test :)
    ethash_h256_t seed1 = ethash_h256_static_init(0, 10, 65, 255, 34, 55, 22, 8);
    ethash_io_mutable_name(1, &seed1, mutable_name);
    BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "full-R1-000a41ff22371608"));
    ethash_h256_t seed2 = ethash_h256_static_init(0, 0, 0, 0, 0, 0, 0, 0);
    ethash_io_mutable_name(44, &seed2, mutable_name);
    BOOST_REQUIRE_EQUAL(0, strcmp(mutable_name, "full-R44-0000000000000000"));
}

BOOST_AUTO_TEST_CASE(test_ethash_dir_creation) {
    ethash_h256_t seedhash;
    FILE *f = NULL;
    memset(&seedhash, 0, 32);
    BOOST_REQUIRE_EQUAL(
        ETHASH_IO_MEMO_MISMATCH,
        ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false)
    );
    BOOST_REQUIRE(f);

    // let's make sure that the directory was created
    BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));

    // cleanup
    fclose(f);
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_match) {
    uint64_t full_size;
    uint64_t cache_size;
    ethash_h256_t seed;
    ethash_h256_t hash;
    FILE* f;
    memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

    cache_size = 1024;
    full_size = 1024 * 32;

    ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
    ethash_full_t full = ethash_full_new_internal(
        "./test_ethash_directory/",
        seed,
        full_size,
        light,
        NULL
    );
    BOOST_ASSERT(full);
    // let's make sure that the directory was created
    BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
    // delete the full here so that memory is properly unmapped and FILE handler freed
    ethash_full_delete(full);
    // and check that we have a match when checking again
    BOOST_REQUIRE_EQUAL(
        ETHASH_IO_MEMO_MATCH,
        ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false)
    );
    BOOST_REQUIRE(f);

    // cleanup
    fclose(f);
    ethash_light_delete(light);
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(test_ethash_io_memo_file_size_mismatch) {
    static const int blockn = 0;
    ethash_h256_t seedhash = ethash_get_seedhash(blockn);
    FILE *f = NULL;
    BOOST_REQUIRE_EQUAL(
        ETHASH_IO_MEMO_MISMATCH,
        ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 64, false)
    );
    BOOST_REQUIRE(f);
    fclose(f);

    // let's make sure that the directory was created
    BOOST_REQUIRE(fs::is_directory(fs::path("./test_ethash_directory/")));
    // and check that we get the size mismatch detected if we request diffferent size
    BOOST_REQUIRE_EQUAL(
        ETHASH_IO_MEMO_SIZE_MISMATCH,
        ethash_io_prepare("./test_ethash_directory/", seedhash, &f, 65, false)
    );

    // cleanup
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(test_ethash_get_default_dirname) {
    char result[256];
    // this is really not an easy thing to test for in a unit test
    // TODO: Improve this test ...
#ifdef _WIN32
    char homedir[256];
    BOOST_REQUIRE(SUCCEEDED(SHGetFolderPathA(NULL, CSIDL_PROFILE, NULL, 0, (CHAR*)homedir)));
    BOOST_REQUIRE(ethash_get_default_dirname(result, 256));
    std::string res = std::string(homedir) + std::string("\\AppData\\Local\\Ethash\\");
#else
    char* homedir = getenv("HOME");
    BOOST_REQUIRE(ethash_get_default_dirname(result, 256));
    std::string res = std::string(homedir) + std::string("/.ethash/");
#endif
    BOOST_CHECK_MESSAGE(strcmp(res.c_str(), result) == 0,
        "Expected \"" + res + "\" but got \"" + std::string(result) + "\""
    );
}

BOOST_AUTO_TEST_CASE(light_and_full_client_checks) {
    uint64_t full_size;
    uint64_t cache_size;
    ethash_h256_t seed;
    ethash_h256_t hash;
    ethash_h256_t difficulty;
    ethash_return_value_t light_out;
    ethash_return_value_t full_out;
    memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

    // Set the difficulty
    ethash_h256_set(&difficulty, 0, 197);
    ethash_h256_set(&difficulty, 1, 90);
    for (int i = 2; i < 32; i++)
        ethash_h256_set(&difficulty, i, 255);

    cache_size = 1024;
    full_size = 1024 * 32;

    ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
    ethash_full_t full = ethash_full_new_internal(
        "./test_ethash_directory/",
        seed,
        full_size,
        light,
        NULL
    );
    BOOST_ASSERT(full);
    {
        const std::string
expected = "2da2b506f21070e1143d908e867962486d6b0a02e31d468fd5e3a7143aafa76a14201f63374314e2a6aaf84ad2eb57105dea3378378965a1b3873453bb2b78f9a8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995ca8620b2ebeca41fbc773bb837b5e724d6eb2de570d99858df0d7d97067fb8103b21757873b735097b35d3bea8fd1c359a9e8a63c1540c76c9784cf8d975e995c259440b89fa3481c2c33171477c305c8e1e421f8d8f6d59585449d0034f3e421808d8da6bbd0b6378f567647cc6c4ba6c434592b198ad444e7284905b7c6adaf70bf43ec2daa7bd5e8951aa609ab472c124cf9eba3d38cff5091dc3f58409edcc386c743c3bd66f92408796ee1e82dd149eaefbf52b00ce33014a6eb3e50625413b072a58bc01da28262f42cbe4f87d4abc2bf287d15618405a1fe4e386fcdafbb171064bd99901d8f81dd6789396ce5e364ac944bbbd75a7827291c70b42d26385910cd53ca535ab29433dd5c5714d26e0dce95514c5ef866329c12e958097e84462197c2b32087849dab33e88b11da61d52f9dbc0b92cc61f742c07dbbf751c49d7678624ee60dfbe62e5e8c47a03d8247643f3d16ad8c8e663953bcda1f59d7e2d4a9bf0768e789432212621967a8f41121ad1df6ae1fa78782530695414c6213942865b2730375019105cae91a4c17a558d4b63059661d9f108362143107babe0b848de412e4da59168cce82bfbff3c99e022dd6ac1e559db991f2e3f7bb910cefd173e65ed00a8d5d416534e2c8416ff23977dbf3eb7180b75c71580d08ce95efeb9b0afe904ea12285a392aff0c8561ff79fca67f694a62b9e52377485c57cc3598d84cac0a9d27960de0cc31ff9bbfe455acaa62c8aa5d2cce96f345da9afe843d258a99c4eaf3650fc62efd81c7b81cd0d534d2d71eeda7a6e315d540b4473c80f8730037dc2ae3e47b986240cfc65ccc565f0d8cde0bc68a57e39a271dda57440b3598bee19f799611d25731a96b5dbbbefdff6f4f656161462633030d62560ea4e9c161cf78fc96a2ca5aaa32453a6c5dea206f766244e8c9d9a8dc61185ce37f1fc804459c5f07434f8ecb34141b8dcae7eae704c950b55556c5f40140c3714b45eddb02637513268778cbf937a33e4e33183685f9deb31ef54e90161e76d969587dd782eaa94e289420e7c2ee908517f5893a26fdb5873d68f92d118d4bcf98d7a4916794d6ab290045e30f9ea00ca547c584b8482b0331ba1539a0f2714fddc3a0b06b0cfbb6a607b8339c39bcfd6640b1f653e9d70ef6c985b",
            actual = bytesToHexString((uint8_t const *) light->cache, cache_size);

        BOOST_REQUIRE_MESSAGE(expected == actual,
            "\nexpected: " << expected.c_str() << "\n"
                << "actual: " << actual.c_str() << "\n");
    }
    {
        node node;
        ethash_calculate_dag_item(&node, 0, light);
        const std::string
            actual = bytesToHexString((uint8_t const *) &node, sizeof(node)),
            expected = "b1698f829f90b35455804e5185d78f549fcb1bdce2bee006d4d7e68eb154b596be1427769eb1c3c3e93180c760af75f81d1023da6a0ffbe321c153a7c0103597";
        BOOST_REQUIRE_MESSAGE(actual == expected,
            "\n" << "expected: " << expected.c_str() << "\n"
                << "actual: " << actual.c_str() << "\n");
    }
    {
        for (int i = 0; i < full_size / sizeof(node); ++i) {
            for (uint32_t j = 0; j < 32; ++j) {
                node expected_node;
                ethash_calculate_dag_item(&expected_node, j, light);
                const std::string
                    actual = bytesToHexString((uint8_t const *) &(full->data[j]), sizeof(node)),
                    expected = bytesToHexString((uint8_t const *) &expected_node, sizeof(node));
                BOOST_REQUIRE_MESSAGE(actual == expected,
                    "\ni: " << j << "\n"
                        << "expected: " << expected.c_str() << "\n"
                        << "actual: " << actual.c_str() << "\n");
            }
        }
    }
    {
        uint64_t nonce = 0x7c7c597c;
        full_out = ethash_full_compute(full, hash, nonce);
        BOOST_REQUIRE(full_out.success);
        light_out = ethash_light_compute_internal(light, full_size, hash, nonce);
        BOOST_REQUIRE(light_out.success);
        const std::string
            light_result_string = blockhashToHexString(&light_out.result),
            full_result_string = blockhashToHexString(&full_out.result);
        BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
            "\nlight result: " << light_result_string.c_str() << "\n"
                << "full result: " << full_result_string.c_str() << "\n");
        const std::string
            light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
            full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
        BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
            "\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
                << "full mix hash: " << full_mix_hash_string.c_str() << "\n");
        ethash_h256_t check_hash;
        ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
        const std::string check_hash_string = blockhashToHexString(&check_hash);
        BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
            "\ncheck hash string: " << check_hash_string.c_str() << "\n"
                << "full result: " << full_result_string.c_str() << "\n");
    }
    {
        full_out = ethash_full_compute(full, hash, 5);
        BOOST_REQUIRE(full_out.success);
        std::string
            light_result_string = blockhashToHexString(&light_out.result),
            full_result_string = blockhashToHexString(&full_out.result);
        BOOST_REQUIRE_MESSAGE(light_result_string != full_result_string,
            "\nlight result and full result should differ: " << light_result_string.c_str() << "\n");

        light_out = ethash_light_compute_internal(light, full_size, hash, 5);
        BOOST_REQUIRE(light_out.success);
        light_result_string = blockhashToHexString(&light_out.result);
        BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
            "\nlight result and full result should be the same\n"
                << "light result: " << light_result_string.c_str() << "\n"
                << "full result: " << full_result_string.c_str() << "\n");
        std::string
            light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
            full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
        BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
            "\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
                << "full mix hash: " << full_mix_hash_string.c_str() << "\n");
        BOOST_REQUIRE_MESSAGE(ethash_check_difficulty(&full_out.result, &difficulty),
            "ethash_check_difficulty failed"
        );
        BOOST_REQUIRE_MESSAGE(ethash_quick_check_difficulty(&hash, 5U, &full_out.mix_hash, &difficulty),
            "ethash_quick_check_difficulty failed"
        );
    }
    ethash_light_delete(light);
    ethash_full_delete(full);
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(ethash_full_new_when_dag_exists_with_wrong_size) {
    uint64_t full_size;
    uint64_t cache_size;
    ethash_h256_t seed;
    ethash_h256_t hash;
    ethash_return_value_t full_out;
    ethash_return_value_t light_out;
    memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

    cache_size = 1024;
    full_size = 1024 * 32;

    // first make a DAG file of "wrong size"
    FILE *f;
    BOOST_REQUIRE_EQUAL(
        ETHASH_IO_MEMO_MISMATCH,
        ethash_io_prepare("./test_ethash_directory/", seed, &f, 64, false)
    );
    fclose(f);

    // then create new DAG, which should detect the wrong size and force create a new file
    ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
    BOOST_ASSERT(light);
    ethash_full_t full = ethash_full_new_internal(
        "./test_ethash_directory/",
        seed,
        full_size,
        light,
        NULL
    );
    BOOST_ASSERT(full);
    {
        uint64_t nonce = 0x7c7c597c;
        full_out = ethash_full_compute(full, hash, nonce);
        BOOST_REQUIRE(full_out.success);
        light_out = ethash_light_compute_internal(light, full_size, hash, nonce);
        BOOST_REQUIRE(light_out.success);
        const std::string
            light_result_string = blockhashToHexString(&light_out.result),
            full_result_string = blockhashToHexString(&full_out.result);
        BOOST_REQUIRE_MESSAGE(light_result_string == full_result_string,
            "\nlight result: " << light_result_string.c_str() << "\n"
                << "full result: " << full_result_string.c_str() << "\n");
        const std::string
            light_mix_hash_string = blockhashToHexString(&light_out.mix_hash),
            full_mix_hash_string = blockhashToHexString(&full_out.mix_hash);
        BOOST_REQUIRE_MESSAGE(full_mix_hash_string == light_mix_hash_string,
            "\nlight mix hash: " << light_mix_hash_string.c_str() << "\n"
                << "full mix hash: " << full_mix_hash_string.c_str() << "\n");
        ethash_h256_t check_hash;
        ethash_quick_hash(&check_hash, &hash, nonce, &full_out.mix_hash);
        const std::string check_hash_string = blockhashToHexString(&check_hash);
        BOOST_REQUIRE_MESSAGE(check_hash_string == full_result_string,
            "\ncheck hash string: " << check_hash_string.c_str() << "\n"
                << "full result: " << full_result_string.c_str() << "\n");
    }

    ethash_light_delete(light);
    ethash_full_delete(full);
    fs::remove_all("./test_ethash_directory/");
}

static bool g_executed = false;
static unsigned g_prev_progress = 0;
static int test_full_callback(unsigned _progress)
{
    g_executed = true;
    BOOST_CHECK(_progress >= g_prev_progress);
    g_prev_progress = _progress;
    return 0;
}

static int test_full_callback_that_fails(unsigned _progress)
{
    return 1;
}

static int test_full_callback_create_incomplete_dag(unsigned _progress)
{
    if (_progress >= 30) {
        return 1;
    }
    return 0;
}

BOOST_AUTO_TEST_CASE(full_client_callback) {
    uint64_t full_size;
    uint64_t cache_size;
    ethash_h256_t seed;
    ethash_h256_t hash;
    memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

    cache_size = 1024;
    full_size = 1024 * 32;

    ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
    ethash_full_t full = ethash_full_new_internal(
        "./test_ethash_directory/",
        seed,
        full_size,
        light,
        test_full_callback
    );
    BOOST_ASSERT(full);
    BOOST_CHECK(g_executed);
    BOOST_REQUIRE_EQUAL(g_prev_progress, 100);

    ethash_full_delete(full);
    ethash_light_delete(light);
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(failing_full_client_callback) {
    uint64_t full_size;
    uint64_t cache_size;
    ethash_h256_t seed;
    ethash_h256_t hash;
    memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

    cache_size = 1024;
    full_size = 1024 * 32;

    ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
    ethash_full_t full = ethash_full_new_internal(
        "./test_ethash_directory/",
        seed,
        full_size,
        light,
        test_full_callback_that_fails
    );
    BOOST_ASSERT(!full);
    ethash_light_delete(light);
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(test_incomplete_dag_file) {
    uint64_t full_size;
    uint64_t cache_size;
    ethash_h256_t seed;
    ethash_h256_t hash;
    memcpy(&seed, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);
    memcpy(&hash, "~~~X~~~~~~~~~~~~~~~~~~~~~~~~~~~~", 32);

    cache_size = 1024;
    full_size = 1024 * 32;

    ethash_light_t light = ethash_light_new_internal(cache_size, &seed);
    // create a full but stop at 30%, so no magic number is written
    ethash_full_t full = ethash_full_new_internal(
        "./test_ethash_directory/",
        seed,
        full_size,
        light,
        test_full_callback_create_incomplete_dag
    );
    BOOST_ASSERT(!full);
    FILE *f = NULL;
    // confirm that we get a size_mismatch because the magic number is missing
    BOOST_REQUIRE_EQUAL(
        ETHASH_IO_MEMO_SIZE_MISMATCH,
        ethash_io_prepare("./test_ethash_directory/", seed, &f, full_size, false)
    );
    ethash_light_delete(light);
    fs::remove_all("./test_ethash_directory/");
}

BOOST_AUTO_TEST_CASE(test_block22_verification) {
    // from POC-9 testnet, epoch 0
    ethash_light_t light = ethash_light_new(22);
    ethash_h256_t seedhash = stringToBlockhash("372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d");
    BOOST_ASSERT(light);
    ethash_return_value_t ret = ethash_light_compute(
        light,
        seedhash,
        0x495732e0ed7a801cU
    );
    BOOST_REQUIRE_EQUAL(blockhashToHexString(&ret.result), "00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614");
    ethash_h256_t difficulty = ethash_h256_static_init(0x2, 0x5, 0x40);
    BOOST_REQUIRE(ethash_check_difficulty(&ret.result, &difficulty));
    ethash_light_delete(light);
}

BOOST_AUTO_TEST_CASE(test_block30001_verification) {
    // from POC-9 testnet, epoch 1
    ethash_light_t light = ethash_light_new(30001);
    ethash_h256_t seedhash = stringToBlockhash("7e44356ee3441623bc72a683fd3708fdf75e971bbe294f33e539eedad4b92b34");
    BOOST_ASSERT(light);
    ethash_return_value_t ret = ethash_light_compute(
        light,
        seedhash,
        0x318df1c8adef7e5eU
    );
    ethash_h256_t difficulty = ethash_h256_static_init(0x17, 0x62, 0xff);
    BOOST_REQUIRE(ethash_check_difficulty(&ret.result, &difficulty));
    ethash_light_delete(light);
}

BOOST_AUTO_TEST_CASE(test_block60000_verification) {
    // from POC-9 testnet, epoch 2
    ethash_light_t light = ethash_light_new(60000);
    ethash_h256_t seedhash = stringToBlockhash("5fc898f16035bf5ac9c6d9077ae1e3d5fc1ecc3c9fd5bee8bb00e810fdacbaa0");
    BOOST_ASSERT(light);
    ethash_return_value_t ret = ethash_light_compute(
        light,
        seedhash,
        0x50377003e5d830caU
    );
    ethash_h256_t difficulty = ethash_h256_static_init(0x25, 0xa6, 0x1e);
    BOOST_REQUIRE(ethash_check_difficulty(&ret.result, &difficulty));
    ethash_light_delete(light);
}

// Test of Full DAG creation with the minimal ethash.h API.
// Commented out since travis tests would take too much time.
// Uncomment and run on your own machine if you want to confirm
// it works fine.
#if 0
static int progress_cb(unsigned _progress)
{
    printf("CREATING DAG. PROGRESS: %u\n", _progress);
    fflush(stdout);
    return 0;
}

BOOST_AUTO_TEST_CASE(full_dag_test) {
    ethash_light_t light = ethash_light_new(55);
    BOOST_ASSERT(light);
    ethash_full_t full = ethash_full_new(light, progress_cb);
    BOOST_ASSERT(full);
    ethash_light_delete(light);
    ethash_full_delete(full);
}
#endif
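The three POC-9 verification cases above can also be replayed through the Python binding added in this changeset. The sketch below is an assumption-laden illustration, not code from the repository: it assumes the extension imports as pyethash, that the pass condition is the big-endian "result <= boundary" comparison discussed earlier, and that ethash_h256_static_init(0x2, 0x5, 0x40) corresponds to a boundary of 0x020540 followed by 29 zero bytes.

import pyethash

def verify(block_number, header_hash_hex, nonce, boundary_hex, expected_result_hex=None):
    cache = pyethash.mkcache_bytes(block_number)           # light-client cache for the epoch
    out = pyethash.hashimoto_light(block_number, cache,
                                   bytes.fromhex(header_hash_hex), nonce)
    result = out["result"]
    if expected_result_hex is not None:
        assert result.hex() == expected_result_hex
    # proof-of-work passes when the result does not exceed the boundary
    return int.from_bytes(result, "big") <= int(boundary_hex, 16)

# block 22, epoch 0 (vector from test_block22_verification above)
assert verify(
    22,
    "372eca2454ead349c3df0ab5d00b0b706b23e49d469387db91811cee0358fc6d",
    0x495732e0ed7a801c,
    "020540" + "00" * 29,
    "00000b184f1fdd88bfd94c86c39e65db0c36144d5e43f745f722196e730cb614",
)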
32
Godeps/_workspace/src/github.com/ethereum/ethash/test/c/test.sh
generated
vendored
Normal file
@ -0,0 +1,32 @@
#!/bin/bash

# Strict mode
set -e

VALGRIND_ARGS="--tool=memcheck"
VALGRIND_ARGS+=" --leak-check=yes"
VALGRIND_ARGS+=" --track-origins=yes"
VALGRIND_ARGS+=" --show-reachable=yes"
VALGRIND_ARGS+=" --num-callers=20"
VALGRIND_ARGS+=" --track-fds=yes"

SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
TEST_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

rm -rf $TEST_DIR/build
mkdir -p $TEST_DIR/build
cd $TEST_DIR/build ;
cmake ../../.. > /dev/null
make Test
./test/c/Test

# If we have valgrind also run memory check tests
if hash valgrind 2>/dev/null; then
    echo "======== Running tests under valgrind ========";
    cd $TEST_DIR/build/ && valgrind $VALGRIND_ARGS ./test/c/Test
fi
1
Godeps/_workspace/src/github.com/ethereum/ethash/test/python/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
python-virtual-env/
3
Godeps/_workspace/src/github.com/ethereum/ethash/test/python/requirements.txt
generated
vendored
Normal file
@ -0,0 +1,3 @@
pyethereum==0.7.522
nose==1.3.4
pysha3==0.3
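The pysha3 dependency above is enough to cross-check the get_seedhash binding against the Ethash spec, which derives the seed for epoch n by applying Keccak-256 n times to 32 zero bytes. A small sketch under those assumptions (pysha3's sha3_256 in this pre-FIPS release is taken to be the Keccak variant Ethash uses, and the extension module name is assumed to be pyethash):

import sha3  # pysha3; sha3_256 here is assumed to be Keccak-256

EPOCH_LENGTH = 30000

def seedhash(block_number: int) -> bytes:
    # per the Ethash spec: hash 32 zero bytes once per elapsed epoch
    seed = b"\x00" * 32
    for _ in range(block_number // EPOCH_LENGTH):
        seed = sha3.sha3_256(seed).digest()
    return seed

# cross-check against the C extension (usage assumed, module built separately):
# import pyethash
# assert pyethash.get_seedhash(30001) == seedhash(30001)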
Some files were not shown because too many files have changed in this diff.