Compare commits
1572 Commits
SHA1 | Author | Date | |
---|---|---|---|
6c6c7b2af3 | |||
5c93462b5e | |||
701d60c889 | |||
9be07de539 | |||
885c13c2c9 | |||
5bbd7fb390 | |||
79b11121a7 | |||
72af509abe | |||
382c9266e6 | |||
f46adfac28 | |||
514b1587db | |||
66a7ef57e6 | |||
ecca2c3c1b | |||
7a7f6a4f29 | |||
c8e70186a6 | |||
794741b8b2 | |||
48705f8aea | |||
10b3f97c9d | |||
5596b664c4 | |||
10181b57a9 | |||
ac193e36ce | |||
5ba9225fe3 | |||
fc87bc5f52 | |||
c1740e4540 | |||
e3db1236de | |||
02b4d074f6 | |||
2dcb22afec | |||
69c8be7c86 | |||
55e5926f34 | |||
f30179d62e | |||
c4d21bc8e5 | |||
160add8570 | |||
564c8f3ae6 | |||
451ffdb62b | |||
6ff2c02991 | |||
f585f9eee8 | |||
4ea4d2dc34 | |||
1e67378df8 | |||
cc313e78b7 | |||
b0ca1b67ce | |||
03d00361f5 | |||
f90a193f92 | |||
8e14bb1448 | |||
cd6c861dc5 | |||
c91f7beb53 | |||
2bacf36d80 | |||
32d8d42274 | |||
da7d57e07c | |||
8cab3ab435 | |||
8f567dc8a2 | |||
504278e839 | |||
e7408b5552 | |||
1901521ed0 | |||
23b51a68cb | |||
dc92779c0a | |||
d70536b5d4 | |||
07635e43e2 | |||
64a3a3d23c | |||
777540628e | |||
a4df80f47f | |||
bc2a5578c0 | |||
ebf41d16a0 | |||
9d0c51fb0f | |||
08f27428b4 | |||
27a5622e99 | |||
8596fc5974 | |||
ad16aeb0a2 | |||
b872961ec8 | |||
68955ed2eb | |||
ff9a868232 | |||
20b818d206 | |||
63246e2542 | |||
3c48a25762 | |||
286ec5df40 | |||
4ee92f2d19 | |||
f7e39a7724 | |||
79cdbcfe64 | |||
79bf69b556 | |||
28aea46ac0 | |||
fc5f8a3dda | |||
3cc476c8ab | |||
2fd5ba6bd4 | |||
b4b27ebaea | |||
8c037dc487 | |||
3e14837c1c | |||
58f7f977e7 | |||
afdfdebd87 | |||
e311bb520a | |||
1ab3e30698 | |||
314246da78 | |||
bf1e263128 | |||
7e57fee355 | |||
a4da8416ee | |||
998abb9107 | |||
059c767adf | |||
d4f11d9b4f | |||
104375f398 | |||
1bbd400899 | |||
f9fb70d2ee | |||
1335a6cc8c | |||
b70a73cd3e | |||
0b978f91b6 | |||
64d199edf2 | |||
4e0fea4d30 | |||
9bd6068fef | |||
76069eef38 | |||
3df7142b3e | |||
3d123bcde6 | |||
3040243042 | |||
9facf6423d | |||
2403656373 | |||
ef0edc6e32 | |||
133de3d806 | |||
f8d8b56b28 | |||
d8aaa3a215 | |||
6131dd55c5 | |||
02656f9f61 | |||
02aa86e659 | |||
7bbdf3e268 | |||
6ca59d98f8 | |||
833eeb9f23 | |||
2b422b1a47 | |||
73c5aba21f | |||
6a56b15019 | |||
5d9ac49c7e | |||
db568a61e2 | |||
17ce0a37de | |||
26b2d6e1aa | |||
fff6e03a79 | |||
d375193797 | |||
374c49e0ac | |||
10ce8b0e3c | |||
9a7e99f75d | |||
6f8c7b0def | |||
1c45f2f42e | |||
e063d538b8 | |||
43437806fb | |||
8f06b7980d | |||
971079822e | |||
f42bd73ce5 | |||
f5925b0459 | |||
8edaaa227d | |||
bd74882d83 | |||
67439c1dba | |||
f59a49d591 | |||
2b50367fe9 | |||
46cf0a616b | |||
85454e7678 | |||
80de4dc72c | |||
8c2cf3c66c | |||
37e9fcacca | |||
b36f54c684 | |||
3991745c5f | |||
6dd2803b8e | |||
fc0c6c175c | |||
7c74e166b0 | |||
f7848c2aa5 | |||
19866075ac | |||
faafeef79e | |||
cd82b89fde | |||
3780d0b6f7 | |||
ff89a3ddce | |||
aee70ae30b | |||
392151e251 | |||
5b742fb82b | |||
b159cdd8dd | |||
524ca544b2 | |||
1059927f9c | |||
fca6e515d6 | |||
ca436f4b90 | |||
350bb6d9ca | |||
5e805aa865 | |||
455fcc8309 | |||
0cc9b8791e | |||
8b84bd283f | |||
4a260dc1f2 | |||
4371367cd1 | |||
bc0e6a5e68 | |||
60c858a529 | |||
e9b850805e | |||
53f3460ab5 | |||
bdf98b4fcd | |||
c259e6874e | |||
13cda8d9b6 | |||
4f9789b28d | |||
ee748d1451 | |||
0732ad4e47 | |||
3d32690b54 | |||
a602ee90f2 | |||
fc78ce61c0 | |||
ffebf00114 | |||
99da85c895 | |||
f4841ff43d | |||
3a678a15c9 | |||
3e0dbe0eaa | |||
1802682f65 | |||
4d2249773a | |||
8a8fc5f8ef | |||
671ba3791d | |||
9488e7fd5f | |||
23c6fcdbe8 | |||
cf5d4b5541 | |||
b95c2b58f0 | |||
8e9197f2a1 | |||
c65f10a17b | |||
a56f3dc0d9 | |||
47359301a2 | |||
688ee6d1e5 | |||
ad8d519eb5 | |||
9e80d9bee1 | |||
0ff35e170d | |||
8d6a5a3581 | |||
33a24bb731 | |||
2d47c6bfde | |||
d68ad36bb9 | |||
df74a43396 | |||
01cb9948f1 | |||
bf468a81ec | |||
ab5646c532 | |||
3b25012481 | |||
bd381be9c8 | |||
225de7ca0a | |||
bd01cd7183 | |||
0958770625 | |||
4f7a38001f | |||
65f0e905dd | |||
4b8860a7b4 | |||
34ec9913f6 | |||
f25486c3fb | |||
88b4fe7d21 | |||
5e38f7a664 | |||
4c1d0b164b | |||
48ee7f9de7 | |||
fe13949d9d | |||
138f26c93a | |||
8f12d76a47 | |||
be8f8409bc | |||
a633a2d7ea | |||
67aff49822 | |||
8c313eed26 | |||
c4d28aee9b | |||
a0aa071ca6 | |||
c7041fe145 | |||
41318f3776 | |||
65b96dc4b9 | |||
8bbd598ef4 | |||
ae11545bc5 | |||
0550957989 | |||
dfd076244d | |||
6dc32e897a | |||
e4301564c2 | |||
bae7565231 | |||
9e5f03b6c4 | |||
bb366271fe | |||
4a741df757 | |||
5421a08d2f | |||
cf611c50b9 | |||
c008176f9f | |||
f3359d5e58 | |||
feb2932706 | |||
ea1d1825a8 | |||
f321ed23fb | |||
413dc1d265 | |||
fdf2184b1e | |||
3c7338d6c8 | |||
ef8d4711d5 | |||
caa00b7e6d | |||
5603eb9116 | |||
78c04c920d | |||
10a45cb59b | |||
cd88f69715 | |||
46d0d04f97 | |||
514659a023 | |||
01c9cf1cb5 | |||
b751cf3901 | |||
b6e99deee9 | |||
d40179f882 | |||
beb708e6d7 | |||
f2c5b2cc1c | |||
a4277450b2 | |||
b664bedcf2 | |||
eebde1a2e2 | |||
b0b3cf2eeb | |||
c98d9b49bf | |||
0042f13d47 | |||
d432688886 | |||
58a1e13e6d | |||
a1f3878ec5 | |||
a20a02ce0b | |||
9a44e1035e | |||
c62d5422bb | |||
9012863ad7 | |||
a5d08c893d | |||
a4e4c76cb3 | |||
7a11e86442 | |||
4a1d516d78 | |||
b6b0e00198 | |||
3d66ba56ef | |||
767dc6c73d | |||
36e7963467 | |||
98e101ef8e | |||
50c18e6eb8 | |||
60e27b51bc | |||
693d9ccbfb | |||
5c53a5be66 | |||
431cf2a1e4 | |||
4f77857f74 | |||
fade09a7ff | |||
b58a501673 | |||
db6e695002 | |||
335abdceb1 | |||
732273094c | |||
b8793edd83 | |||
eb92522278 | |||
061889d4ea | |||
e3dfd55820 | |||
2fefe4baa0 | |||
ac9865791a | |||
80f7c6c299 | |||
bc24b7a912 | |||
1496b3aff6 | |||
1e9f86b49e | |||
65ea913e29 | |||
9a0e433b13 | |||
04d2de9119 | |||
3285a0fda3 | |||
6171d01b11 | |||
cf87713dd4 | |||
ac92d7c411 | |||
d5a79934dc | |||
0424192e61 | |||
9c2882b2e5 | |||
1a0eb903f1 | |||
0036e2a747 | |||
727eadacca | |||
99cba96f26 | |||
f272879e5a | |||
72dd51e25a | |||
799a469000 | |||
f4d81178d8 | |||
310d2e7ef4 | |||
3ecde4e2aa | |||
a355b401db | |||
cba33029a8 | |||
9702badd83 | |||
067dc2cbf5 | |||
65979770e6 | |||
41bdf49eed | |||
ea11f7dd7a | |||
8df24760d7 | |||
71814bf6c4 | |||
ec1700600a | |||
b0f30b0b37 | |||
e96f2981e2 | |||
09d59da3a1 | |||
280609c99b | |||
309da541de | |||
dd06c85843 | |||
ae40d51410 | |||
b865fad888 | |||
afb17cf071 | |||
08959bbc70 | |||
673c92db6b | |||
c2a494c743 | |||
afdd23b5ca | |||
cb809c03da | |||
45421d3130 | |||
115e7d71cc | |||
dd5ed01f3b | |||
b7ff0d42e3 | |||
c98bce709c | |||
17f0b11942 | |||
6231edcbab | |||
07aae19e5d | |||
b596b4ba5b | |||
8b1e4c4c5e | |||
846d091bd2 | |||
a346aedb90 | |||
ef25b826e6 | |||
261b3e2351 | |||
344f25fb3e | |||
1afaea4bfe | |||
11cf5b7ead | |||
069cb661c3 | |||
3b8915e387 | |||
437ceaa9be | |||
136f78ff0a | |||
aa73420207 | |||
3556962053 | |||
e1e87d8b1a | |||
30cc1c3bf0 | |||
10582a97ca | |||
e16a7ef60f | |||
a816e75662 | |||
3ee75bec9f | |||
04b668b232 | |||
da636c53d6 | |||
2a41e76b39 | |||
4a2c17b1ab | |||
bc75351edf | |||
33b158e0ed | |||
83721a95ce | |||
e7119ce12d | |||
a5f6a1cb7c | |||
e6aff513db | |||
8a4c1fb799 | |||
10a57fc3d4 | |||
a2f23ca9b1 | |||
e20158176d | |||
ef7b9fb7d0 | |||
b0d0fafd68 | |||
90c7155ef4 | |||
df4e7eccf5 | |||
7c707d14d1 | |||
953a995116 | |||
c5840ce12f | |||
3b3989de6a | |||
40976ea1a0 | |||
d18b509e40 | |||
2e4d23a793 | |||
60293820b7 | |||
82defe5c56 | |||
dd483d7d0d | |||
dddebe469b | |||
cf19586cfb | |||
fd5d51c9ae | |||
2ec5cf1673 | |||
36a800a1d2 | |||
93832b633e | |||
021c3c2816 | |||
ff2c966e7f | |||
14cc40a31a | |||
881df0e629 | |||
1c2f6f5597 | |||
d51a9fd6b7 | |||
464f30d301 | |||
8a28408616 | |||
e1dc7ece62 | |||
99127ff2e7 | |||
81d6ec908a | |||
38b4fc8069 | |||
11ab73f6d8 | |||
18e9cb1187 | |||
502a2bd69f | |||
8b517d7f00 | |||
cad071009d | |||
181a3309df | |||
02fa3e3179 | |||
a8eafcdc0e | |||
bcf2465b0b | |||
c3dc01caf1 | |||
cf4faa491a | |||
59966255ad | |||
f8acc0af7e | |||
02a29060d2 | |||
0255ed6335 | |||
96c2ab22e0 | |||
5494e6e7ab | |||
32db571681 | |||
a6af56fa4d | |||
5884606ec3 | |||
f6c0f76cc5 | |||
f9be9a2302 | |||
95f0bd0acf | |||
8dce4c283d | |||
fff16169c6 | |||
5f7eb78918 | |||
e61035c5a3 | |||
37e3f561f1 | |||
ba3bcd16a6 | |||
207bd7d2cd | |||
4047ccad2f | |||
a13e920af0 | |||
f958d7d482 | |||
7cc6abeef6 | |||
54253aae4c | |||
09aabaea9f | |||
ecec454e92 | |||
7b2fc0643f | |||
d2fda73ad7 | |||
5aa21d8b32 | |||
9fc90b6747 | |||
edef84da2b | |||
6430e672c9 | |||
a31d268b76 | |||
e353f9c088 | |||
af48a331bf | |||
80e74fc1e0 | |||
03dffe3efd | |||
cb3f5f8b93 | |||
c7a4d9cf8a | |||
facc47cb5c | |||
6876e92f8d | |||
15f32a8d57 | |||
6d359dbcc6 | |||
65e1095c3f | |||
0cc492f815 | |||
ee05cc4a27 | |||
97f38ce4d6 | |||
732b75325c | |||
7d0ac94809 | |||
906378a32e | |||
cc5654cb59 | |||
b35aa21f9f | |||
409b61fe3c | |||
d5d910e8b6 | |||
5e29f4be93 | |||
b27589517a | |||
2870496124 | |||
43671067fb | |||
30d706c35e | |||
b57680b0b2 | |||
e418bc67d2 | |||
a7b9e484d0 | |||
6b7ae4e751 | |||
436acb4f75 | |||
050ceff1ae | |||
a0cd77e833 | |||
1d1d988aa7 | |||
dd37064a15 | |||
49f1e84253 | |||
9de257505b | |||
706a1e552c | |||
18bbe12425 | |||
04fcae207d | |||
542e42b21e | |||
feeccdf4ec | |||
bfe5eb7f8c | |||
f32b72ca5d | |||
9cd7135516 | |||
bd2c54fa9f | |||
8570ef19eb | |||
d144299af4 | |||
badbaf66b6 | |||
b801be99d4 | |||
cc13d576f0 | |||
71fdaa4238 | |||
158d603528 | |||
9aca9e6deb | |||
0ec1104ba9 | |||
702bef8493 | |||
c76ad94492 | |||
d83a9a8f44 | |||
3d8de95f99 | |||
24b9860c1b | |||
cc303017c3 | |||
49437a02c9 | |||
b319f027a0 | |||
09777952ee | |||
e50a5b7771 | |||
fb98a8c6c2 | |||
96d1a4aee6 | |||
105b37f1b4 | |||
c4a0efafd7 | |||
db93641941 | |||
1cf2ee4597 | |||
baf20010e7 | |||
16afb5c468 | |||
225c28716f | |||
aa9a78e463 | |||
7419d0c382 | |||
4be37e91b9 | |||
1018bf6a00 | |||
37e252587a | |||
bb7dca275c | |||
69ac6cc70e | |||
df1fbe3c06 | |||
8771c3061f | |||
8ff7e55ab5 | |||
37dd9086ec | |||
67c47459f2 | |||
0f4b75bea2 | |||
d42a56afc5 | |||
b4547a560b | |||
04fa6a3744 | |||
26da6daaa9 | |||
e7911ad9ea | |||
11e7a712f4 | |||
61d2150a07 | |||
3fa0fa713b | |||
f1534f5797 | |||
9a2720fb35 | |||
c213fd1fd8 | |||
525116dbff | |||
1c1dc0e0fc | |||
c6e6f1fec2 | |||
da7af44060 | |||
6742fc526f | |||
9b84caf3a5 | |||
06d6685eb5 | |||
1dfd65f6d0 | |||
7242e4f71b | |||
be39bf382a | |||
2ba9374789 | |||
d97dea51ec | |||
24dd0355a3 | |||
6d038e762b | |||
728a299728 | |||
4e4e5fca54 | |||
61ede86737 | |||
dc4c59d42b | |||
0a01f3f607 | |||
f3579f6460 | |||
5c8fa6ae1a | |||
b7d93500f1 | |||
df72e20cc5 | |||
023670f6ba | |||
567d41d936 | |||
3b00a77de5 | |||
288700c4d8 | |||
544247c918 | |||
8cf08e4b25 | |||
eee96a5bb7 | |||
667cd518ce | |||
4c6f4e569e | |||
9c4fd4e9c9 | |||
db2698c87c | |||
07c216d603 | |||
1a00e39539 | |||
92e50adfa3 | |||
2b284e7366 | |||
e7030c4bf5 | |||
a38e1a9c00 | |||
faf713632c | |||
725c2646a0 | |||
f2e94ff94b | |||
090699c0f6 | |||
213b8f9af4 | |||
9184249b39 | |||
280f08be84 | |||
d304da3803 | |||
357d00cdb1 | |||
f3b7dcc5bd | |||
82e7c1d124 | |||
f972691eea | |||
c52ab932e6 | |||
f30733c806 | |||
bf4155846c | |||
230cf2ec91 | |||
7ff75ac2f2 | |||
167be7f260 | |||
7e3762fdc6 | |||
94c71c171f | |||
5f7826270c | |||
b117da2db3 | |||
e02883c0a2 | |||
e588e0ca2b | |||
d4f60d362b | |||
46bcd9a92c | |||
dbd88a1aa4 | |||
965407f238 | |||
96ae35e2ac | |||
dc0a006c7c | |||
2f28a12cdb | |||
35e8308bf7 | |||
fc97c7a38d | |||
48bc07ae97 | |||
a624ce69f7 | |||
d0eba23af3 | |||
43362ab4fb | |||
38e273597c | |||
e8b3e22612 | |||
32ee1b3cd8 | |||
8676aeb798 | |||
37511ec520 | |||
46eea4d105 | |||
0a63c3e362 | |||
45c7cfd2e6 | |||
5c8fe28b72 | |||
50ee279f25 | |||
562ccff822 | |||
11539030cd | |||
aca066f337 | |||
5ee00209d2 | |||
e7bdb00700 | |||
357732a840 | |||
f89dd62776 | |||
1ca20a2697 | |||
23a5d64fd0 | |||
61e6bb1247 | |||
d4fd06c3dc | |||
47af53f9aa | |||
3f923f3902 | |||
189dee26c6 | |||
b9d48b4a93 | |||
ec7f81f4bc | |||
29fac7de44 | |||
555273495b | |||
024d41d0c2 | |||
46ec4357e7 | |||
c6e716eb31 | |||
388803b139 | |||
4ac481b45f | |||
94334c233e | |||
b7f010de52 | |||
a0c011f1a8 | |||
449a850023 | |||
e51f65af1f | |||
037c8b9ae9 | |||
b19e5885fe | |||
9b0af51386 | |||
bf21549faa | |||
6f74fb962e | |||
6ec8135256 | |||
e94dfb78f8 | |||
bdef758d5c | |||
2c4455b12a | |||
c8695fae35 | |||
a973d1d523 | |||
15a609d5d6 | |||
c12f4df910 | |||
72dcd3c58b | |||
4ece9c6cb0 | |||
a07539fb88 | |||
6988e5f074 | |||
aba016da72 | |||
09aef5c0ae | |||
9b161187ec | |||
8883f36fe3 | |||
0850f68fd1 | |||
57f4e90257 | |||
f8f428cc18 | |||
e23e86921b | |||
65ed6a9def | |||
e99c788155 | |||
c7022c1a0c | |||
26cd41f0c7 | |||
fb19846855 | |||
205ea95802 | |||
c5215fdd48 | |||
fad5eb0a87 | |||
b3c0e9d3cc | |||
470b79385b | |||
1ecf99bd0f | |||
e0fb4d1da9 | |||
ac2a0e615b | |||
52bd4e29ff | |||
833e4d1319 | |||
564b60520c | |||
085987ff2c | |||
aaf9cfd18c | |||
7ff686d6ec | |||
0cc9409fda | |||
6dd27e7cff | |||
d0eeb3ebdc | |||
fa99986143 | |||
6ea8eba8ce | |||
9b5c7153c9 | |||
d52b0c32a0 | |||
7734ead520 | |||
1bed9b3fea | |||
8b57c49490 | |||
296450451b | |||
4f5f90222f | |||
f58fb32283 | |||
9c45b4462c | |||
690f6ea1d7 | |||
1c140f7382 | |||
e5a93bf99a | |||
f3c368ca73 | |||
b8823a8b34 | |||
355a42f36d | |||
658bcbcbdc | |||
7669c5b5ec | |||
a390ca5f30 | |||
c46c41eae3 | |||
82aa5b1de6 | |||
12379c697a | |||
f5348e17f8 | |||
a2b4abd89a | |||
6d5e100d0d | |||
1886d03faa | |||
9b62facdd4 | |||
da92f5b2d6 | |||
f1069a30b9 | |||
2718b42828 | |||
fc52f2c007 | |||
0b9070fe01 | |||
c04598f2b0 | |||
96778a1c21 | |||
935d891e9d | |||
682875adff | |||
0126d01435 | |||
946db8ba65 | |||
7814a8e131 | |||
ebc3d232f4 | |||
f087c66f95 | |||
508fdc3496 | |||
d63752ef4d | |||
6fb76443b3 | |||
2eefed84c9 | |||
230530f5ea | |||
17d92233d9 | |||
54a65e6d87 | |||
26d385c18b | |||
da2a22c384 | |||
0fa9a8929c | |||
2a1a531ba3 | |||
51f6b6d33f | |||
b5a100b859 | |||
54fcab20e3 | |||
a2bc90d1d7 | |||
c01f8c3d3c | |||
e4181a7f1b | |||
01f6f2d741 | |||
c5df37c111 | |||
e0ceeab0d1 | |||
93077c98e4 | |||
3dab303826 | |||
3160fd24ba | |||
ce7822c130 | |||
745a3adebd | |||
218ec6c085 | |||
d30d7800e0 | |||
8820d97039 | |||
b52fde7cf7 | |||
2b4d0b6ff9 | |||
21f1370d2a | |||
d78f9b834a | |||
445deb7470 | |||
02b67558e8 | |||
91c8f87fb1 | |||
d056b7fa52 | |||
2a609af518 | |||
1d5d6616ae | |||
b9b3efb09f | |||
0f34d506b5 | |||
5eccc122e8 | |||
681b51aac4 | |||
4268cb8efe | |||
3f1a72908c | |||
2fed476ce1 | |||
6cb39dd3da | |||
88cc1ca55a | |||
1bd9769111 | |||
47372813ef | |||
fc213c873d | |||
972f0bd3db | |||
808310a569 | |||
0a5450fe04 | |||
9bab0b8a24 | |||
17182732f5 | |||
18c77744ff | |||
ac93a6ff6c | |||
13e3b2f433 | |||
f2da6581ba | |||
444fc892b0 | |||
b56aee3697 | |||
35a7dcb162 | |||
e0fde02290 | |||
59b8245bbc | |||
8f9daaa3ba | |||
d3b751e4d9 | |||
7731061903 | |||
b9683d3748 | |||
66979aa468 | |||
93f9c023cc | |||
e0ee0cc66a | |||
9b135a9c20 | |||
e171bf74f8 | |||
bb2e99dfc2 | |||
b37d175e59 | |||
f087633efd | |||
bbce726c8a | |||
bbc4ea4ae8 | |||
2126d81488 | |||
06b381d1c9 | |||
08eea0f0e4 | |||
a1798a8188 | |||
0fac8cba47 | |||
1ca74aba6f | |||
2ce30382d9 | |||
8bc545be2a | |||
891fcd8ce1 | |||
bd06091874 | |||
3e3edcc465 | |||
89a32267f7 | |||
021177ca9b | |||
115364b0a9 | |||
6d15d00ac4 | |||
bdaa43510b | |||
9a51f5c350 | |||
301c0a6303 | |||
65f486ff02 | |||
df096a7771 | |||
0e9a9f243f | |||
12c964b2b7 | |||
cf71f5cd60 | |||
adab2e16bd | |||
a3e3235d97 | |||
2be3c4b0e3 | |||
0ee796632a | |||
1fe67c125d | |||
ba996f5e27 | |||
64bf5bafe9 | |||
4d05bbf2a4 | |||
471990f771 | |||
7b623aab9d | |||
e871ae1270 | |||
c44830ebf3 | |||
3e4a04f34d | |||
38827dd9ca | |||
21fd9f037e | |||
033763eaf7 | |||
2573094df2 | |||
745026b7b4 | |||
a07d955eaa | |||
9d6f4e2e7f | |||
ff07d54843 | |||
e53879328c | |||
b792412d31 | |||
49c6f1053c | |||
4d960f6dc6 | |||
8941665896 | |||
9cc0f60666 | |||
fdb8edf5ea | |||
9ba9fe818d | |||
92224d27b1 | |||
157a4bd926 | |||
29d6881112 | |||
e2692921e1 | |||
b63138c3ec | |||
a59fcc33e6 | |||
07311f3157 | |||
17637ed1bb | |||
f15828e901 | |||
dadd689359 | |||
b750cab56a | |||
485748c416 | |||
080699f7df | |||
8e35f54931 | |||
d44f1a77ee | |||
4181046488 | |||
d7c398b638 | |||
5f5d0aa4ff | |||
9f1520b4c0 | |||
a98e8c0889 | |||
ee445a2ba4 | |||
b2c226cb7d | |||
13614f4e1c | |||
4f9ccdd70f | |||
4e36b1e3da | |||
f12f8a6c14 | |||
c57c54ce96 | |||
c8130df1d9 | |||
af8a742d00 | |||
e67500aa15 | |||
a6d3bf6fc3 | |||
3e617f3cd6 | |||
0fe35b907a | |||
3fc7c97827 | |||
7f79d249a6 | |||
f138374027 | |||
f52a1ae849 | |||
3bc0fe1ee3 | |||
fa0cc27400 | |||
4cb29bde2e | |||
2dcf75a722 | |||
671fd94e25 | |||
717d2f6f9e | |||
8cb95cb916 | |||
56b446190a | |||
86f9e836be | |||
a90a170361 | |||
889a5e0cf1 | |||
9f8bc00cf5 | |||
3363a1c227 | |||
fc9939c4e1 | |||
7267f796e6 | |||
7dfeceb8cc | |||
3807e520ec | |||
7625b1a4f4 | |||
1fc5cc1b59 | |||
61ccb43487 | |||
bf24b120d7 | |||
b70acf3c5b | |||
b5be6b72cb | |||
318ad3c1e4 | |||
e949a2ed2f | |||
8d0108fc5d | |||
ba41efa8a0 | |||
f81660b6db | |||
91bceb4ace | |||
be746628c7 | |||
ec5f531f4b | |||
37e5816bcd | |||
665bb43a4c | |||
e4bf004560 | |||
1609df3275 | |||
24f288770e | |||
65e6319b12 | |||
ec75953f50 | |||
801a13f791 | |||
eea8d6aa96 | |||
2b9cd71d67 | |||
5df83e3bd9 | |||
6061707371 | |||
20899c05a4 | |||
4c8c5e2f74 | |||
d1a95c643e | |||
9c3ea0d32d | |||
67e0894d9e | |||
6cc87a31c6 | |||
b8c766a9c5 | |||
66441c9b4b | |||
18d51d1de8 | |||
01d5fc670b | |||
0f1cbfd3da | |||
586f10ecb1 | |||
978737f5d5 | |||
fa0e057f8a | |||
bca7bfa927 | |||
12d654a6fc | |||
8e64e4383c | |||
f59d8cde26 | |||
94c0519be2 | |||
529c502876 | |||
c04c8f10f0 | |||
e05d35e6e0 | |||
e1e2df656a | |||
f7da5b29f0 | |||
2b4c236773 | |||
a8ca75738a | |||
aad4890082 | |||
e5edd3b983 | |||
a47341cf96 | |||
e46bda5093 | |||
a98d1d67d6 | |||
ba2884f343 | |||
1d80155d5e | |||
a0e42aa4e2 | |||
92959cd4ef | |||
2c802399c3 | |||
8ed72a8470 | |||
0d9a8207d6 | |||
04edbb0703 | |||
e1c1fce92c | |||
c8695209f6 | |||
9b95112a2d | |||
a602c57c8d | |||
f3228592f5 | |||
87b8254da1 | |||
20eab80189 | |||
810389c07a | |||
3badd3782b | |||
be2a264915 | |||
b81baf5423 | |||
64359c9417 | |||
5a3853f83f | |||
532d746036 | |||
c66e18b175 | |||
2d4bd3b3ad | |||
a96d6c68e2 | |||
504815091f | |||
4dd3e7fe35 | |||
4c614909ff | |||
b0a23c73cf | |||
922c1f8f9f | |||
c3c58eb601 | |||
ce3c52d17d | |||
6663d0264e | |||
6e4d623dc8 | |||
8e704d9718 | |||
5bd32bd90d | |||
b9f417e2e6 | |||
9f7b087235 | |||
2cb9738649 | |||
0dc590a9f8 | |||
dfd2c60509 | |||
fd27393df3 | |||
233950da40 | |||
bd0aafb4fd | |||
a672eae3d1 | |||
99d0d771a7 | |||
8dcea0ac07 | |||
7a6c6ec946 | |||
dfe79cc784 | |||
4a439c2359 | |||
4c16c82500 | |||
5513c49c54 | |||
b61f48e5aa | |||
de4b39a1a3 | |||
322502b441 | |||
b7dfd333c5 | |||
178da7c6a9 | |||
d89ea3e6f9 | |||
6c9c1e6712 | |||
b10bcd924b | |||
d8e2e9a41f | |||
1f70b279ba | |||
e33e57684f | |||
a0c6649960 | |||
ca73dea3b9 | |||
648bd22427 | |||
0231d8f86d | |||
a91908e567 | |||
ae33883c2f | |||
4dca5d4db7 | |||
21701190ac | |||
5cd86443ee | |||
779ddb1832 | |||
445feaeef5 | |||
932d973e36 | |||
9eb6f627fa | |||
323c2d6775 | |||
09baeec0da | |||
8247bccf71 | |||
bbf37c0404 | |||
e336b0d60e | |||
5cd4430a8d | |||
23420ff67a | |||
ef50e01c5e | |||
cc0064b267 | |||
1aaa599147 | |||
d27472cc74 | |||
80ea44c485 | |||
dba29970d7 | |||
4cd2617848 | |||
5b31794dfe | |||
355f4b0c15 | |||
bbb5e5d56a | |||
b8bd9a71c8 | |||
be3865211c | |||
0f19cbc6e5 | |||
49da42983a | |||
7db7109a5b | |||
9f8d192991 | |||
760fd65487 | |||
8b1df1a259 | |||
9bc97a5785 | |||
6707f70b4a | |||
df30ef5177 | |||
7e8ad6cad3 | |||
e0e18f3841 | |||
328f0dd631 | |||
15c8d46b04 | |||
052918a6d9 | |||
afe41de6b3 | |||
2ad5dba50a | |||
0b8a14fed7 | |||
ac9013162e | |||
ed2bc7fbe9 | |||
d0c820acd6 | |||
2f9f2cbb19 | |||
bad0de0dcb | |||
3b62c145f8 | |||
3914e82f17 | |||
b522b788b6 | |||
1b73c79234 | |||
36956da4d2 | |||
f4d878f3d8 | |||
61acd18e79 | |||
e1b4acfb6e | |||
d85d3c74db | |||
8ecee175f2 | |||
8b10617bba | |||
90b16a3e85 | |||
4dc1fb923a | |||
b8dec948d4 | |||
87670a7ac1 | |||
63d293cdbe | |||
f0dbec0c93 | |||
79789af2e7 | |||
8639b0fae9 | |||
00665a0b72 | |||
b59c8399fb | |||
289b30715d | |||
7770304576 | |||
890ffa05f8 | |||
89014b4524 | |||
f8608a5228 | |||
437c1e40b2 | |||
53db80da89 | |||
28cc3cc960 | |||
1291778032 | |||
b930baa580 | |||
66ee2dec53 | |||
f2ae2f7eef | |||
88a593d559 | |||
5d9bb0a050 | |||
8048f4d4f6 | |||
d48e6ae66f | |||
4f46bd19d0 | |||
ca49510e6d | |||
25ac04a444 | |||
8e52c2e754 | |||
2bb5ec1e41 | |||
55522373fd | |||
c9471e7782 | |||
5b262ff5ab | |||
e8d0538a00 | |||
a1c63e8be6 | |||
a64b1b4375 | |||
64500ab0fa | |||
00b853418e | |||
8d56bf5ceb | |||
44f419ec0f | |||
177cab5fe7 | |||
187d6a66a5 | |||
6952fe3a5c | |||
b19b7c39ac | |||
9276c4e163 | |||
2cd7a0395d | |||
f2be249385 | |||
81b01f1c2b | |||
a4d9e63d12 | |||
64af2aafda | |||
40cdcf1183 | |||
182d9cb752 | |||
c2ddfb343a | |||
9e9bfc4e26 | |||
f63c6c008f | |||
eb2f01aee8 | |||
30fb5c3e81 | |||
c780901cd5 | |||
ca419f3cd8 | |||
a45421baaf | |||
be6a3696a9 | |||
7943ecb812 | |||
16d8397e30 | |||
1a6682c21d | |||
82b14a05f2 | |||
e66b158f0b | |||
c88e435724 | |||
eeb2a1a6e3 | |||
7335a70a02 | |||
1b7b2ba216 | |||
3c836dd71b | |||
90fce8bfa6 | |||
1f1ea18b54 | |||
07caa3fccd | |||
83fc6fdb34 | |||
2acb9a6ea7 | |||
e482b5694f | |||
6c959207db | |||
71e8ae01b8 | |||
a7cc3248fe | |||
82e09c17a9 | |||
ae341b31c8 | |||
ab7adb0027 | |||
b4b5921dd0 | |||
c683e4aaa2 | |||
b7159818f9 | |||
d4b55fc5b2 | |||
4f7627972e | |||
920e1ccfe0 | |||
54ea317375 | |||
0731b44809 | |||
cb84e3f029 | |||
bb6115b737 | |||
d8715fba1a | |||
b4cc8cbac4 | |||
c3a77d6268 | |||
ba8c4c6b1a | |||
44bc2e80dd | |||
4e8cec05ab | |||
afecb93e2e | |||
437c3863f1 | |||
710435b51b | |||
cd791bd855 | |||
7cc6b801e0 | |||
5a3844981b | |||
863d166c7b | |||
b04219fdbb | |||
61734cc7ae | |||
0951524ca2 | |||
b0a6b979a3 | |||
7f2b077da4 | |||
06ac31cf1a | |||
2e14aff80f | |||
a59a93f476 | |||
e859f36967 | |||
25ed5fedda | |||
3778f1bf77 | |||
f85f46461f | |||
5bb517355e | |||
22eea17b06 | |||
c8d6efd602 | |||
b6b17e5648 | |||
88b012ad3b | |||
581b320b9d | |||
b42a5b118f | |||
eeb322ae64 | |||
52ede09b17 | |||
f5c432bcab | |||
7c12e8ea44 | |||
30860491ba | |||
0f6f83a709 | |||
e3fe634f99 | |||
dc3e969e14 | |||
b7f6404168 | |||
b6b5ec8f75 | |||
ca37730c9d | |||
6fb8ae2bd6 | |||
2924fdfcf7 | |||
eac390f289 | |||
de54273f51 | |||
6b727c0440 | |||
2c6be49d20 | |||
a42b7355f4 | |||
b4a5251391 | |||
a183ea29f9 | |||
affffb39b3 | |||
0ef327bbee | |||
795b70423e | |||
49227f65ff | |||
a386b4c983 | |||
5cb3fa2f89 | |||
43d716280e | |||
cc6170d7fc | |||
4d300e4dec | |||
1f58b2d084 | |||
6903fa2d47 | |||
319ae90184 | |||
7b884e0075 | |||
3e7b4ae0c3 | |||
3b75d0ce23 | |||
48807de305 | |||
1d7d1a3499 | |||
969008dbb0 | |||
6976e602f6 | |||
3b087e03ea | |||
6c8b023298 | |||
8dbf59fec5 | |||
ac0f8b81ae | |||
5fc032a9d1 | |||
806e3cd075 | |||
8923325d5c | |||
a2b2c8adc9 | |||
d20238c2a7 | |||
4ce83bf57b | |||
b500953425 | |||
2f99720901 | |||
6bd9008025 | |||
c97df052a9 | |||
7d9c5e0f7d | |||
d62d5fe59a | |||
d8cec35b10 | |||
f81cff539a | |||
5fff491bbd | |||
781915f183 | |||
5b44a819d3 | |||
056f15aa53 | |||
37bda7e029 | |||
454dc9b7c2 | |||
b3d7a5c5d0 | |||
475521dd74 | |||
3c09c5f12d | |||
1a9e66915b | |||
84d11c19fd | |||
312263c7d9 | |||
3369783e0a | |||
47ff813012 | |||
3b39d4d1c1 | |||
2fab2b310b | |||
c2ac4465cd | |||
bb8059f6aa | |||
4c2cc32f2e | |||
89a3fbc0fb | |||
077353b47e | |||
46621fd2c3 | |||
d6625ac34d | |||
b46b36729f | |||
893fabd336 | |||
44ea0da2b0 | |||
4f4e1026f5 | |||
8c23f20c68 | |||
4f65227971 | |||
e4736fe469 | |||
e32925397b | |||
f5f042ffdc | |||
d445a9aafb | |||
464660651d | |||
8d9141ed9a | |||
4be37222ef | |||
b0d9f7372a | |||
704fde01e8 | |||
0c9a858f2d | |||
b8ba80bff7 | |||
8ec6ccc54d | |||
1ca9e552d9 | |||
8f0a4a25f8 | |||
071af57bcf | |||
d68865f3b1 | |||
a724952f75 | |||
a4215f469c | |||
4c1abcb0ce | |||
1dd272080d | |||
3e3a79ea13 | |||
771655e3fe | |||
60cd5bf939 | |||
91b7690428 | |||
bb01bea4e2 | |||
c145589f25 | |||
c7c82f1b44 | |||
f58ac2b46b | |||
016007bd25 | |||
4ee00b2309 | |||
fb81bc3291 | |||
c646d287f8 | |||
51d4539a79 | |||
f273c64a94 | |||
74ec95e7f6 | |||
65f340bb95 | |||
4d014d6d7e | |||
fe56461387 | |||
56efed71b5 | |||
91f18ffd47 | |||
a4c4125b11 | |||
993b412160 | |||
2c2e389b77 | |||
3291235711 | |||
461cdb593b | |||
7f00e8c033 | |||
a87089fd2d | |||
1e24c2e4f4 | |||
9e56811a37 | |||
6060e098c9 | |||
aa1e052cb4 | |||
1a652afe16 | |||
6f1e45d5ba | |||
91130ea3fc | |||
b7caa1751c | |||
4c3da0f2e1 | |||
39b4ef4b02 | |||
74be4a62c5 | |||
e11489eb5f | |||
f970610c04 | |||
68b48cc045 | |||
7596b33506 | |||
2b7aff202a | |||
e76c55a9b8 | |||
ca211065b6 | |||
c7442ef0d2 | |||
a691aa2a13 | |||
00787fe781 | |||
f5a29eab5c | |||
2b94d7fc7f | |||
51f8ce26cf | |||
092fcaffe4 | |||
7e6ede1051 | |||
ddfef21125 | |||
3af101ccc1 | |||
4f088b6f1c | |||
da9aad8876 | |||
96dc42d99c | |||
1e50f5dd28 | |||
f127799d79 | |||
6b1d73446d | |||
fd4e161497 | |||
6362a9d610 | |||
d55fc35df1 | |||
67e9d33486 | |||
219859f8bb | |||
e0493457d5 | |||
1e3a7d4fab | |||
848dec3da2 | |||
ae0880997b | |||
9eb50a9624 | |||
acecefee88 | |||
ba784bdf36 | |||
c4de28938f | |||
6c33ba14a4 | |||
7a5b571c67 | |||
599e3c7b3f | |||
86bc7795a3 | |||
a3efdb13f2 | |||
3a97280ae8 | |||
4f3f6e28d5 | |||
79ada05756 | |||
c48271958f | |||
ac66d96c5a | |||
e5165aeb27 | |||
970f4c06e1 | |||
22ef7370e7 | |||
a38be3eb48 | |||
73c028c40a | |||
783289068a | |||
bb3651abc8 | |||
f0134f363b | |||
ce88d41907 | |||
0f9539e1e3 | |||
90e07b19ab | |||
63d1d145e2 | |||
c039bb38d4 | |||
861add3d72 | |||
6886913fdf | |||
c75d3b0ede | |||
7c0eb47dfb | |||
dbcdf83ed8 | |||
d31eab94fd | |||
553f08b819 | |||
32258af87b | |||
d251d48439 | |||
f9917c8c7b | |||
5a458da42a | |||
89c6c5bb85 | |||
fdd61b83ff | |||
32559ccad1 | |||
826efc2295 | |||
88f174a014 | |||
780bdb3e80 | |||
828e1e35fd | |||
fc85dd175e | |||
89ba380b3c | |||
b57b6e341e | |||
ad0e6e971e | |||
fdba0cb03c | |||
61ee9f299d | |||
16a23ff740 | |||
1d5d21726a | |||
7b662103a0 | |||
da729e5b38 | |||
5c39a1bb26 | |||
14ae5708d6 | |||
ffaf58f0a9 | |||
4496a44f68 | |||
4f1d92b332 | |||
ab664c7e17 | |||
8ee84584a4 | |||
748d1c171d | |||
a7434fd008 | |||
d9bb8179d3 | |||
ee36057dd5 | |||
a476aabf1a | |||
f3769a97d5 | |||
ca18202eb9 | |||
5eb60a6da2 | |||
f86ea9aad5 | |||
5bcdbb1ce4 | |||
2e530f4889 | |||
7f515b0e88 | |||
b4dd3209b2 | |||
262d92834a | |||
faf663133b | |||
64a6c2c1b6 | |||
847aaffbb8 | |||
a8472e0fdb | |||
1580ec1804 | |||
8906b2fe09 | |||
e798e4fd75 | |||
4b1a7d3868 | |||
e27af97a3c | |||
542b839ec7 | |||
67cd4ee8d2 | |||
c8a8ad97f7 | |||
d87f7a1e81 | |||
adc1b50395 | |||
e86619e75d | |||
b40dc8a1da | |||
86da6feb40 | |||
fe532a98f9 | |||
39ce85cf5d | |||
72e60dea36 | |||
7c1f74713e | |||
a5ff487889 | |||
331b815300 | |||
2348f8e2a8 | |||
251b3c6406 | |||
cc21706c50 | |||
25931f12c1 | |||
91a7a4a786 | |||
d89e57ea8d | |||
5a901bb555 | |||
284f1d6beb | |||
bc6fdad786 | |||
c05db38a5e | |||
34c56766b7 | |||
5479097790 | |||
20d4e527bd | |||
a824c3f02f | |||
bc3b406bff |
.dockerignore (new file, 6 lines)

```
@@ -0,0 +1,6 @@
**/.git
**/*_test.go

build/_workspace
build/_bin
tests/testdata
```

.github/CONTRIBUTING.md (vendored, 6 changed lines)

```
@@ -8,10 +8,8 @@ and help.
## Contributing

If you'd like to contribute to go-ethereum please fork, fix, commit and
send a pull request. Commits who do not comply with the coding standards
are ignored (use gofmt!). If you send pull requests make absolute sure that you
commit on the `develop` branch and that you do not merge to master.
Commits that are directly based on master are simply ignored.
send a pull request. Commits which do not comply with the coding standards
are ignored (use gofmt!).

See [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide)
for more details on configuring your environment, testing, and
```

.gitignore (vendored, 11 changed lines)

```
@@ -13,8 +13,7 @@
.ethtest
*/**/*tx_database*
*/**/*dapps*
Godeps/_workspace/pkg
Godeps/_workspace/bin
build/_vendor/pkg

#*
.#*
@@ -23,17 +22,11 @@ Godeps/_workspace/bin
.project
.settings

deploy/osx/Mist.app
deploy/osx/Mist\ Installer.dmg
cmd/mist/assets/ext/ethereum.js/

# used by the Makefile
/build/_workspace/
/build/bin/
/geth*.zip

# travis
profile.tmp
profile.cov

# vagrant
.vagrant
```

.gitmodules (vendored, 6 changed lines)

```
@@ -1,3 +1,3 @@
[submodule "cmd/mist/assets/ext/ethereum.js"]
path = cmd/mist/assets/ext/ethereum.js
url = https://github.com/ethereum/web3.js
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
```

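The replacement submodule points the `tests/testdata` path at the ethereum/tests repository, so the consensus test vectors are no longer tracked directly in this repo. As a minimal sketch (not part of the diff itself), a fresh checkout would typically pull that submodule in before running the test suite:

```sh
# Hypothetical setup steps, assuming a fresh clone of the repository.
git clone https://github.com/ethereum/go-ethereum.git
cd go-ethereum
# Populate tests/testdata from the ethereum/tests submodule declared in .gitmodules.
git submodule update --init
```
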
.mailmap (48 changed lines)

```
@@ -17,6 +17,7 @@ Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
Bas van Kervel <bas@ethdev.com>
Bas van Kervel <bas@ethdev.com> <basvankervel@ziggo.nl>
Bas van Kervel <bas@ethdev.com> <basvankervel@gmail.com>
Bas van Kervel <bas@ethdev.com> <bas-vk@users.noreply.github.com>

Sven Ehlert <sven@ethdev.com>

@@ -62,4 +63,49 @@ Joseph Chow <ethereum@outlook.com> ethers <TODO>

Enrique Fynn <enriquefynn@gmail.com>

Vincent G <caktux@gmail.com>
Vincent G <caktux@gmail.com>

RJ Catalano <rj@erisindustries.com>

Nchinda Nchinda <nchinda2@gmail.com>

Aron Fischer <github@aron.guru> <homotopycolimit@users.noreply.github.com>

Vlad Gluhovsky <gluk256@users.noreply.github.com>

Ville Sundell <github@solarius.fi>

Elliot Shepherd <elliot@identitii.com>

Yohann Léon <sybiload@gmail.com>

Gregg Dourgarian <greggd@tempworks.com>

Casey Detrio <cdetrio@gmail.com>

Jens Agerberg <github@agerberg.me>

Nick Johnson <arachnid@notdot.net>

Henning Diedrich <hd@eonblast.com>
Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com>

Felix Lange <fjl@twurst.com>
Felix Lange <fjl@twurst.com> <fjl@users.noreply.github.com>

Максим Чусовлянов <mchusovlianov@gmail.com>

Louis Holbrook <dev@holbrook.no>
Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com>

Thomas Bocek <tom@tomp2p.net>

Victor Tran <vu.tran54@gmail.com>

Justin Drake <drakefjustin@gmail.com>

Frank Wang <eternnoir@gmail.com>

Gary Rong <garyrong0905@gmail.com>

Guillaume Nicolas <guin56@gmail.com>
```

.travis.yml (211 changed lines)

```
@@ -1,29 +1,196 @@
language: go
go:
  - 1.4.2
install:
  # - go get code.google.com/p/go.tools/cmd/goimports
  # - go get github.com/golang/lint/golint
  # - go get golang.org/x/tools/cmd/vet
  - go get golang.org/x/tools/cmd/cover
before_script:
  # - gofmt -l -w .
  # - goimports -l -w .
  # - golint .
  # - go vet ./...
  # - go test -race ./...
script:
  - make travis-test-with-coverage
after_success:
  - bash <(curl -s https://codecov.io/bash)
env:
  global:
    - secure: "U2U1AmkU4NJBgKR/uUAebQY87cNL0+1JHjnLOmmXwxYYyj5ralWb1aSuSH3qSXiT93qLBmtaUkuv9fberHVqrbAeVlztVdUsKAq7JMQH+M99iFkC9UiRMqHmtjWJ0ok4COD1sRYixxi21wb/JrMe3M1iL4QJVS61iltjHhVdM64="
go_import_path: github.com/ethereum/go-ethereum
sudo: false
matrix:
  include:
    - os: linux
      dist: trusty
      sudo: required
      go: 1.7.6
      script:
        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
        - sudo modprobe fuse
        - sudo chmod 666 /dev/fuse
        - sudo chown root:$USER /etc/fuse.conf
        - go run build/ci.go install
        - go run build/ci.go test -coverage

    - os: linux
      dist: trusty
      sudo: required
      go: 1.8.3
      script:
        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
        - sudo modprobe fuse
        - sudo chmod 666 /dev/fuse
        - sudo chown root:$USER /etc/fuse.conf
        - go run build/ci.go install
        - go run build/ci.go test -coverage

    # These are the latest Go versions.
    - os: linux
      dist: trusty
      sudo: required
      go: 1.9.0
      script:
        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
        - sudo modprobe fuse
        - sudo chmod 666 /dev/fuse
        - sudo chown root:$USER /etc/fuse.conf
        - go run build/ci.go install
        - go run build/ci.go test -coverage -misspell

    - os: osx
      go: 1.9.0
      sudo: required
      script:
        - brew update
        - brew install caskroom/cask/brew-cask
        - brew cask install osxfuse
        - go run build/ci.go install
        - go run build/ci.go test -coverage -misspell

    # This builder does the Ubuntu PPA and Linux Azure uploads
    - os: linux
      dist: trusty
      sudo: required
      go: 1.9.0
      env:
        - ubuntu-ppa
        - azure-linux
      addons:
        apt:
          packages:
            - devscripts
            - debhelper
            - dput
            - gcc-multilib
            - fakeroot
      script:
        # Build for the primary platforms that Trusty can manage
        - go run build/ci.go debsrc -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>" -upload ppa:ethereum/ethereum
        - go run build/ci.go install
        - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
        - go run build/ci.go install -arch 386
        - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

        # Switch over GCC to cross compilation (breaks 386, hence why do it here only)
        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
        - sudo ln -s /usr/include/asm-generic /usr/include/asm

        - GOARM=5 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
        - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
        - GOARM=6 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
        - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
        - GOARM=7 CC=arm-linux-gnueabihf-gcc go run build/ci.go install -arch arm
        - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds
        - CC=aarch64-linux-gnu-gcc go run build/ci.go install -arch arm64
        - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

    # This builder does the Linux Azure MIPS xgo uploads
    - os: linux
      dist: trusty
      sudo: required
      services:
        - docker
      go: 1.9.0
      env:
        - azure-linux-mips
      script:
        - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v
        - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done
        - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

        - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v
        - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done
        - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

        - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v
        - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done
        - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

        - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v
        - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done
        - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds

    # This builder does the Android Maven and Azure uploads
    - os: linux
      dist: precise # Needed for the android tools
      addons:
        apt:
          packages:
            - oracle-java8-installer
            - oracle-java8-set-default
      language: android
      android:
        components:
          - platform-tools
          - tools
          - android-15
          - android-19
          - android-24
      env:
        - azure-android
        - maven-android
      before_install:
        - curl https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
      script:
        # Build the Android archive and upload it to Maven Central and Azure
        - curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
        - unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
        - mv android-ndk-r14b $HOME
        - export ANDROID_NDK=$HOME/android-ndk-r14b

        - mkdir -p $GOPATH/src/github.com/ethereum
        - ln -s `pwd` $GOPATH/src/github.com/ethereum
        - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds

    # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
    - os: osx
      go: 1.9.0
      env:
        - azure-osx
        - azure-ios
        - cocoapods-ios
      script:
        - go run build/ci.go install
        - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds

        # Build the iOS framework and upload it to CocoaPods and Azure
        - gem uninstall cocoapods -a -x
        - gem install cocoapods

        - mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
        - sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb
        - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi

        - xctool -version
        - xcrun simctl list

        - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds

    # This builder does the Azure archive purges to avoid accumulating junk
    - os: linux
      dist: trusty
      sudo: required
      go: 1.9.0
      env:
        - azure-purge
      script:
        - go run build/ci.go purge -store gethstore/builds -days 14

install:
  - go get golang.org/x/tools/cmd/cover
script:
  - go run build/ci.go install
  - go run build/ci.go test -coverage

notifications:
  webhooks:
    urls:
      - https://webhooks.gitter.im/e/e09ccdce1048c5e03445
    on_success: change
    on_success: change
    on_failure: always
    on_start: false
```

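The comments in the PPA/Azure builder above explain the switch to GCC cross compilers before the ARM builds. As a rough local sketch (assuming a Debian-like host and the same cross toolchain; this is not prescribed by the diff itself), one of those matrix steps could be reproduced outside Travis like this:

```sh
# Hypothetical local reproduction of one cross-compile step from the CI matrix above.
sudo apt-get install gcc-arm-linux-gnueabi libc6-dev-armel-cross
# Build the ARMv5 binaries with build/ci.go, mirroring the GOARM=5 line in .travis.yml.
GOARM=5 CC=arm-linux-gnueabi-gcc go run build/ci.go install -arch arm
```
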
AUTHORS (51 changed lines)

```
@@ -1,36 +1,85 @@
# This is the official list of go-ethereum authors for copyright purposes.

Ales Katona <ales@coinbase.com>
Alex Leverington <alex@ethdev.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com>
Aron Fischer <github@aron.guru>
Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com>
Brian Schroeder <bts@gmail.com>
Casey Detrio <cdetrio@gmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com>
Drake Burroughs <wildfyre@hotmail.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com>
Elliot Shepherd <elliot@identitii.com>
Enrique Fynn <enriquefynn@gmail.com>
Ethan Buchman <ethan@coinculture.info>
Fabian Vogelsteller <fabian@frozeman.de>
Fabio Berger <fabioberger1991@gmail.com>
Felix Lange <fjl@twurst.com>
Frank Wang <eternnoir@gmail.com>
Gary Rong <garyrong0905@gmail.com>
Gregg Dourgarian <greggd@tempworks.com>
Guillaume Nicolas <guin56@gmail.com>
Gustav Simonsson <gustav.simonsson@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com>
Henning Diedrich <hd@eonblast.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Jae Kwon <jkwon.work@gmail.com>
Jamie Pitts <james.pitts@gmail.com>
Jason Carver <jacarver@linkedin.com>
Jeff R. Allen <jra@nella.org>
Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me>
Jonathan Brown <jbrown@bluedroplet.com>
Joseph Chow <ethereum@outlook.com>
Justin Clark-Casey <justincc@justincc.org>
Justin Drake <drakefjustin@gmail.com>
Kenji Siu <kenji@isuntv.com>
Kobi Gurkan <kobigurk@gmail.com>
Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com>
Lewis Marshall <lewis@lmars.net>
Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com>
Maran Hidskes <maran.hidskes@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com>
Martin Holst Swende <martin@swende.se>
Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Micah Zoltu <micah@zoltu.net>
Nchinda Nchinda <nchinda2@gmail.com>
Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net>
Paulo L F Casaretto <pcasaretto@gmail.com>
Peter Pratscher <pratscher@gmail.com>
Péter Szilágyi <peterke@gmail.com>
RJ Catalano <rj@erisindustries.com>
Ramesh Nair <ram@hiddentao.com>
Ricardo Catalinas Jiménez <r@untroubled.be>
Rémy Roy <remyroy@remyroy.com>
Shintaro Kaneko <kaneshin0120@gmail.com>
Stein Dekker <dekker.stein@gmail.com>
Steven Roose <stevenroose@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com>
Thomas Bocek <tom@tomp2p.net>
Tosh Camille <tochecamille@gmail.com>
Valentin Wüstholz <wuestholz@users.noreply.github.com>
Victor Farazdagi <simple.square@gmail.com>
Victor Tran <vu.tran54@gmail.com>
Viktor Trón <viktor.tron@gmail.com>
Ville Sundell <github@solarius.fi>
Vincent G <caktux@gmail.com>
Vitalik Buterin <v@buterin.com>
Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad Gluhovsky <gluk256@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com>
Zahoor Mohamed <zahoor@zahoor.in>
Zsolt Felföldi <zsfelfoldi@gmail.com>
holisticode <holistic.computing@gmail.com>
ken10100147 <sunhongping@kanjian.com>
ligi <ligi@ligi.de>
xiekeyang <xiekeyang@users.noreply.github.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com>
```

Dockerfile (new file, 16 lines)

```
@@ -0,0 +1,16 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder

RUN apk add --no-cache make gcc musl-dev linux-headers

ADD . /go-ethereum
RUN cd /go-ethereum && make geth

# Pull Geth into a second stage deploy alpine container
FROM alpine:latest

RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]
```

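The two FROM stages keep the final image small: the golang:1.9-alpine stage compiles geth, and only the resulting binary is copied into a plain alpine image. A minimal sketch of building and running it locally follows; the image tag and port mappings are illustrative assumptions, not part of the diff.

```sh
# Hypothetical build/run commands; the tag "go-ethereum:local" is arbitrary.
docker build -t go-ethereum:local .
# Publish the HTTP-RPC, WS-RPC and p2p ports that the Dockerfile EXPOSEs.
docker run -p 8545:8545 -p 8546:8546 -p 30303:30303 -p 30303:30303/udp go-ethereum:local
```
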
Godeps/Godeps.json (generated, deleted, 324 lines)

```
@@ -1,324 +0,0 @@
{
  "ImportPath": "github.com/ethereum/go-ethereum",
  "GoVersion": "go1.5.2",
  "Packages": [
    "./..."
  ],
  "Deps": [
    {
      "ImportPath": "github.com/Gustav-Simonsson/go-opencl/cl",
      "Rev": "593e01cfc4f3353585015321e01951d4a907d3ef"
    },
    {
      "ImportPath": "github.com/cespare/cp",
      "Rev": "165db2f241fd235aec29ba6d9b1ccd5f1c14637c"
    },
    {
      "ImportPath": "github.com/codegangsta/cli",
      "Comment": "1.2.0-215-g0ab42fd",
      "Rev": "0ab42fd482c27cf2c95e7794ad3bb2082c2ab2d7"
    },
    {
      "ImportPath": "github.com/davecgh/go-spew/spew",
      "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
    },
    {
      "ImportPath": "github.com/ethereum/ethash",
      "Comment": "v23.1-245-g25b32de",
      "Rev": "25b32de0c0271065c28c3719c2bfe86959d72f0c"
    },
    {
      "ImportPath": "github.com/fatih/color",
      "Comment": "v0.1-12-g9aae6aa",
      "Rev": "9aae6aaa22315390f03959adca2c4d395b02fcef"
    },
    {
      "ImportPath": "github.com/gizak/termui",
      "Rev": "08a5d3f67b7d9ec87830ea39c48e570a1f18531f"
    },
    {
      "ImportPath": "github.com/golang/snappy",
      "Rev": "799c780093d646c1b79d30894e22512c319fa137"
    },
    {
      "ImportPath": "github.com/hashicorp/golang-lru",
      "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
    },
    {
      "ImportPath": "github.com/hashicorp/golang-lru/simplelru",
      "Rev": "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4"
    },
    {
      "ImportPath": "github.com/huin/goupnp",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/huin/goupnp/dcps/internetgateway1",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/huin/goupnp/dcps/internetgateway2",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/huin/goupnp/httpu",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/huin/goupnp/scpd",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/huin/goupnp/soap",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/huin/goupnp/ssdp",
      "Rev": "46bde78b11f3f021f2a511df138be9e2fc7506e8"
    },
    {
      "ImportPath": "github.com/jackpal/gateway",
      "Rev": "192609c58b8985e645cbe82ddcb28a4362ca0fdc"
    },
    {
      "ImportPath": "github.com/jackpal/go-nat-pmp",
      "Rev": "46523a463303c6ede3ddfe45bde1c7ed52ebaacd"
    },
    {
      "ImportPath": "github.com/mattn/go-colorable",
      "Rev": "9fdad7c47650b7d2e1da50644c1f4ba7f172f252"
    },
    {
      "ImportPath": "github.com/mattn/go-isatty",
      "Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
    },
    {
      "ImportPath": "github.com/mattn/go-runewidth",
      "Comment": "travisish-44-ge882a96",
      "Rev": "e882a96ec18dd43fa283187b66af74497c9101c0"
    },
    {
      "ImportPath": "github.com/microsoft/go-winio",
      "Comment": "v0.2.0",
      "Rev": "9e2895e5f6c3f16473b91d37fae6e89990a4520c"
    },
    {
      "ImportPath": "github.com/nsf/termbox-go",
      "Rev": "362329b0aa6447eadd52edd8d660ec1dff470295"
    },
    {
      "ImportPath": "github.com/pborman/uuid",
      "Comment": "v1.0-6-g0f1a469",
      "Rev": "0f1a46960a86dcdf5dd30d3e6568a497a997909f"
    },
    {
      "ImportPath": "github.com/peterh/liner",
      "Rev": "ad1edfd30321d8f006ccf05f1e0524adeb943060"
    },
    {
      "ImportPath": "github.com/rcrowley/go-metrics",
      "Rev": "51425a2415d21afadfd55cd93432c0bc69e9598d"
    },
    {
      "ImportPath": "github.com/rjeczalik/notify",
      "Rev": "5dd6205716539662f8f14ab513552b41eab69d5d"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto/ast",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto/dbg",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto/file",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto/parser",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto/registry",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/robertkrimen/otto/token",
      "Rev": "53221230c215611a90762720c9042ac782ef74ee"
    },
    {
      "ImportPath": "github.com/rs/cors",
      "Rev": "5950cf11d77f8a61b432a25dd4d444b4ced01379"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/cache",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/comparer",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/errors",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/filter",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/iterator",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/journal",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/memdb",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/opt",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/storage",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/table",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "github.com/syndtr/goleveldb/leveldb/util",
      "Rev": "917f41c560270110ceb73c5b38be2a9127387071"
    },
    {
      "ImportPath": "golang.org/x/crypto/pbkdf2",
      "Rev": "1f22c0103821b9390939b6776727195525381532"
    },
    {
      "ImportPath": "golang.org/x/crypto/ripemd160",
      "Rev": "1f22c0103821b9390939b6776727195525381532"
    },
    {
      "ImportPath": "golang.org/x/crypto/scrypt",
      "Rev": "1f22c0103821b9390939b6776727195525381532"
    },
    {
      "ImportPath": "golang.org/x/net/context",
      "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
    },
    {
      "ImportPath": "golang.org/x/net/html",
      "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
    },
    {
      "ImportPath": "golang.org/x/net/html/atom",
      "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
    },
    {
      "ImportPath": "golang.org/x/net/html/charset",
      "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
    },
    {
      "ImportPath": "golang.org/x/net/websocket",
      "Rev": "8968c61983e8f51a91b8c0ef25bf739278c89634"
    },
    {
      "ImportPath": "golang.org/x/sys/unix",
      "Rev": "50c6bc5e4292a1d4e65c6e9be5f53be28bcbe28e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/charmap",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/htmlindex",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/internal",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/internal/identifier",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/japanese",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/korean",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/simplifiedchinese",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/traditionalchinese",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/encoding/unicode",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/internal/tag",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/internal/utf8internal",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/language",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/runes",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/text/transform",
      "Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
    },
    {
      "ImportPath": "golang.org/x/tools/go/ast/astutil",
      "Rev": "758728c4b28cfbac299730969ef8f655c4761283"
    },
    {
      "ImportPath": "golang.org/x/tools/imports",
      "Rev": "758728c4b28cfbac299730969ef8f655c4761283"
    },
    {
      "ImportPath": "gopkg.in/check.v1",
      "Rev": "4f90aeace3a26ad7021961c297b22c42160c7b25"
    },
    {
      "ImportPath": "gopkg.in/fatih/set.v0",
      "Comment": "v0.1.0-3-g27c4092",
      "Rev": "27c40922c40b43fe04554d8223a402af3ea333f3"
    },
    {
      "ImportPath": "gopkg.in/karalabe/cookiejar.v2/collections/prque",
      "Rev": "8dcd6a7f4951f6ff3ee9cbb919a06d8925822e57"
    }
  ]
}
```

Godeps/Readme (generated, deleted, 5 lines)

```
@@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.

Please do not edit.

See https://github.com/tools/godep for more information.
```

Godeps/_workspace/.gitignore (generated, vendored, deleted, 2 lines)

```
@@ -1,2 +0,0 @@
/pkg
/bin
```

Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/cl.go (generated, vendored, deleted, 26 lines)

```
@@ -1,26 +0,0 @@
/*
Package cl provides a binding to the OpenCL api. It's mostly a low-level
wrapper that avoids adding functionality while still making the interface
a little more friendly and easy to use.

Resource life-cycle management:

For any CL object that gets created (buffer, queue, kernel, etc..) you should
call object.Release() when finished with it to free the CL resources. This
explicitely calls the related clXXXRelease method for the type. However,
as a fallback there is a finalizer set for every resource item that takes
care of it (eventually) if Release isn't called. In this way you can have
better control over the life cycle of resources while having a fall back
to avoid leaks. This is similar to how file handles and such are handled
in the Go standard packages.
*/
package cl

// #include "headers/1.2/opencl.h"
// #cgo CFLAGS: -Iheaders/1.2
// #cgo darwin LDFLAGS: -framework OpenCL
// #cgo linux LDFLAGS: -lOpenCL
import "C"
import "errors"

var ErrUnsupported = errors.New("cl: unsupported")
```

161 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/context.go generated vendored
@@ -1,161 +0,0 @@
package cl

// #include <stdlib.h>
// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

import (
	"runtime"
	"unsafe"
)

const maxImageFormats = 256

type Context struct {
	clContext C.cl_context
	devices   []*Device
}

type MemObject struct {
	clMem C.cl_mem
	size  int
}

func releaseContext(c *Context) {
	if c.clContext != nil {
		C.clReleaseContext(c.clContext)
		c.clContext = nil
	}
}

func releaseMemObject(b *MemObject) {
	if b.clMem != nil {
		C.clReleaseMemObject(b.clMem)
		b.clMem = nil
	}
}

func newMemObject(mo C.cl_mem, size int) *MemObject {
	memObject := &MemObject{clMem: mo, size: size}
	runtime.SetFinalizer(memObject, releaseMemObject)
	return memObject
}

func (b *MemObject) Release() {
	releaseMemObject(b)
}

// TODO: properties
func CreateContext(devices []*Device) (*Context, error) {
	deviceIds := buildDeviceIdList(devices)
	var err C.cl_int
	clContext := C.clCreateContext(nil, C.cl_uint(len(devices)), &deviceIds[0], nil, nil, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clContext == nil {
		return nil, ErrUnknown
	}
	context := &Context{clContext: clContext, devices: devices}
	runtime.SetFinalizer(context, releaseContext)
	return context, nil
}

func (ctx *Context) GetSupportedImageFormats(flags MemFlag, imageType MemObjectType) ([]ImageFormat, error) {
	var formats [maxImageFormats]C.cl_image_format
	var nFormats C.cl_uint
	if err := C.clGetSupportedImageFormats(ctx.clContext, C.cl_mem_flags(flags), C.cl_mem_object_type(imageType), maxImageFormats, &formats[0], &nFormats); err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	fmts := make([]ImageFormat, nFormats)
	for i, f := range formats[:nFormats] {
		fmts[i] = ImageFormat{
			ChannelOrder:    ChannelOrder(f.image_channel_order),
			ChannelDataType: ChannelDataType(f.image_channel_data_type),
		}
	}
	return fmts, nil
}

func (ctx *Context) CreateCommandQueue(device *Device, properties CommandQueueProperty) (*CommandQueue, error) {
	var err C.cl_int
	clQueue := C.clCreateCommandQueue(ctx.clContext, device.id, C.cl_command_queue_properties(properties), &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clQueue == nil {
		return nil, ErrUnknown
	}
	commandQueue := &CommandQueue{clQueue: clQueue, device: device}
	runtime.SetFinalizer(commandQueue, releaseCommandQueue)
	return commandQueue, nil
}

func (ctx *Context) CreateProgramWithSource(sources []string) (*Program, error) {
	cSources := make([]*C.char, len(sources))
	for i, s := range sources {
		cs := C.CString(s)
		cSources[i] = cs
		defer C.free(unsafe.Pointer(cs))
	}
	var err C.cl_int
	clProgram := C.clCreateProgramWithSource(ctx.clContext, C.cl_uint(len(sources)), &cSources[0], nil, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clProgram == nil {
		return nil, ErrUnknown
	}
	program := &Program{clProgram: clProgram, devices: ctx.devices}
	runtime.SetFinalizer(program, releaseProgram)
	return program, nil
}

func (ctx *Context) CreateBufferUnsafe(flags MemFlag, size int, dataPtr unsafe.Pointer) (*MemObject, error) {
	var err C.cl_int
	clBuffer := C.clCreateBuffer(ctx.clContext, C.cl_mem_flags(flags), C.size_t(size), dataPtr, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clBuffer == nil {
		return nil, ErrUnknown
	}
	return newMemObject(clBuffer, size), nil
}

func (ctx *Context) CreateEmptyBuffer(flags MemFlag, size int) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, size, nil)
}

func (ctx *Context) CreateEmptyBufferFloat32(flags MemFlag, size int) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, 4*size, nil)
}

func (ctx *Context) CreateBuffer(flags MemFlag, data []byte) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, len(data), unsafe.Pointer(&data[0]))
}

//float64
func (ctx *Context) CreateBufferFloat32(flags MemFlag, data []float32) (*MemObject, error) {
	return ctx.CreateBufferUnsafe(flags, 4*len(data), unsafe.Pointer(&data[0]))
}

func (ctx *Context) CreateUserEvent() (*Event, error) {
	var err C.cl_int
	clEvent := C.clCreateUserEvent(ctx.clContext, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	return newEvent(clEvent), nil
}

func (ctx *Context) Release() {
	releaseContext(ctx)
}

// http://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/clCreateSubBuffer.html
// func (memObject *MemObject) CreateSubBuffer(flags MemFlag, bufferCreateType BufferCreateType, )
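The Context buffer helpers above take sizes in bytes, which is why CreateBufferFloat32 and CreateEmptyBufferFloat32 multiply by 4. A short hedged sketch continuing the example above (cl.MemReadOnly is an assumed constant name; the MemFlag constants live in a file not shown here):

// uploadFloats copies a float32 slice into a new device buffer using the
// helpers from context.go above; 4*len(data) bytes are passed to clCreateBuffer.
func uploadFloats(ctx *cl.Context, data []float32) (*cl.MemObject, error) {
	return ctx.CreateBufferFloat32(cl.MemReadOnly, data) // flag name assumed
}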
510 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/device.go generated vendored
@@ -1,510 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #include "cl_ext.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const maxDeviceCount = 64
|
||||
|
||||
type DeviceType uint
|
||||
|
||||
const (
|
||||
DeviceTypeCPU DeviceType = C.CL_DEVICE_TYPE_CPU
|
||||
DeviceTypeGPU DeviceType = C.CL_DEVICE_TYPE_GPU
|
||||
DeviceTypeAccelerator DeviceType = C.CL_DEVICE_TYPE_ACCELERATOR
|
||||
DeviceTypeDefault DeviceType = C.CL_DEVICE_TYPE_DEFAULT
|
||||
DeviceTypeAll DeviceType = C.CL_DEVICE_TYPE_ALL
|
||||
)
|
||||
|
||||
type FPConfig int
|
||||
|
||||
const (
|
||||
FPConfigDenorm FPConfig = C.CL_FP_DENORM // denorms are supported
|
||||
FPConfigInfNaN FPConfig = C.CL_FP_INF_NAN // INF and NaNs are supported
|
||||
FPConfigRoundToNearest FPConfig = C.CL_FP_ROUND_TO_NEAREST // round to nearest even rounding mode supported
|
||||
FPConfigRoundToZero FPConfig = C.CL_FP_ROUND_TO_ZERO // round to zero rounding mode supported
|
||||
FPConfigRoundToInf FPConfig = C.CL_FP_ROUND_TO_INF // round to positive and negative infinity rounding modes supported
|
||||
FPConfigFMA FPConfig = C.CL_FP_FMA // IEEE754-2008 fused multiply-add is supported
|
||||
FPConfigSoftFloat FPConfig = C.CL_FP_SOFT_FLOAT // Basic floating-point operations (such as addition, subtraction, multiplication) are implemented in software
|
||||
)
|
||||
|
||||
var fpConfigNameMap = map[FPConfig]string{
|
||||
FPConfigDenorm: "Denorm",
|
||||
FPConfigInfNaN: "InfNaN",
|
||||
FPConfigRoundToNearest: "RoundToNearest",
|
||||
FPConfigRoundToZero: "RoundToZero",
|
||||
FPConfigRoundToInf: "RoundToInf",
|
||||
FPConfigFMA: "FMA",
|
||||
FPConfigSoftFloat: "SoftFloat",
|
||||
}
|
||||
|
||||
func (c FPConfig) String() string {
|
||||
var parts []string
|
||||
for bit, name := range fpConfigNameMap {
|
||||
if c&bit != 0 {
|
||||
parts = append(parts, name)
|
||||
}
|
||||
}
|
||||
if parts == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
func (dt DeviceType) String() string {
|
||||
var parts []string
|
||||
if dt&DeviceTypeCPU != 0 {
|
||||
parts = append(parts, "CPU")
|
||||
}
|
||||
if dt&DeviceTypeGPU != 0 {
|
||||
parts = append(parts, "GPU")
|
||||
}
|
||||
if dt&DeviceTypeAccelerator != 0 {
|
||||
parts = append(parts, "Accelerator")
|
||||
}
|
||||
if dt&DeviceTypeDefault != 0 {
|
||||
parts = append(parts, "Default")
|
||||
}
|
||||
if parts == nil {
|
||||
parts = append(parts, "None")
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
type Device struct {
|
||||
id C.cl_device_id
|
||||
}
|
||||
|
||||
func buildDeviceIdList(devices []*Device) []C.cl_device_id {
|
||||
deviceIds := make([]C.cl_device_id, len(devices))
|
||||
for i, d := range devices {
|
||||
deviceIds[i] = d.id
|
||||
}
|
||||
return deviceIds
|
||||
}
|
||||
|
||||
// Obtain the list of devices available on a platform. 'platform' refers
|
||||
// to the platform returned by GetPlatforms or can be nil. If platform
|
||||
// is nil, the behavior is implementation-defined.
|
||||
func GetDevices(platform *Platform, deviceType DeviceType) ([]*Device, error) {
|
||||
var deviceIds [maxDeviceCount]C.cl_device_id
|
||||
var numDevices C.cl_uint
|
||||
var platformId C.cl_platform_id
|
||||
if platform != nil {
|
||||
platformId = platform.id
|
||||
}
|
||||
if err := C.clGetDeviceIDs(platformId, C.cl_device_type(deviceType), C.cl_uint(maxDeviceCount), &deviceIds[0], &numDevices); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
if numDevices > maxDeviceCount {
|
||||
numDevices = maxDeviceCount
|
||||
}
|
||||
devices := make([]*Device, numDevices)
|
||||
for i := 0; i < int(numDevices); i++ {
|
||||
devices[i] = &Device{id: deviceIds[i]}
|
||||
}
|
||||
return devices, nil
|
||||
}
|
||||
|
||||
func (d *Device) nullableId() C.cl_device_id {
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
return d.id
|
||||
}
|
||||
|
||||
func (d *Device) GetInfoString(param C.cl_device_info, panicOnError bool) (string, error) {
|
||||
var strC [1024]C.char
|
||||
var strN C.size_t
|
||||
if err := C.clGetDeviceInfo(d.id, param, 1024, unsafe.Pointer(&strC), &strN); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return "", toError(err)
|
||||
}
|
||||
|
||||
// OpenCL strings are NUL-terminated, and the terminator is included in strN
|
||||
// Go strings aren't NUL-terminated, so subtract 1 from the length
|
||||
return C.GoStringN((*C.char)(unsafe.Pointer(&strC)), C.int(strN-1)), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoUint(param C.cl_device_info, panicOnError bool) (uint, error) {
|
||||
var val C.cl_uint
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return uint(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoSize(param C.cl_device_info, panicOnError bool) (int, error) {
|
||||
var val C.size_t
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoUlong(param C.cl_device_info, panicOnError bool) (int64, error) {
|
||||
var val C.cl_ulong
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int64(val), nil
|
||||
}
|
||||
|
||||
func (d *Device) getInfoBool(param C.cl_device_info, panicOnError bool) (bool, error) {
|
||||
var val C.cl_bool
|
||||
if err := C.clGetDeviceInfo(d.id, param, C.size_t(unsafe.Sizeof(val)), unsafe.Pointer(&val), nil); err != C.CL_SUCCESS {
|
||||
if panicOnError {
|
||||
panic("Should never fail")
|
||||
}
|
||||
return false, toError(err)
|
||||
}
|
||||
return val == C.CL_TRUE, nil
|
||||
}
|
||||
|
||||
func (d *Device) Name() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_NAME, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Vendor() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_VENDOR, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Extensions() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_EXTENSIONS, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) OpenCLCVersion() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_OPENCL_C_VERSION, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Profile() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_PROFILE, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) Version() string {
|
||||
str, _ := d.GetInfoString(C.CL_DEVICE_VERSION, true)
|
||||
return str
|
||||
}
|
||||
|
||||
func (d *Device) DriverVersion() string {
|
||||
str, _ := d.GetInfoString(C.CL_DRIVER_VERSION, true)
|
||||
return str
|
||||
}
|
||||
|
||||
// The default compute device address space size specified as an
|
||||
// unsigned integer value in bits. Currently supported values are 32 or 64 bits.
|
||||
func (d *Device) AddressBits() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_ADDRESS_BITS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Size of global memory cache line in bytes.
|
||||
func (d *Device) GlobalMemCachelineSize() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum configured clock frequency of the device in MHz.
|
||||
func (d *Device) MaxClockFrequency() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_CLOCK_FREQUENCY, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// The number of parallel compute units on the OpenCL device.
|
||||
// A work-group executes on a single compute unit. The minimum value is 1.
|
||||
func (d *Device) MaxComputeUnits() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_COMPUTE_UNITS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max number of arguments declared with the __constant qualifier in a kernel.
|
||||
// The minimum value is 8 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxConstantArgs() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_CONSTANT_ARGS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max number of simultaneous image objects that can be read by a kernel.
|
||||
// The minimum value is 128 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) MaxReadImageArgs() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_READ_IMAGE_ARGS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum number of samplers that can be used in a kernel. The minimum
|
||||
// value is 16 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE. (Also see sampler_t.)
|
||||
func (d *Device) MaxSamplers() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_SAMPLERS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum dimensions that specify the global and local work-item IDs used
|
||||
// by the data parallel execution model. (Refer to clEnqueueNDRangeKernel).
|
||||
// The minimum value is 3 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxWorkItemDimensions() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max number of simultaneous image objects that can be written to by a
|
||||
// kernel. The minimum value is 8 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) MaxWriteImageArgs() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MAX_WRITE_IMAGE_ARGS, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// The minimum value is the size (in bits) of the largest OpenCL built-in
|
||||
// data type supported by the device (long16 in FULL profile, long16 or
|
||||
// int16 in EMBEDDED profile) for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MemBaseAddrAlign() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_MEM_BASE_ADDR_ALIGN, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthChar() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_CHAR, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthShort() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_SHORT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthInt() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_INT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthLong() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_LONG, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthFloat() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_FLOAT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthDouble() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_DOUBLE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
func (d *Device) NativeVectorWidthHalf() int {
|
||||
val, _ := d.getInfoUint(C.CL_DEVICE_NATIVE_VECTOR_WIDTH_HALF, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max height of 2D image in pixels. The minimum value is 8192
|
||||
// if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image2DMaxHeight() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE2D_MAX_HEIGHT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max width of 2D image or 1D image not created from a buffer object in
|
||||
// pixels. The minimum value is 8192 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image2DMaxWidth() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE2D_MAX_WIDTH, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max depth of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image3DMaxDepth() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_DEPTH, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max height of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image3DMaxHeight() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_HEIGHT, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max width of 3D image in pixels. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
|
||||
func (d *Device) Image3DMaxWidth() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE3D_MAX_WIDTH, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Max size in bytes of the arguments that can be passed to a kernel. The
|
||||
// minimum value is 1024 for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
// For this minimum value, only a maximum of 128 arguments can be passed to a kernel.
|
||||
func (d *Device) MaxParameterSize() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_MAX_PARAMETER_SIZE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Maximum number of work-items in a work-group executing a kernel on a
|
||||
// single compute unit, using the data parallel execution model. (Refer
|
||||
// to clEnqueueNDRangeKernel). The minimum value is 1.
|
||||
func (d *Device) MaxWorkGroupSize() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_MAX_WORK_GROUP_SIZE, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Describes the resolution of device timer. This is measured in nanoseconds.
|
||||
func (d *Device) ProfilingTimerResolution() int {
|
||||
val, _ := d.getInfoSize(C.CL_DEVICE_PROFILING_TIMER_RESOLUTION, true)
|
||||
return int(val)
|
||||
}
|
||||
|
||||
// Size of local memory arena in bytes. The minimum value is 32 KB for
|
||||
// devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) LocalMemSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_LOCAL_MEM_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Max size in bytes of a constant buffer allocation. The minimum value is
|
||||
// 64 KB for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxConstantBufferSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Max size of memory object allocation in bytes. The minimum value is max
|
||||
// (1/4th of CL_DEVICE_GLOBAL_MEM_SIZE, 128*1024*1024) for devices that are
|
||||
// not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxMemAllocSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_MAX_MEM_ALLOC_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Size of global device memory in bytes.
|
||||
func (d *Device) GlobalMemSize() int64 {
|
||||
val, _ := d.getInfoUlong(C.CL_DEVICE_GLOBAL_MEM_SIZE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) Available() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_AVAILABLE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) CompilerAvailable() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_COMPILER_AVAILABLE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) EndianLittle() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_ENDIAN_LITTLE, true)
|
||||
return val
|
||||
}
|
||||
|
||||
// Is CL_TRUE if the device implements error correction for all
|
||||
// accesses to compute device memory (global and constant). Is
|
||||
// CL_FALSE if the device does not implement such error correction.
|
||||
func (d *Device) ErrorCorrectionSupport() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_ERROR_CORRECTION_SUPPORT, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) HostUnifiedMemory() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_HOST_UNIFIED_MEMORY, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) ImageSupport() bool {
|
||||
val, _ := d.getInfoBool(C.CL_DEVICE_IMAGE_SUPPORT, true)
|
||||
return val
|
||||
}
|
||||
|
||||
func (d *Device) Type() DeviceType {
|
||||
var deviceType C.cl_device_type
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_TYPE, C.size_t(unsafe.Sizeof(deviceType)), unsafe.Pointer(&deviceType), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get device type")
|
||||
}
|
||||
return DeviceType(deviceType)
|
||||
}
|
||||
|
||||
// Describes double precision floating-point capability of the OpenCL device
|
||||
func (d *Device) DoubleFPConfig() FPConfig {
|
||||
var fpConfig C.cl_device_fp_config
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_DOUBLE_FP_CONFIG, C.size_t(unsafe.Sizeof(fpConfig)), unsafe.Pointer(&fpConfig), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get double FP config")
|
||||
}
|
||||
return FPConfig(fpConfig)
|
||||
}
|
||||
|
||||
// Describes the OPTIONAL half precision floating-point capability of the OpenCL device
|
||||
func (d *Device) HalfFPConfig() FPConfig {
|
||||
var fpConfig C.cl_device_fp_config
|
||||
err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_HALF_FP_CONFIG, C.size_t(unsafe.Sizeof(fpConfig)), unsafe.Pointer(&fpConfig), nil)
|
||||
if err != C.CL_SUCCESS {
|
||||
return FPConfig(0)
|
||||
}
|
||||
return FPConfig(fpConfig)
|
||||
}
|
||||
|
||||
// Type of local memory supported. This can be set to CL_LOCAL implying dedicated
|
||||
// local memory storage such as SRAM, or CL_GLOBAL. For custom devices, CL_NONE
|
||||
// can also be returned indicating no local memory support.
|
||||
func (d *Device) LocalMemType() LocalMemType {
|
||||
var memType C.cl_device_local_mem_type
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_LOCAL_MEM_TYPE, C.size_t(unsafe.Sizeof(memType)), unsafe.Pointer(&memType), nil); err != C.CL_SUCCESS {
|
||||
return LocalMemType(C.CL_NONE)
|
||||
}
|
||||
return LocalMemType(memType)
|
||||
}
|
||||
|
||||
// Describes the execution capabilities of the device. The mandated minimum capability is CL_EXEC_KERNEL.
|
||||
func (d *Device) ExecutionCapabilities() ExecCapability {
|
||||
var execCap C.cl_device_exec_capabilities
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_EXECUTION_CAPABILITIES, C.size_t(unsafe.Sizeof(execCap)), unsafe.Pointer(&execCap), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get execution capabilities")
|
||||
}
|
||||
return ExecCapability(execCap)
|
||||
}
|
||||
|
||||
func (d *Device) GlobalMemCacheType() MemCacheType {
|
||||
var memType C.cl_device_mem_cache_type
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_GLOBAL_MEM_CACHE_TYPE, C.size_t(unsafe.Sizeof(memType)), unsafe.Pointer(&memType), nil); err != C.CL_SUCCESS {
|
||||
return MemCacheType(C.CL_NONE)
|
||||
}
|
||||
return MemCacheType(memType)
|
||||
}
|
||||
|
||||
// Maximum number of work-items that can be specified in each dimension of the work-group to clEnqueueNDRangeKernel.
|
||||
//
|
||||
// Returns n size_t entries, where n is the value returned by the query for CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS.
|
||||
//
|
||||
// The minimum value is (1, 1, 1) for devices that are not of type CL_DEVICE_TYPE_CUSTOM.
|
||||
func (d *Device) MaxWorkItemSizes() []int {
|
||||
dims := d.MaxWorkItemDimensions()
|
||||
sizes := make([]C.size_t, dims)
|
||||
if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_MAX_WORK_ITEM_SIZES, C.size_t(int(unsafe.Sizeof(sizes[0]))*dims), unsafe.Pointer(&sizes[0]), nil); err != C.CL_SUCCESS {
|
||||
panic("Failed to get max work item sizes")
|
||||
}
|
||||
intSizes := make([]int, dims)
|
||||
for i, s := range sizes {
|
||||
intSizes[i] = int(s)
|
||||
}
|
||||
return intSizes
|
||||
}
|
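GetDevices and the accessor methods in device.go make device discovery straightforward; passing a nil platform is implementation-defined, as the comment above notes. A hedged sketch, not part of the diff, that enumerates GPUs and prints a few of the properties exposed above (assumes the cl and fmt imports in the caller):

// listGPUs prints basic information for every GPU, using the accessors
// defined in device.go above.
func listGPUs() error {
	devices, err := cl.GetDevices(nil, cl.DeviceTypeGPU)
	if err != nil {
		return err
	}
	for _, d := range devices {
		fmt.Printf("%s (%s): %d compute units, %d MiB global memory\n",
			d.Name(), d.Vendor(), d.MaxComputeUnits(), d.GlobalMemSize()/(1024*1024))
	}
	return nil
}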
51 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/device12.go generated vendored
@@ -1,51 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import "unsafe"

const FPConfigCorrectlyRoundedDivideSqrt FPConfig = C.CL_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT

func init() {
	fpConfigNameMap[FPConfigCorrectlyRoundedDivideSqrt] = "CorrectlyRoundedDivideSqrt"
}

func (d *Device) BuiltInKernels() string {
	str, _ := d.getInfoString(C.CL_DEVICE_BUILT_IN_KERNELS, true)
	return str
}

// Is CL_FALSE if the implementation does not have a linker available. Is CL_TRUE if the linker is available. This can be CL_FALSE for the embedded platform profile only. This must be CL_TRUE if CL_DEVICE_COMPILER_AVAILABLE is CL_TRUE
func (d *Device) LinkerAvailable() bool {
	val, _ := d.getInfoBool(C.CL_DEVICE_LINKER_AVAILABLE, true)
	return val
}

func (d *Device) ParentDevice() *Device {
	var deviceId C.cl_device_id
	if err := C.clGetDeviceInfo(d.id, C.CL_DEVICE_PARENT_DEVICE, C.size_t(unsafe.Sizeof(deviceId)), unsafe.Pointer(&deviceId), nil); err != C.CL_SUCCESS {
		panic("ParentDevice failed")
	}
	if deviceId == nil {
		return nil
	}
	return &Device{id: deviceId}
}

// Max number of pixels for a 1D image created from a buffer object. The minimum value is 65536 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE.
func (d *Device) ImageMaxBufferSize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE_MAX_BUFFER_SIZE, true)
	return int(val)
}

// Max number of images in a 1D or 2D image array. The minimum value is 2048 if CL_DEVICE_IMAGE_SUPPORT is CL_TRUE
func (d *Device) ImageMaxArraySize() int {
	val, _ := d.getInfoSize(C.CL_DEVICE_IMAGE_MAX_ARRAY_SIZE, true)
	return int(val)
}
1210 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl.h generated vendored
File diff suppressed because it is too large
315 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_ext.h generated vendored
@@ -1,315 +0,0 @@
|
||||
/*******************************************************************************
|
||||
* Copyright (c) 2008-2013 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
******************************************************************************/
|
||||
|
||||
/* $Revision: 11928 $ on $Date: 2010-07-13 09:04:56 -0700 (Tue, 13 Jul 2010) $ */
|
||||
|
||||
/* cl_ext.h contains OpenCL extensions which don't have external */
|
||||
/* (OpenGL, D3D) dependencies. */
|
||||
|
||||
#ifndef __CL_EXT_H
|
||||
#define __CL_EXT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifdef __APPLE__
|
||||
#include <AvailabilityMacros.h>
|
||||
#endif
|
||||
|
||||
#include <cl.h>
|
||||
|
||||
/* cl_khr_fp16 extension - no extension #define since it has no functions */
|
||||
#define CL_DEVICE_HALF_FP_CONFIG 0x1033
|
||||
|
||||
/* Memory object destruction
|
||||
*
|
||||
* Apple extension for use to manage externally allocated buffers used with cl_mem objects with CL_MEM_USE_HOST_PTR
|
||||
*
|
||||
* Registers a user callback function that will be called when the memory object is deleted and its resources
|
||||
* freed. Each call to clSetMemObjectCallbackFn registers the specified user callback function on a callback
|
||||
* stack associated with memobj. The registered user callback functions are called in the reverse order in
|
||||
* which they were registered. The user callback functions are called and then the memory object is deleted
|
||||
* and its resources freed. This provides a mechanism for the application (and libraries) using memobj to be
|
||||
* notified when the memory referenced by host_ptr, specified when the memory object is created and used as
|
||||
* the storage bits for the memory object, can be reused or freed.
|
||||
*
|
||||
* The application may not call CL api's with the cl_mem object passed to the pfn_notify.
|
||||
*
|
||||
* Please check for the "cl_APPLE_SetMemObjectDestructor" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
|
||||
* before using.
|
||||
*/
|
||||
#define cl_APPLE_SetMemObjectDestructor 1
|
||||
cl_int CL_API_ENTRY clSetMemObjectDestructorAPPLE( cl_mem /* memobj */,
|
||||
void (* /*pfn_notify*/)( cl_mem /* memobj */, void* /*user_data*/),
|
||||
void * /*user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/* Context Logging Functions
|
||||
*
|
||||
* The next three convenience functions are intended to be used as the pfn_notify parameter to clCreateContext().
|
||||
* Please check for the "cl_APPLE_ContextLoggingFunctions" extension using clGetDeviceInfo(CL_DEVICE_EXTENSIONS)
|
||||
* before using.
|
||||
*
|
||||
* clLogMessagesToSystemLog fowards on all log messages to the Apple System Logger
|
||||
*/
|
||||
#define cl_APPLE_ContextLoggingFunctions 1
|
||||
extern void CL_API_ENTRY clLogMessagesToSystemLogAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
/* clLogMessagesToStdout sends all log messages to the file descriptor stdout */
|
||||
extern void CL_API_ENTRY clLogMessagesToStdoutAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
/* clLogMessagesToStderr sends all log messages to the file descriptor stderr */
|
||||
extern void CL_API_ENTRY clLogMessagesToStderrAPPLE( const char * /* errstr */,
|
||||
const void * /* private_info */,
|
||||
size_t /* cb */,
|
||||
void * /* user_data */ ) CL_EXT_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/************************
|
||||
* cl_khr_icd extension *
|
||||
************************/
|
||||
#define cl_khr_icd 1
|
||||
|
||||
/* cl_platform_info */
|
||||
#define CL_PLATFORM_ICD_SUFFIX_KHR 0x0920
|
||||
|
||||
/* Additional Error Codes */
|
||||
#define CL_PLATFORM_NOT_FOUND_KHR -1001
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clIcdGetPlatformIDsKHR(cl_uint /* num_entries */,
|
||||
cl_platform_id * /* platforms */,
|
||||
cl_uint * /* num_platforms */);
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clIcdGetPlatformIDsKHR_fn)(
|
||||
cl_uint /* num_entries */,
|
||||
cl_platform_id * /* platforms */,
|
||||
cl_uint * /* num_platforms */);
|
||||
|
||||
|
||||
/* Extension: cl_khr_image2D_buffer
|
||||
*
|
||||
* This extension allows a 2D image to be created from a cl_mem buffer without a copy.
|
||||
* The type associated with a 2D image created from a buffer in an OpenCL program is image2d_t.
|
||||
* Both the sampler and sampler-less read_image built-in functions are supported for 2D images
|
||||
* and 2D images created from a buffer. Similarly, the write_image built-ins are also supported
|
||||
* for 2D images created from a buffer.
|
||||
*
|
||||
* When the 2D image from buffer is created, the client must specify the width,
|
||||
* height, image format (i.e. channel order and channel data type) and optionally the row pitch
|
||||
*
|
||||
* The pitch specified must be a multiple of CL_DEVICE_IMAGE_PITCH_ALIGNMENT pixels.
|
||||
* The base address of the buffer must be aligned to CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT pixels.
|
||||
*/
|
||||
|
||||
/*************************************
|
||||
* cl_khr_initalize_memory extension *
|
||||
*************************************/
|
||||
|
||||
#define CL_CONTEXT_MEMORY_INITIALIZE_KHR 0x200E
|
||||
|
||||
|
||||
/**************************************
|
||||
* cl_khr_terminate_context extension *
|
||||
**************************************/
|
||||
|
||||
#define CL_DEVICE_TERMINATE_CAPABILITY_KHR 0x200F
|
||||
#define CL_CONTEXT_TERMINATE_KHR 0x2010
|
||||
|
||||
#define cl_khr_terminate_context 1
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL clTerminateContextKHR(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clTerminateContextKHR_fn)(cl_context /* context */) CL_EXT_SUFFIX__VERSION_1_2;
|
||||
|
||||
|
||||
/*
|
||||
* Extension: cl_khr_spir
|
||||
*
|
||||
* This extension adds support to create an OpenCL program object from a
|
||||
* Standard Portable Intermediate Representation (SPIR) instance
|
||||
*/
|
||||
|
||||
#define CL_DEVICE_SPIR_VERSIONS 0x40E0
|
||||
#define CL_PROGRAM_BINARY_TYPE_INTERMEDIATE 0x40E1
|
||||
|
||||
|
||||
/******************************************
|
||||
* cl_nv_device_attribute_query extension *
|
||||
******************************************/
|
||||
/* cl_nv_device_attribute_query extension - no extension #define since it has no functions */
|
||||
#define CL_DEVICE_COMPUTE_CAPABILITY_MAJOR_NV 0x4000
|
||||
#define CL_DEVICE_COMPUTE_CAPABILITY_MINOR_NV 0x4001
|
||||
#define CL_DEVICE_REGISTERS_PER_BLOCK_NV 0x4002
|
||||
#define CL_DEVICE_WARP_SIZE_NV 0x4003
|
||||
#define CL_DEVICE_GPU_OVERLAP_NV 0x4004
|
||||
#define CL_DEVICE_KERNEL_EXEC_TIMEOUT_NV 0x4005
|
||||
#define CL_DEVICE_INTEGRATED_MEMORY_NV 0x4006
|
||||
|
||||
/*********************************
|
||||
* cl_amd_device_attribute_query *
|
||||
*********************************/
|
||||
#define CL_DEVICE_PROFILING_TIMER_OFFSET_AMD 0x4036
|
||||
|
||||
/*********************************
|
||||
* cl_arm_printf extension
|
||||
*********************************/
|
||||
#define CL_PRINTF_CALLBACK_ARM 0x40B0
|
||||
#define CL_PRINTF_BUFFERSIZE_ARM 0x40B1
|
||||
|
||||
#ifdef CL_VERSION_1_1
|
||||
/***********************************
|
||||
* cl_ext_device_fission extension *
|
||||
***********************************/
|
||||
#define cl_ext_device_fission 1
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clReleaseDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
(CL_API_CALL *clReleaseDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clRetainDeviceEXT( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
(CL_API_CALL *clRetainDeviceEXT_fn)( cl_device_id /*device*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef cl_ulong cl_device_partition_property_ext;
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clCreateSubDevicesEXT( cl_device_id /*in_device*/,
|
||||
const cl_device_partition_property_ext * /* properties */,
|
||||
cl_uint /*num_entries*/,
|
||||
cl_device_id * /*out_devices*/,
|
||||
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
typedef CL_API_ENTRY cl_int
|
||||
( CL_API_CALL * clCreateSubDevicesEXT_fn)( cl_device_id /*in_device*/,
|
||||
const cl_device_partition_property_ext * /* properties */,
|
||||
cl_uint /*num_entries*/,
|
||||
cl_device_id * /*out_devices*/,
|
||||
cl_uint * /*num_devices*/ ) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
/* cl_device_partition_property_ext */
|
||||
#define CL_DEVICE_PARTITION_EQUALLY_EXT 0x4050
|
||||
#define CL_DEVICE_PARTITION_BY_COUNTS_EXT 0x4051
|
||||
#define CL_DEVICE_PARTITION_BY_NAMES_EXT 0x4052
|
||||
#define CL_DEVICE_PARTITION_BY_AFFINITY_DOMAIN_EXT 0x4053
|
||||
|
||||
/* clDeviceGetInfo selectors */
|
||||
#define CL_DEVICE_PARENT_DEVICE_EXT 0x4054
|
||||
#define CL_DEVICE_PARTITION_TYPES_EXT 0x4055
|
||||
#define CL_DEVICE_AFFINITY_DOMAINS_EXT 0x4056
|
||||
#define CL_DEVICE_REFERENCE_COUNT_EXT 0x4057
|
||||
#define CL_DEVICE_PARTITION_STYLE_EXT 0x4058
|
||||
|
||||
/* error codes */
|
||||
#define CL_DEVICE_PARTITION_FAILED_EXT -1057
|
||||
#define CL_INVALID_PARTITION_COUNT_EXT -1058
|
||||
#define CL_INVALID_PARTITION_NAME_EXT -1059
|
||||
|
||||
/* CL_AFFINITY_DOMAINs */
|
||||
#define CL_AFFINITY_DOMAIN_L1_CACHE_EXT 0x1
|
||||
#define CL_AFFINITY_DOMAIN_L2_CACHE_EXT 0x2
|
||||
#define CL_AFFINITY_DOMAIN_L3_CACHE_EXT 0x3
|
||||
#define CL_AFFINITY_DOMAIN_L4_CACHE_EXT 0x4
|
||||
#define CL_AFFINITY_DOMAIN_NUMA_EXT 0x10
|
||||
#define CL_AFFINITY_DOMAIN_NEXT_FISSIONABLE_EXT 0x100
|
||||
|
||||
/* cl_device_partition_property_ext list terminators */
|
||||
#define CL_PROPERTIES_LIST_END_EXT ((cl_device_partition_property_ext) 0)
|
||||
#define CL_PARTITION_BY_COUNTS_LIST_END_EXT ((cl_device_partition_property_ext) 0)
|
||||
#define CL_PARTITION_BY_NAMES_LIST_END_EXT ((cl_device_partition_property_ext) 0 - 1)
|
||||
|
||||
/*********************************
|
||||
* cl_qcom_ext_host_ptr extension
|
||||
*********************************/
|
||||
|
||||
#define CL_MEM_EXT_HOST_PTR_QCOM (1 << 29)
|
||||
|
||||
#define CL_DEVICE_EXT_MEM_PADDING_IN_BYTES_QCOM 0x40A0
|
||||
#define CL_DEVICE_PAGE_SIZE_QCOM 0x40A1
|
||||
#define CL_IMAGE_ROW_ALIGNMENT_QCOM 0x40A2
|
||||
#define CL_IMAGE_SLICE_ALIGNMENT_QCOM 0x40A3
|
||||
#define CL_MEM_HOST_UNCACHED_QCOM 0x40A4
|
||||
#define CL_MEM_HOST_WRITEBACK_QCOM 0x40A5
|
||||
#define CL_MEM_HOST_WRITETHROUGH_QCOM 0x40A6
|
||||
#define CL_MEM_HOST_WRITE_COMBINING_QCOM 0x40A7
|
||||
|
||||
typedef cl_uint cl_image_pitch_info_qcom;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetDeviceImageInfoQCOM(cl_device_id device,
|
||||
size_t image_width,
|
||||
size_t image_height,
|
||||
const cl_image_format *image_format,
|
||||
cl_image_pitch_info_qcom param_name,
|
||||
size_t param_value_size,
|
||||
void *param_value,
|
||||
size_t *param_value_size_ret);
|
||||
|
||||
typedef struct _cl_mem_ext_host_ptr
|
||||
{
|
||||
/* Type of external memory allocation. */
|
||||
/* Legal values will be defined in layered extensions. */
|
||||
cl_uint allocation_type;
|
||||
|
||||
/* Host cache policy for this external memory allocation. */
|
||||
cl_uint host_cache_policy;
|
||||
|
||||
} cl_mem_ext_host_ptr;
|
||||
|
||||
/*********************************
|
||||
* cl_qcom_ion_host_ptr extension
|
||||
*********************************/
|
||||
|
||||
#define CL_MEM_ION_HOST_PTR_QCOM 0x40A8
|
||||
|
||||
typedef struct _cl_mem_ion_host_ptr
|
||||
{
|
||||
/* Type of external memory allocation. */
|
||||
/* Must be CL_MEM_ION_HOST_PTR_QCOM for ION allocations. */
|
||||
cl_mem_ext_host_ptr ext_host_ptr;
|
||||
|
||||
/* ION file descriptor */
|
||||
int ion_filedesc;
|
||||
|
||||
/* Host pointer to the ION allocated memory */
|
||||
void* ion_hostptr;
|
||||
|
||||
} cl_mem_ion_host_ptr;
|
||||
|
||||
#endif /* CL_VERSION_1_1 */
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#endif /* __CL_EXT_H */
|
158 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl.h generated vendored
@@ -1,158 +0,0 @@
|
||||
/**********************************************************************************
|
||||
* Copyright (c) 2008 - 2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
**********************************************************************************/
|
||||
|
||||
#ifndef __OPENCL_CL_GL_H
|
||||
#define __OPENCL_CL_GL_H
|
||||
|
||||
#include <cl.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
typedef cl_uint cl_gl_object_type;
|
||||
typedef cl_uint cl_gl_texture_info;
|
||||
typedef cl_uint cl_gl_platform_info;
|
||||
typedef struct __GLsync *cl_GLsync;
|
||||
|
||||
/* cl_gl_object_type = 0x2000 - 0x200F enum values are currently taken */
|
||||
#define CL_GL_OBJECT_BUFFER 0x2000
|
||||
#define CL_GL_OBJECT_TEXTURE2D 0x2001
|
||||
#define CL_GL_OBJECT_TEXTURE3D 0x2002
|
||||
#define CL_GL_OBJECT_RENDERBUFFER 0x2003
|
||||
#define CL_GL_OBJECT_TEXTURE2D_ARRAY 0x200E
|
||||
#define CL_GL_OBJECT_TEXTURE1D 0x200F
|
||||
#define CL_GL_OBJECT_TEXTURE1D_ARRAY 0x2010
|
||||
#define CL_GL_OBJECT_TEXTURE_BUFFER 0x2011
|
||||
|
||||
/* cl_gl_texture_info */
|
||||
#define CL_GL_TEXTURE_TARGET 0x2004
|
||||
#define CL_GL_MIPMAP_LEVEL 0x2005
|
||||
#define CL_GL_NUM_SAMPLES 0x2012
|
||||
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLBuffer(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLuint /* bufobj */,
|
||||
int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_2;
|
||||
|
||||
extern CL_API_ENTRY cl_mem CL_API_CALL
|
||||
clCreateFromGLRenderbuffer(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLuint /* renderbuffer */,
|
||||
cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLObjectInfo(cl_mem /* memobj */,
|
||||
cl_gl_object_type * /* gl_object_type */,
|
||||
cl_GLuint * /* gl_object_name */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLTextureInfo(cl_mem /* memobj */,
|
||||
cl_gl_texture_info /* param_name */,
|
||||
size_t /* param_value_size */,
|
||||
void * /* param_value */,
|
||||
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clEnqueueAcquireGLObjects(cl_command_queue /* command_queue */,
|
||||
cl_uint /* num_objects */,
|
||||
const cl_mem * /* mem_objects */,
|
||||
cl_uint /* num_events_in_wait_list */,
|
||||
const cl_event * /* event_wait_list */,
|
||||
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clEnqueueReleaseGLObjects(cl_command_queue /* command_queue */,
|
||||
cl_uint /* num_objects */,
|
||||
const cl_mem * /* mem_objects */,
|
||||
cl_uint /* num_events_in_wait_list */,
|
||||
const cl_event * /* event_wait_list */,
|
||||
cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
|
||||
/* Deprecated OpenCL 1.1 APIs */
|
||||
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture2D(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
|
||||
|
||||
extern CL_API_ENTRY CL_EXT_PREFIX__VERSION_1_1_DEPRECATED cl_mem CL_API_CALL
|
||||
clCreateFromGLTexture3D(cl_context /* context */,
|
||||
cl_mem_flags /* flags */,
|
||||
cl_GLenum /* target */,
|
||||
cl_GLint /* miplevel */,
|
||||
cl_GLuint /* texture */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1_DEPRECATED;
|
||||
|
||||
/* cl_khr_gl_sharing extension */
|
||||
|
||||
#define cl_khr_gl_sharing 1
|
||||
|
||||
typedef cl_uint cl_gl_context_info;
|
||||
|
||||
/* Additional Error Codes */
|
||||
#define CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR -1000
|
||||
|
||||
/* cl_gl_context_info */
|
||||
#define CL_CURRENT_DEVICE_FOR_GL_CONTEXT_KHR 0x2006
|
||||
#define CL_DEVICES_FOR_GL_CONTEXT_KHR 0x2007
|
||||
|
||||
/* Additional cl_context_properties */
|
||||
#define CL_GL_CONTEXT_KHR 0x2008
|
||||
#define CL_EGL_DISPLAY_KHR 0x2009
|
||||
#define CL_GLX_DISPLAY_KHR 0x200A
|
||||
#define CL_WGL_HDC_KHR 0x200B
|
||||
#define CL_CGL_SHAREGROUP_KHR 0x200C
|
||||
|
||||
extern CL_API_ENTRY cl_int CL_API_CALL
|
||||
clGetGLContextInfoKHR(const cl_context_properties * /* properties */,
|
||||
cl_gl_context_info /* param_name */,
|
||||
size_t /* param_value_size */,
|
||||
void * /* param_value */,
|
||||
size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0;
|
||||
|
||||
typedef CL_API_ENTRY cl_int (CL_API_CALL *clGetGLContextInfoKHR_fn)(
|
||||
const cl_context_properties * properties,
|
||||
cl_gl_context_info param_name,
|
||||
size_t param_value_size,
|
||||
void * param_value,
|
||||
size_t * param_value_size_ret);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_CL_GL_H */
|
65 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_gl_ext.h generated vendored
@@ -1,65 +0,0 @@
|
||||
/**********************************************************************************
|
||||
* Copyright (c) 2008-2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
**********************************************************************************/
|
||||
|
||||
/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
|
||||
|
||||
/* cl_gl_ext.h contains vendor (non-KHR) OpenCL extensions which have */
|
||||
/* OpenGL dependencies. */
|
||||
|
||||
#ifndef __OPENCL_CL_GL_EXT_H
|
||||
#define __OPENCL_CL_GL_EXT_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <cl_gl.h>
|
||||
|
||||
/*
|
||||
* For each extension, follow this template
|
||||
* cl_VEN_extname extension */
|
||||
/* #define cl_VEN_extname 1
|
||||
* ... define new types, if any
|
||||
* ... define new tokens, if any
|
||||
* ... define new APIs, if any
|
||||
*
|
||||
* If you need GLtypes here, mirror them with a cl_GLtype, rather than including a GL header
|
||||
* This allows us to avoid having to decide whether to include GL headers or GLES here.
|
||||
*/
|
||||
|
||||
/*
|
||||
* cl_khr_gl_event extension
|
||||
* See section 9.9 in the OpenCL 1.1 spec for more information
|
||||
*/
|
||||
#define CL_COMMAND_GL_FENCE_SYNC_OBJECT_KHR 0x200D
|
||||
|
||||
extern CL_API_ENTRY cl_event CL_API_CALL
|
||||
clCreateEventFromGLsyncKHR(cl_context /* context */,
|
||||
cl_GLsync /* cl_GLsync */,
|
||||
cl_int * /* errcode_ret */) CL_EXT_SUFFIX__VERSION_1_1;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_CL_GL_EXT_H */
|
1278 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/cl_platform.h generated vendored
File diff suppressed because it is too large
43 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/headers/1.2/opencl.h generated vendored
@@ -1,43 +0,0 @@
|
||||
/*******************************************************************************
|
||||
* Copyright (c) 2008-2012 The Khronos Group Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and/or associated documentation files (the
|
||||
* "Materials"), to deal in the Materials without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sublicense, and/or sell copies of the Materials, and to
|
||||
* permit persons to whom the Materials are furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included
|
||||
* in all copies or substantial portions of the Materials.
|
||||
*
|
||||
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
|
||||
******************************************************************************/
|
||||
|
||||
/* $Revision: 11708 $ on $Date: 2010-06-13 23:36:24 -0700 (Sun, 13 Jun 2010) $ */
|
||||
|
||||
#ifndef __OPENCL_H
|
||||
#define __OPENCL_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#include <cl.h>
|
||||
#include <cl_gl.h>
|
||||
#include <cl_gl_ext.h>
|
||||
#include <cl_ext.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __OPENCL_H */
|
||||
|
83 Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/image.go generated vendored
@@ -1,83 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import (
	"image"
	"unsafe"
)

func (ctx *Context) CreateImage(flags MemFlag, imageFormat ImageFormat, imageDesc ImageDescription, data []byte) (*MemObject, error) {
	format := imageFormat.toCl()
	desc := imageDesc.toCl()
	var dataPtr unsafe.Pointer
	if data != nil {
		dataPtr = unsafe.Pointer(&data[0])
	}
	var err C.cl_int
	clBuffer := C.clCreateImage(ctx.clContext, C.cl_mem_flags(flags), &format, &desc, dataPtr, &err)
	if err != C.CL_SUCCESS {
		return nil, toError(err)
	}
	if clBuffer == nil {
		return nil, ErrUnknown
	}
	return newMemObject(clBuffer, len(data)), nil
}

func (ctx *Context) CreateImageSimple(flags MemFlag, width, height int, channelOrder ChannelOrder, channelDataType ChannelDataType, data []byte) (*MemObject, error) {
	format := ImageFormat{channelOrder, channelDataType}
	desc := ImageDescription{
		Type:   MemObjectTypeImage2D,
		Width:  width,
		Height: height,
	}
	return ctx.CreateImage(flags, format, desc, data)
}

func (ctx *Context) CreateImageFromImage(flags MemFlag, img image.Image) (*MemObject, error) {
	switch m := img.(type) {
	case *image.Gray:
		format := ImageFormat{ChannelOrderIntensity, ChannelDataTypeUNormInt8}
		desc := ImageDescription{
			Type:     MemObjectTypeImage2D,
			Width:    m.Bounds().Dx(),
			Height:   m.Bounds().Dy(),
			RowPitch: m.Stride,
		}
		return ctx.CreateImage(flags, format, desc, m.Pix)
	case *image.RGBA:
		format := ImageFormat{ChannelOrderRGBA, ChannelDataTypeUNormInt8}
		desc := ImageDescription{
			Type:     MemObjectTypeImage2D,
			Width:    m.Bounds().Dx(),
			Height:   m.Bounds().Dy(),
			RowPitch: m.Stride,
		}
		return ctx.CreateImage(flags, format, desc, m.Pix)
	}

	b := img.Bounds()
	w := b.Dx()
	h := b.Dy()
	data := make([]byte, w*h*4)
	dataOffset := 0
	for y := 0; y < h; y++ {
		for x := 0; x < w; x++ {
			c := img.At(x+b.Min.X, y+b.Min.Y)
			r, g, b, a := c.RGBA()
			data[dataOffset] = uint8(r >> 8)
			data[dataOffset+1] = uint8(g >> 8)
			data[dataOffset+2] = uint8(b >> 8)
			data[dataOffset+3] = uint8(a >> 8)
			dataOffset += 4
		}
	}
	return ctx.CreateImageSimple(flags, w, h, ChannelOrderRGBA, ChannelDataTypeUNormInt8, data)
}
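CreateImageFromImage above special-cases *image.Gray and *image.RGBA and falls back to an RGBA conversion loop for any other image type. A hedged calling sketch, not part of the diff (requires building with the cl12 tag, since image.go is guarded by "+build cl12"; cl.MemReadOnly is an assumed flag name):

// uploadRGBA creates a 2D device image from a Go image, using the
// CreateImageFromImage helper shown above.
func uploadRGBA(ctx *cl.Context, img *image.RGBA) (*cl.MemObject, error) {
	return ctx.CreateImageFromImage(cl.MemReadOnly, img) // flag name assumed
}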
127  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/kernel.go  (generated, vendored)
@ -1,127 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type ErrUnsupportedArgumentType struct {
|
||||
Index int
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
func (e ErrUnsupportedArgumentType) Error() string {
|
||||
return fmt.Sprintf("cl: unsupported argument type for index %d: %+v", e.Index, e.Value)
|
||||
}
|
||||
|
||||
type Kernel struct {
|
||||
clKernel C.cl_kernel
|
||||
name string
|
||||
}
|
||||
|
||||
type LocalBuffer int
|
||||
|
||||
func releaseKernel(k *Kernel) {
|
||||
if k.clKernel != nil {
|
||||
C.clReleaseKernel(k.clKernel)
|
||||
k.clKernel = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (k *Kernel) Release() {
|
||||
releaseKernel(k)
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgs(args ...interface{}) error {
|
||||
for index, arg := range args {
|
||||
if err := k.SetArg(index, arg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArg(index int, arg interface{}) error {
|
||||
switch val := arg.(type) {
|
||||
case uint8:
|
||||
return k.SetArgUint8(index, val)
|
||||
case int8:
|
||||
return k.SetArgInt8(index, val)
|
||||
case uint32:
|
||||
return k.SetArgUint32(index, val)
|
||||
case uint64:
|
||||
return k.SetArgUint64(index, val)
|
||||
case int32:
|
||||
return k.SetArgInt32(index, val)
|
||||
case float32:
|
||||
return k.SetArgFloat32(index, val)
|
||||
case *MemObject:
|
||||
return k.SetArgBuffer(index, val)
|
||||
case LocalBuffer:
|
||||
return k.SetArgLocal(index, int(val))
|
||||
default:
|
||||
return ErrUnsupportedArgumentType{Index: index, Value: arg}
|
||||
}
|
||||
}
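// A short sketch of argument binding with SetArgs/SetArg above, assuming a *cl.Kernel
// and two *cl.MemObject buffers obtained elsewhere; the kernel signature it targets is
// hypothetical (e.g. global in/out buffers, a float scalar and a __local scratch buffer).
func bindArgs(k *cl.Kernel, in, out *cl.MemObject, factor float32) error {
    // Buffers, supported scalar types and local allocations can all go through SetArgs;
    // cl.LocalBuffer(n) reserves n bytes of __local memory for that argument slot.
    return k.SetArgs(in, out, factor, cl.LocalBuffer(256*4))
}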
|
||||
|
||||
func (k *Kernel) SetArgBuffer(index int, buffer *MemObject) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(buffer.clMem)), unsafe.Pointer(&buffer.clMem))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgFloat32(index int, val float32) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgInt8(index int, val int8) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUint8(index int, val uint8) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgInt32(index int, val int32) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUint32(index int, val uint32) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUint64(index int, val uint64) error {
|
||||
return k.SetArgUnsafe(index, int(unsafe.Sizeof(val)), unsafe.Pointer(&val))
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgLocal(index int, size int) error {
|
||||
return k.SetArgUnsafe(index, size, nil)
|
||||
}
|
||||
|
||||
func (k *Kernel) SetArgUnsafe(index, argSize int, arg unsafe.Pointer) error {
|
||||
//fmt.Println("FUNKY: ", index, argSize)
|
||||
return toError(C.clSetKernelArg(k.clKernel, C.cl_uint(index), C.size_t(argSize), arg))
|
||||
}
|
||||
|
||||
func (k *Kernel) PreferredWorkGroupSizeMultiple(device *Device) (int, error) {
|
||||
var size C.size_t
|
||||
err := C.clGetKernelWorkGroupInfo(k.clKernel, device.nullableId(), C.CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE, C.size_t(unsafe.Sizeof(size)), unsafe.Pointer(&size), nil)
|
||||
return int(size), toError(err)
|
||||
}
|
||||
|
||||
func (k *Kernel) WorkGroupSize(device *Device) (int, error) {
|
||||
var size C.size_t
|
||||
err := C.clGetKernelWorkGroupInfo(k.clKernel, device.nullableId(), C.CL_KERNEL_WORK_GROUP_SIZE, C.size_t(unsafe.Sizeof(size)), unsafe.Pointer(&size), nil)
|
||||
return int(size), toError(err)
|
||||
}
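// A sketch of picking a global size from the queries above: round the problem size up
// to a multiple of the device's preferred work-group multiple. Assumes a *cl.Kernel and
// *cl.Device from elsewhere; the rounding heuristic is illustrative, not required.
func roundedGlobalSize(k *cl.Kernel, d *cl.Device, n int) (int, error) {
    multiple, err := k.PreferredWorkGroupSizeMultiple(d)
    if err != nil {
        return 0, err
    }
    // Round n up to the next multiple so every work-group can be fully populated.
    return ((n + multiple - 1) / multiple) * multiple, nil
}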
|
||||
|
||||
func (k *Kernel) NumArgs() (int, error) {
|
||||
var num C.cl_uint
|
||||
err := C.clGetKernelInfo(k.clKernel, C.CL_KERNEL_NUM_ARGS, C.size_t(unsafe.Sizeof(num)), unsafe.Pointer(&num), nil)
|
||||
return int(num), toError(err)
|
||||
}
|
7  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/kernel10.go  (generated, vendored)
@@ -1,7 +0,0 @@
// +build !cl12

package cl

func (k *Kernel) ArgName(index int) (string, error) {
    return "", ErrUnsupported
}
20  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/kernel12.go  (generated, vendored)
@@ -1,20 +0,0 @@
// +build cl12

package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"
import "unsafe"

func (k *Kernel) ArgName(index int) (string, error) {
    var strC [1024]byte
    var strN C.size_t
    if err := C.clGetKernelArgInfo(k.clKernel, C.cl_uint(index), C.CL_KERNEL_ARG_NAME, 1024, unsafe.Pointer(&strC[0]), &strN); err != C.CL_SUCCESS {
        return "", toError(err)
    }
    return string(strC[:strN]), nil
}
83  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/platform.go  (generated, vendored)
@ -1,83 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
const maxPlatforms = 32
|
||||
|
||||
type Platform struct {
|
||||
id C.cl_platform_id
|
||||
}
|
||||
|
||||
// Obtain the list of platforms available.
|
||||
func GetPlatforms() ([]*Platform, error) {
|
||||
var platformIds [maxPlatforms]C.cl_platform_id
|
||||
var nPlatforms C.cl_uint
|
||||
if err := C.clGetPlatformIDs(C.cl_uint(maxPlatforms), &platformIds[0], &nPlatforms); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
platforms := make([]*Platform, nPlatforms)
|
||||
for i := 0; i < int(nPlatforms); i++ {
|
||||
platforms[i] = &Platform{id: platformIds[i]}
|
||||
}
|
||||
return platforms, nil
|
||||
}
|
||||
|
||||
func (p *Platform) GetDevices(deviceType DeviceType) ([]*Device, error) {
|
||||
return GetDevices(p, deviceType)
|
||||
}
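// A minimal enumeration sketch using GetPlatforms and the string accessors defined
// below; it assumes this package is imported as cl, the fmt package, and a DeviceType
// value from the binding's device constants (defined in device.go, not shown here).
func listPlatforms(deviceType cl.DeviceType) error {
    platforms, err := cl.GetPlatforms()
    if err != nil {
        return err
    }
    for _, p := range platforms {
        fmt.Printf("%s (%s, %s)\n", p.Name(), p.Vendor(), p.Version())
        devices, err := p.GetDevices(deviceType)
        if err != nil {
            return err
        }
        fmt.Printf("  %d device(s)\n", len(devices))
    }
    return nil
}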
|
||||
|
||||
func (p *Platform) getInfoString(param C.cl_platform_info) (string, error) {
|
||||
var strC [2048]byte
|
||||
var strN C.size_t
|
||||
if err := C.clGetPlatformInfo(p.id, param, 2048, unsafe.Pointer(&strC[0]), &strN); err != C.CL_SUCCESS {
|
||||
return "", toError(err)
|
||||
}
|
||||
return string(strC[:(strN - 1)]), nil
|
||||
}
|
||||
|
||||
func (p *Platform) Name() string {
|
||||
if str, err := p.getInfoString(C.CL_PLATFORM_NAME); err != nil {
|
||||
panic("Platform.Name() should never fail")
|
||||
} else {
|
||||
return str
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Platform) Vendor() string {
|
||||
if str, err := p.getInfoString(C.CL_PLATFORM_VENDOR); err != nil {
|
||||
panic("Platform.Vendor() should never fail")
|
||||
} else {
|
||||
return str
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Platform) Profile() string {
|
||||
if str, err := p.getInfoString(C.CL_PLATFORM_PROFILE); err != nil {
|
||||
panic("Platform.Profile() should never fail")
|
||||
} else {
|
||||
return str
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Platform) Version() string {
|
||||
if str, err := p.getInfoString(C.CL_PLATFORM_VERSION); err != nil {
|
||||
panic("Platform.Version() should never fail")
|
||||
} else {
|
||||
return str
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Platform) Extensions() string {
|
||||
if str, err := p.getInfoString(C.CL_PLATFORM_EXTENSIONS); err != nil {
|
||||
panic("Platform.Extensions() should never fail")
|
||||
} else {
|
||||
return str
|
||||
}
|
||||
}
|
105  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/program.go  (generated, vendored)
@ -1,105 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #include <stdlib.h>
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type BuildError struct {
|
||||
Message string
|
||||
Device *Device
|
||||
}
|
||||
|
||||
func (e BuildError) Error() string {
|
||||
if e.Device != nil {
|
||||
return fmt.Sprintf("cl: build error on %q: %s", e.Device.Name(), e.Message)
|
||||
} else {
|
||||
return fmt.Sprintf("cl: build error: %s", e.Message)
|
||||
}
|
||||
}
|
||||
|
||||
type Program struct {
|
||||
clProgram C.cl_program
|
||||
devices []*Device
|
||||
}
|
||||
|
||||
func releaseProgram(p *Program) {
|
||||
if p.clProgram != nil {
|
||||
C.clReleaseProgram(p.clProgram)
|
||||
p.clProgram = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Program) Release() {
|
||||
releaseProgram(p)
|
||||
}
|
||||
|
||||
func (p *Program) BuildProgram(devices []*Device, options string) error {
|
||||
var cOptions *C.char
|
||||
if options != "" {
|
||||
cOptions = C.CString(options)
|
||||
defer C.free(unsafe.Pointer(cOptions))
|
||||
}
|
||||
var deviceList []C.cl_device_id
|
||||
var deviceListPtr *C.cl_device_id
|
||||
numDevices := C.cl_uint(len(devices))
|
||||
if devices != nil && len(devices) > 0 {
|
||||
deviceList = buildDeviceIdList(devices)
|
||||
deviceListPtr = &deviceList[0]
|
||||
}
|
||||
if err := C.clBuildProgram(p.clProgram, numDevices, deviceListPtr, cOptions, nil, nil); err != C.CL_SUCCESS {
|
||||
buffer := make([]byte, 4096)
|
||||
var bLen C.size_t
|
||||
var err C.cl_int
|
||||
|
||||
for _, dev := range p.devices {
|
||||
for i := 2; i >= 0; i-- {
|
||||
err = C.clGetProgramBuildInfo(p.clProgram, dev.id, C.CL_PROGRAM_BUILD_LOG, C.size_t(len(buffer)), unsafe.Pointer(&buffer[0]), &bLen)
|
||||
if err == C.CL_INVALID_VALUE && i > 0 && bLen < 1024*1024 {
|
||||
// INVALID_VALUE probably means our buffer isn't large enough
|
||||
buffer = make([]byte, bLen)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != C.CL_SUCCESS {
|
||||
return toError(err)
|
||||
}
|
||||
|
||||
if bLen > 1 {
|
||||
return BuildError{
|
||||
Device: dev,
|
||||
Message: string(buffer[:bLen-1]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return BuildError{
|
||||
Device: nil,
|
||||
Message: "build failed and produced no log entries",
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Program) CreateKernel(name string) (*Kernel, error) {
|
||||
cName := C.CString(name)
|
||||
defer C.free(unsafe.Pointer(cName))
|
||||
var err C.cl_int
|
||||
clKernel := C.clCreateKernel(p.clProgram, cName, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
kernel := &Kernel{clKernel: clKernel, name: name}
|
||||
runtime.SetFinalizer(kernel, releaseKernel)
|
||||
return kernel, nil
|
||||
}
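// A sketch of the build-and-extract flow implemented above, assuming a *cl.Program
// created elsewhere (e.g. from source via the context) and a list of target devices;
// the kernel name "square" and the build option are purely illustrative.
func buildAndGetKernel(p *cl.Program, devices []*cl.Device) (*cl.Kernel, error) {
    if err := p.BuildProgram(devices, "-cl-fast-relaxed-math"); err != nil {
        // When the compiler produced a log, err is a cl.BuildError whose Error()
        // already includes the failing device name and the log text.
        return nil, err
    }
    return p.CreateKernel("square")
}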
|
193  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/queue.go  (generated, vendored)
@ -1,193 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import "unsafe"
|
||||
|
||||
type CommandQueueProperty int
|
||||
|
||||
const (
|
||||
CommandQueueOutOfOrderExecModeEnable CommandQueueProperty = C.CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE
|
||||
CommandQueueProfilingEnable CommandQueueProperty = C.CL_QUEUE_PROFILING_ENABLE
|
||||
)
|
||||
|
||||
type CommandQueue struct {
|
||||
clQueue C.cl_command_queue
|
||||
device *Device
|
||||
}
|
||||
|
||||
func releaseCommandQueue(q *CommandQueue) {
|
||||
if q.clQueue != nil {
|
||||
C.clReleaseCommandQueue(q.clQueue)
|
||||
q.clQueue = nil
|
||||
}
|
||||
}
|
||||
|
||||
// Call clReleaseCommandQueue on the CommandQueue. Using the CommandQueue after Release will cause a panic.
|
||||
func (q *CommandQueue) Release() {
|
||||
releaseCommandQueue(q)
|
||||
}
|
||||
|
||||
// Blocks until all previously queued OpenCL commands in a command-queue are issued to the associated device and have completed.
|
||||
func (q *CommandQueue) Finish() error {
|
||||
return toError(C.clFinish(q.clQueue))
|
||||
}
|
||||
|
||||
// Issues all previously queued OpenCL commands in a command-queue to the device associated with the command-queue.
|
||||
func (q *CommandQueue) Flush() error {
|
||||
return toError(C.clFlush(q.clQueue))
|
||||
}
|
||||
|
||||
// Enqueues a command to map a region of the buffer object given by buffer into the host address space and returns a pointer to this mapped region.
|
||||
func (q *CommandQueue) EnqueueMapBuffer(buffer *MemObject, blocking bool, flags MapFlag, offset, size int, eventWaitList []*Event) (*MappedMemObject, *Event, error) {
|
||||
var event C.cl_event
|
||||
var err C.cl_int
|
||||
ptr := C.clEnqueueMapBuffer(q.clQueue, buffer.clMem, clBool(blocking), flags.toCl(), C.size_t(offset), C.size_t(size), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, nil, toError(err)
|
||||
}
|
||||
ev := newEvent(event)
|
||||
if ptr == nil {
|
||||
return nil, ev, ErrUnknown
|
||||
}
|
||||
return &MappedMemObject{ptr: ptr, size: size}, ev, nil
|
||||
}
|
||||
|
||||
// Enqueues a command to map a region of an image object into the host address space and returns a pointer to this mapped region.
|
||||
func (q *CommandQueue) EnqueueMapImage(buffer *MemObject, blocking bool, flags MapFlag, origin, region [3]int, eventWaitList []*Event) (*MappedMemObject, *Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
var err C.cl_int
|
||||
var rowPitch, slicePitch C.size_t
|
||||
ptr := C.clEnqueueMapImage(q.clQueue, buffer.clMem, clBool(blocking), flags.toCl(), &cOrigin[0], &cRegion[0], &rowPitch, &slicePitch, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event, &err)
|
||||
if err != C.CL_SUCCESS {
|
||||
return nil, nil, toError(err)
|
||||
}
|
||||
ev := newEvent(event)
|
||||
if ptr == nil {
|
||||
return nil, ev, ErrUnknown
|
||||
}
|
||||
size := 0 // TODO: could calculate this
|
||||
return &MappedMemObject{ptr: ptr, size: size, rowPitch: int(rowPitch), slicePitch: int(slicePitch)}, ev, nil
|
||||
}
|
||||
|
||||
// Enqueues a command to unmap a previously mapped region of a memory object.
|
||||
func (q *CommandQueue) EnqueueUnmapMemObject(buffer *MemObject, mappedObj *MappedMemObject, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
if err := C.clEnqueueUnmapMemObject(q.clQueue, buffer.clMem, mappedObj.ptr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event); err != C.CL_SUCCESS {
|
||||
return nil, toError(err)
|
||||
}
|
||||
return newEvent(event), nil
|
||||
}
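// A sketch of the map/modify/unmap round trip provided by the three calls above,
// assuming a *cl.CommandQueue and a *cl.MemObject buffer of at least size bytes
// created elsewhere; a blocking map keeps the example free of event handling.
func zeroBuffer(q *cl.CommandQueue, buf *cl.MemObject, size int) error {
    mapped, _, err := q.EnqueueMapBuffer(buf, true, cl.MapFlagWrite, 0, size, nil)
    if err != nil {
        return err
    }
    // ByteSlice aliases the mapped host memory, so these writes land in the buffer region.
    b := mapped.ByteSlice()
    for i := range b {
        b[i] = 0
    }
    if _, err := q.EnqueueUnmapMemObject(buf, mapped, nil); err != nil {
        return err
    }
    // Finish ensures the unmap has completed before the memory is reused.
    return q.Finish()
}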
|
||||
|
||||
// Enqueues a command to copy a buffer object to another buffer object.
|
||||
func (q *CommandQueue) EnqueueCopyBuffer(srcBuffer, dstBuffer *MemObject, srcOffset, dstOffset, byteCount int, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueCopyBuffer(q.clQueue, srcBuffer.clMem, dstBuffer.clMem, C.size_t(srcOffset), C.size_t(dstOffset), C.size_t(byteCount), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueue commands to write to a buffer object from host memory.
|
||||
func (q *CommandQueue) EnqueueWriteBuffer(buffer *MemObject, blocking bool, offset, dataSize int, dataPtr unsafe.Pointer, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueWriteBuffer(q.clQueue, buffer.clMem, clBool(blocking), C.size_t(offset), C.size_t(dataSize), dataPtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueWriteBufferFloat32(buffer *MemObject, blocking bool, offset int, data []float32, eventWaitList []*Event) (*Event, error) {
|
||||
dataPtr := unsafe.Pointer(&data[0])
|
||||
dataSize := int(unsafe.Sizeof(data[0])) * len(data)
|
||||
return q.EnqueueWriteBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)
|
||||
}
|
||||
|
||||
// Enqueue commands to read from a buffer object to host memory.
|
||||
func (q *CommandQueue) EnqueueReadBuffer(buffer *MemObject, blocking bool, offset, dataSize int, dataPtr unsafe.Pointer, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueReadBuffer(q.clQueue, buffer.clMem, clBool(blocking), C.size_t(offset), C.size_t(dataSize), dataPtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueReadBufferFloat32(buffer *MemObject, blocking bool, offset int, data []float32, eventWaitList []*Event) (*Event, error) {
|
||||
dataPtr := unsafe.Pointer(&data[0])
|
||||
dataSize := int(unsafe.Sizeof(data[0])) * len(data)
|
||||
return q.EnqueueReadBuffer(buffer, blocking, offset, dataSize, dataPtr, eventWaitList)
|
||||
}
|
||||
|
||||
// Enqueues a command to execute a kernel on a device.
|
||||
func (q *CommandQueue) EnqueueNDRangeKernel(kernel *Kernel, globalWorkOffset, globalWorkSize, localWorkSize []int, eventWaitList []*Event) (*Event, error) {
|
||||
workDim := len(globalWorkSize)
|
||||
var globalWorkOffsetList []C.size_t
|
||||
var globalWorkOffsetPtr *C.size_t
|
||||
if globalWorkOffset != nil {
|
||||
globalWorkOffsetList = make([]C.size_t, len(globalWorkOffset))
|
||||
for i, off := range globalWorkOffset {
|
||||
globalWorkOffsetList[i] = C.size_t(off)
|
||||
}
|
||||
globalWorkOffsetPtr = &globalWorkOffsetList[0]
|
||||
}
|
||||
var globalWorkSizeList []C.size_t
|
||||
var globalWorkSizePtr *C.size_t
|
||||
if globalWorkSize != nil {
|
||||
globalWorkSizeList = make([]C.size_t, len(globalWorkSize))
|
||||
for i, off := range globalWorkSize {
|
||||
globalWorkSizeList[i] = C.size_t(off)
|
||||
}
|
||||
globalWorkSizePtr = &globalWorkSizeList[0]
|
||||
}
|
||||
var localWorkSizeList []C.size_t
|
||||
var localWorkSizePtr *C.size_t
|
||||
if localWorkSize != nil {
|
||||
localWorkSizeList = make([]C.size_t, len(localWorkSize))
|
||||
for i, off := range localWorkSize {
|
||||
localWorkSizeList[i] = C.size_t(off)
|
||||
}
|
||||
localWorkSizePtr = &localWorkSizeList[0]
|
||||
}
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueNDRangeKernel(q.clQueue, kernel.clKernel, C.cl_uint(workDim), globalWorkOffsetPtr, globalWorkSizePtr, localWorkSizePtr, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
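// A sketch tying the queue operations above to a kernel launch, assuming the queue,
// kernel and float32 buffers were created elsewhere and that the kernel takes exactly
// the two buffers as arguments; sizes and the nil local work size are illustrative.
func runKernel(q *cl.CommandQueue, k *cl.Kernel, in, out *cl.MemObject, host []float32) ([]float32, error) {
    if _, err := q.EnqueueWriteBufferFloat32(in, true, 0, host, nil); err != nil {
        return nil, err
    }
    if err := k.SetArgs(in, out); err != nil {
        return nil, err
    }
    // A nil local work size lets the OpenCL runtime pick the work-group size.
    if _, err := q.EnqueueNDRangeKernel(k, nil, []int{len(host)}, nil, nil); err != nil {
        return nil, err
    }
    result := make([]float32, len(host))
    if _, err := q.EnqueueReadBufferFloat32(out, true, 0, result, nil); err != nil {
        return nil, err
    }
    return result, q.Finish()
}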
|
||||
|
||||
// Enqueues a command to read from a 2D or 3D image object to host memory.
|
||||
func (q *CommandQueue) EnqueueReadImage(image *MemObject, blocking bool, origin, region [3]int, rowPitch, slicePitch int, data []byte, eventWaitList []*Event) (*Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueReadImage(q.clQueue, image.clMem, clBool(blocking), &cOrigin[0], &cRegion[0], C.size_t(rowPitch), C.size_t(slicePitch), unsafe.Pointer(&data[0]), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a command to write to a 2D or 3D image object from host memory.
|
||||
func (q *CommandQueue) EnqueueWriteImage(image *MemObject, blocking bool, origin, region [3]int, rowPitch, slicePitch int, data []byte, eventWaitList []*Event) (*Event, error) {
|
||||
cOrigin := sizeT3(origin)
|
||||
cRegion := sizeT3(region)
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueWriteImage(q.clQueue, image.clMem, clBool(blocking), &cOrigin[0], &cRegion[0], C.size_t(rowPitch), C.size_t(slicePitch), unsafe.Pointer(&data[0]), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
func (q *CommandQueue) EnqueueFillBuffer(buffer *MemObject, pattern unsafe.Pointer, patternSize, offset, size int, eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueFillBuffer(q.clQueue, buffer.clMem, pattern, C.size_t(patternSize), C.size_t(offset), C.size_t(size), C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// A synchronization point that enqueues a barrier operation.
|
||||
func (q *CommandQueue) EnqueueBarrierWithWaitList(eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueBarrierWithWaitList(q.clQueue, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
||||
|
||||
// Enqueues a marker command which waits for either a list of events to complete, or all previously enqueued commands to complete.
|
||||
func (q *CommandQueue) EnqueueMarkerWithWaitList(eventWaitList []*Event) (*Event, error) {
|
||||
var event C.cl_event
|
||||
err := toError(C.clEnqueueMarkerWithWaitList(q.clQueue, C.cl_uint(len(eventWaitList)), eventListPtr(eventWaitList), &event))
|
||||
return newEvent(event), err
|
||||
}
|
487  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types.go  (generated, vendored)
@ -1,487 +0,0 @@
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrUnknown = errors.New("cl: unknown error") // Generally an unexpected result from an OpenCL function (e.g. CL_SUCCESS but null pointer)
|
||||
)
|
||||
|
||||
type ErrOther int
|
||||
|
||||
func (e ErrOther) Error() string {
|
||||
return fmt.Sprintf("cl: error %d", int(e))
|
||||
}
|
||||
|
||||
var (
|
||||
ErrDeviceNotFound = errors.New("cl: Device Not Found")
|
||||
ErrDeviceNotAvailable = errors.New("cl: Device Not Available")
|
||||
ErrCompilerNotAvailable = errors.New("cl: Compiler Not Available")
|
||||
ErrMemObjectAllocationFailure = errors.New("cl: Mem Object Allocation Failure")
|
||||
ErrOutOfResources = errors.New("cl: Out Of Resources")
|
||||
ErrOutOfHostMemory = errors.New("cl: Out Of Host Memory")
|
||||
ErrProfilingInfoNotAvailable = errors.New("cl: Profiling Info Not Available")
|
||||
ErrMemCopyOverlap = errors.New("cl: Mem Copy Overlap")
|
||||
ErrImageFormatMismatch = errors.New("cl: Image Format Mismatch")
|
||||
ErrImageFormatNotSupported = errors.New("cl: Image Format Not Supported")
|
||||
ErrBuildProgramFailure = errors.New("cl: Build Program Failure")
|
||||
ErrMapFailure = errors.New("cl: Map Failure")
|
||||
ErrMisalignedSubBufferOffset = errors.New("cl: Misaligned Sub Buffer Offset")
|
||||
ErrExecStatusErrorForEventsInWaitList = errors.New("cl: Exec Status Error For Events In Wait List")
|
||||
ErrCompileProgramFailure = errors.New("cl: Compile Program Failure")
|
||||
ErrLinkerNotAvailable = errors.New("cl: Linker Not Available")
|
||||
ErrLinkProgramFailure = errors.New("cl: Link Program Failure")
|
||||
ErrDevicePartitionFailed = errors.New("cl: Device Partition Failed")
|
||||
ErrKernelArgInfoNotAvailable = errors.New("cl: Kernel Arg Info Not Available")
|
||||
ErrInvalidValue = errors.New("cl: Invalid Value")
|
||||
ErrInvalidDeviceType = errors.New("cl: Invalid Device Type")
|
||||
ErrInvalidPlatform = errors.New("cl: Invalid Platform")
|
||||
ErrInvalidDevice = errors.New("cl: Invalid Device")
|
||||
ErrInvalidContext = errors.New("cl: Invalid Context")
|
||||
ErrInvalidQueueProperties = errors.New("cl: Invalid Queue Properties")
|
||||
ErrInvalidCommandQueue = errors.New("cl: Invalid Command Queue")
|
||||
ErrInvalidHostPtr = errors.New("cl: Invalid Host Ptr")
|
||||
ErrInvalidMemObject = errors.New("cl: Invalid Mem Object")
|
||||
ErrInvalidImageFormatDescriptor = errors.New("cl: Invalid Image Format Descriptor")
|
||||
ErrInvalidImageSize = errors.New("cl: Invalid Image Size")
|
||||
ErrInvalidSampler = errors.New("cl: Invalid Sampler")
|
||||
ErrInvalidBinary = errors.New("cl: Invalid Binary")
|
||||
ErrInvalidBuildOptions = errors.New("cl: Invalid Build Options")
|
||||
ErrInvalidProgram = errors.New("cl: Invalid Program")
|
||||
ErrInvalidProgramExecutable = errors.New("cl: Invalid Program Executable")
|
||||
ErrInvalidKernelName = errors.New("cl: Invalid Kernel Name")
|
||||
ErrInvalidKernelDefinition = errors.New("cl: Invalid Kernel Definition")
|
||||
ErrInvalidKernel = errors.New("cl: Invalid Kernel")
|
||||
ErrInvalidArgIndex = errors.New("cl: Invalid Arg Index")
|
||||
ErrInvalidArgValue = errors.New("cl: Invalid Arg Value")
|
||||
ErrInvalidArgSize = errors.New("cl: Invalid Arg Size")
|
||||
ErrInvalidKernelArgs = errors.New("cl: Invalid Kernel Args")
|
||||
ErrInvalidWorkDimension = errors.New("cl: Invalid Work Dimension")
|
||||
ErrInvalidWorkGroupSize = errors.New("cl: Invalid Work Group Size")
|
||||
ErrInvalidWorkItemSize = errors.New("cl: Invalid Work Item Size")
|
||||
ErrInvalidGlobalOffset = errors.New("cl: Invalid Global Offset")
|
||||
ErrInvalidEventWaitList = errors.New("cl: Invalid Event Wait List")
|
||||
ErrInvalidEvent = errors.New("cl: Invalid Event")
|
||||
ErrInvalidOperation = errors.New("cl: Invalid Operation")
|
||||
ErrInvalidGlObject = errors.New("cl: Invalid Gl Object")
|
||||
ErrInvalidBufferSize = errors.New("cl: Invalid Buffer Size")
|
||||
ErrInvalidMipLevel = errors.New("cl: Invalid Mip Level")
|
||||
ErrInvalidGlobalWorkSize = errors.New("cl: Invalid Global Work Size")
|
||||
ErrInvalidProperty = errors.New("cl: Invalid Property")
|
||||
ErrInvalidImageDescriptor = errors.New("cl: Invalid Image Descriptor")
|
||||
ErrInvalidCompilerOptions = errors.New("cl: Invalid Compiler Options")
|
||||
ErrInvalidLinkerOptions = errors.New("cl: Invalid Linker Options")
|
||||
ErrInvalidDevicePartitionCount = errors.New("cl: Invalid Device Partition Count")
|
||||
)
|
||||
var errorMap = map[C.cl_int]error{
|
||||
C.CL_SUCCESS: nil,
|
||||
C.CL_DEVICE_NOT_FOUND: ErrDeviceNotFound,
|
||||
C.CL_DEVICE_NOT_AVAILABLE: ErrDeviceNotAvailable,
|
||||
C.CL_COMPILER_NOT_AVAILABLE: ErrCompilerNotAvailable,
|
||||
C.CL_MEM_OBJECT_ALLOCATION_FAILURE: ErrMemObjectAllocationFailure,
|
||||
C.CL_OUT_OF_RESOURCES: ErrOutOfResources,
|
||||
C.CL_OUT_OF_HOST_MEMORY: ErrOutOfHostMemory,
|
||||
C.CL_PROFILING_INFO_NOT_AVAILABLE: ErrProfilingInfoNotAvailable,
|
||||
C.CL_MEM_COPY_OVERLAP: ErrMemCopyOverlap,
|
||||
C.CL_IMAGE_FORMAT_MISMATCH: ErrImageFormatMismatch,
|
||||
C.CL_IMAGE_FORMAT_NOT_SUPPORTED: ErrImageFormatNotSupported,
|
||||
C.CL_BUILD_PROGRAM_FAILURE: ErrBuildProgramFailure,
|
||||
C.CL_MAP_FAILURE: ErrMapFailure,
|
||||
C.CL_MISALIGNED_SUB_BUFFER_OFFSET: ErrMisalignedSubBufferOffset,
|
||||
C.CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST: ErrExecStatusErrorForEventsInWaitList,
|
||||
C.CL_INVALID_VALUE: ErrInvalidValue,
|
||||
C.CL_INVALID_DEVICE_TYPE: ErrInvalidDeviceType,
|
||||
C.CL_INVALID_PLATFORM: ErrInvalidPlatform,
|
||||
C.CL_INVALID_DEVICE: ErrInvalidDevice,
|
||||
C.CL_INVALID_CONTEXT: ErrInvalidContext,
|
||||
C.CL_INVALID_QUEUE_PROPERTIES: ErrInvalidQueueProperties,
|
||||
C.CL_INVALID_COMMAND_QUEUE: ErrInvalidCommandQueue,
|
||||
C.CL_INVALID_HOST_PTR: ErrInvalidHostPtr,
|
||||
C.CL_INVALID_MEM_OBJECT: ErrInvalidMemObject,
|
||||
C.CL_INVALID_IMAGE_FORMAT_DESCRIPTOR: ErrInvalidImageFormatDescriptor,
|
||||
C.CL_INVALID_IMAGE_SIZE: ErrInvalidImageSize,
|
||||
C.CL_INVALID_SAMPLER: ErrInvalidSampler,
|
||||
C.CL_INVALID_BINARY: ErrInvalidBinary,
|
||||
C.CL_INVALID_BUILD_OPTIONS: ErrInvalidBuildOptions,
|
||||
C.CL_INVALID_PROGRAM: ErrInvalidProgram,
|
||||
C.CL_INVALID_PROGRAM_EXECUTABLE: ErrInvalidProgramExecutable,
|
||||
C.CL_INVALID_KERNEL_NAME: ErrInvalidKernelName,
|
||||
C.CL_INVALID_KERNEL_DEFINITION: ErrInvalidKernelDefinition,
|
||||
C.CL_INVALID_KERNEL: ErrInvalidKernel,
|
||||
C.CL_INVALID_ARG_INDEX: ErrInvalidArgIndex,
|
||||
C.CL_INVALID_ARG_VALUE: ErrInvalidArgValue,
|
||||
C.CL_INVALID_ARG_SIZE: ErrInvalidArgSize,
|
||||
C.CL_INVALID_KERNEL_ARGS: ErrInvalidKernelArgs,
|
||||
C.CL_INVALID_WORK_DIMENSION: ErrInvalidWorkDimension,
|
||||
C.CL_INVALID_WORK_GROUP_SIZE: ErrInvalidWorkGroupSize,
|
||||
C.CL_INVALID_WORK_ITEM_SIZE: ErrInvalidWorkItemSize,
|
||||
C.CL_INVALID_GLOBAL_OFFSET: ErrInvalidGlobalOffset,
|
||||
C.CL_INVALID_EVENT_WAIT_LIST: ErrInvalidEventWaitList,
|
||||
C.CL_INVALID_EVENT: ErrInvalidEvent,
|
||||
C.CL_INVALID_OPERATION: ErrInvalidOperation,
|
||||
C.CL_INVALID_GL_OBJECT: ErrInvalidGlObject,
|
||||
C.CL_INVALID_BUFFER_SIZE: ErrInvalidBufferSize,
|
||||
C.CL_INVALID_MIP_LEVEL: ErrInvalidMipLevel,
|
||||
C.CL_INVALID_GLOBAL_WORK_SIZE: ErrInvalidGlobalWorkSize,
|
||||
C.CL_INVALID_PROPERTY: ErrInvalidProperty,
|
||||
}
|
||||
|
||||
func toError(code C.cl_int) error {
|
||||
if err, ok := errorMap[code]; ok {
|
||||
return err
|
||||
}
|
||||
return ErrOther(code)
|
||||
}
|
||||
|
||||
type LocalMemType int
|
||||
|
||||
const (
|
||||
LocalMemTypeNone LocalMemType = C.CL_NONE
|
||||
LocalMemTypeGlobal LocalMemType = C.CL_GLOBAL
|
||||
LocalMemTypeLocal LocalMemType = C.CL_LOCAL
|
||||
)
|
||||
|
||||
var localMemTypeMap = map[LocalMemType]string{
|
||||
LocalMemTypeNone: "None",
|
||||
LocalMemTypeGlobal: "Global",
|
||||
LocalMemTypeLocal: "Local",
|
||||
}
|
||||
|
||||
func (t LocalMemType) String() string {
|
||||
name := localMemTypeMap[t]
|
||||
if name == "" {
|
||||
name = "Unknown"
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ExecCapability int
|
||||
|
||||
const (
|
||||
ExecCapabilityKernel ExecCapability = C.CL_EXEC_KERNEL // The OpenCL device can execute OpenCL kernels.
|
||||
ExecCapabilityNativeKernel ExecCapability = C.CL_EXEC_NATIVE_KERNEL // The OpenCL device can execute native kernels.
|
||||
)
|
||||
|
||||
func (ec ExecCapability) String() string {
|
||||
var parts []string
|
||||
if ec&ExecCapabilityKernel != 0 {
|
||||
parts = append(parts, "Kernel")
|
||||
}
|
||||
if ec&ExecCapabilityNativeKernel != 0 {
|
||||
parts = append(parts, "NativeKernel")
|
||||
}
|
||||
if parts == nil {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(parts, "|")
|
||||
}
|
||||
|
||||
type MemCacheType int
|
||||
|
||||
const (
|
||||
MemCacheTypeNone MemCacheType = C.CL_NONE
|
||||
MemCacheTypeReadOnlyCache MemCacheType = C.CL_READ_ONLY_CACHE
|
||||
MemCacheTypeReadWriteCache MemCacheType = C.CL_READ_WRITE_CACHE
|
||||
)
|
||||
|
||||
func (ct MemCacheType) String() string {
|
||||
switch ct {
|
||||
case MemCacheTypeNone:
|
||||
return "None"
|
||||
case MemCacheTypeReadOnlyCache:
|
||||
return "ReadOnly"
|
||||
case MemCacheTypeReadWriteCache:
|
||||
return "ReadWrite"
|
||||
}
|
||||
return fmt.Sprintf("Unknown(%x)", int(ct))
|
||||
}
|
||||
|
||||
type MemFlag int
|
||||
|
||||
const (
|
||||
MemReadWrite MemFlag = C.CL_MEM_READ_WRITE
|
||||
MemWriteOnly MemFlag = C.CL_MEM_WRITE_ONLY
|
||||
MemReadOnly MemFlag = C.CL_MEM_READ_ONLY
|
||||
MemUseHostPtr MemFlag = C.CL_MEM_USE_HOST_PTR
|
||||
MemAllocHostPtr MemFlag = C.CL_MEM_ALLOC_HOST_PTR
|
||||
MemCopyHostPtr MemFlag = C.CL_MEM_COPY_HOST_PTR
|
||||
|
||||
MemWriteOnlyHost MemFlag = C.CL_MEM_HOST_WRITE_ONLY
|
||||
MemReadOnlyHost MemFlag = C.CL_MEM_HOST_READ_ONLY
|
||||
MemNoAccessHost MemFlag = C.CL_MEM_HOST_NO_ACCESS
|
||||
)
|
||||
|
||||
type MemObjectType int
|
||||
|
||||
const (
|
||||
MemObjectTypeBuffer MemObjectType = C.CL_MEM_OBJECT_BUFFER
|
||||
MemObjectTypeImage2D MemObjectType = C.CL_MEM_OBJECT_IMAGE2D
|
||||
MemObjectTypeImage3D MemObjectType = C.CL_MEM_OBJECT_IMAGE3D
|
||||
)
|
||||
|
||||
type MapFlag int
|
||||
|
||||
const (
|
||||
// This flag specifies that the region being mapped in the memory object is being mapped for reading.
|
||||
MapFlagRead MapFlag = C.CL_MAP_READ
|
||||
MapFlagWrite MapFlag = C.CL_MAP_WRITE
|
||||
MapFlagWriteInvalidateRegion MapFlag = C.CL_MAP_WRITE_INVALIDATE_REGION
|
||||
)
|
||||
|
||||
func (mf MapFlag) toCl() C.cl_map_flags {
|
||||
return C.cl_map_flags(mf)
|
||||
}
|
||||
|
||||
type ChannelOrder int
|
||||
|
||||
const (
|
||||
ChannelOrderR ChannelOrder = C.CL_R
|
||||
ChannelOrderA ChannelOrder = C.CL_A
|
||||
ChannelOrderRG ChannelOrder = C.CL_RG
|
||||
ChannelOrderRA ChannelOrder = C.CL_RA
|
||||
ChannelOrderRGB ChannelOrder = C.CL_RGB
|
||||
ChannelOrderRGBA ChannelOrder = C.CL_RGBA
|
||||
ChannelOrderBGRA ChannelOrder = C.CL_BGRA
|
||||
ChannelOrderARGB ChannelOrder = C.CL_ARGB
|
||||
ChannelOrderIntensity ChannelOrder = C.CL_INTENSITY
|
||||
ChannelOrderLuminance ChannelOrder = C.CL_LUMINANCE
|
||||
ChannelOrderRx ChannelOrder = C.CL_Rx
|
||||
ChannelOrderRGx ChannelOrder = C.CL_RGx
|
||||
ChannelOrderRGBx ChannelOrder = C.CL_RGBx
|
||||
)
|
||||
|
||||
var channelOrderNameMap = map[ChannelOrder]string{
|
||||
ChannelOrderR: "R",
|
||||
ChannelOrderA: "A",
|
||||
ChannelOrderRG: "RG",
|
||||
ChannelOrderRA: "RA",
|
||||
ChannelOrderRGB: "RGB",
|
||||
ChannelOrderRGBA: "RGBA",
|
||||
ChannelOrderBGRA: "BGRA",
|
||||
ChannelOrderARGB: "ARGB",
|
||||
ChannelOrderIntensity: "Intensity",
|
||||
ChannelOrderLuminance: "Luminance",
|
||||
ChannelOrderRx: "Rx",
|
||||
ChannelOrderRGx: "RGx",
|
||||
ChannelOrderRGBx: "RGBx",
|
||||
}
|
||||
|
||||
func (co ChannelOrder) String() string {
|
||||
name := channelOrderNameMap[co]
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Unknown(%x)", int(co))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ChannelDataType int
|
||||
|
||||
const (
|
||||
ChannelDataTypeSNormInt8 ChannelDataType = C.CL_SNORM_INT8
|
||||
ChannelDataTypeSNormInt16 ChannelDataType = C.CL_SNORM_INT16
|
||||
ChannelDataTypeUNormInt8 ChannelDataType = C.CL_UNORM_INT8
|
||||
ChannelDataTypeUNormInt16 ChannelDataType = C.CL_UNORM_INT16
|
||||
ChannelDataTypeUNormShort565 ChannelDataType = C.CL_UNORM_SHORT_565
|
||||
ChannelDataTypeUNormShort555 ChannelDataType = C.CL_UNORM_SHORT_555
|
||||
ChannelDataTypeUNormInt101010 ChannelDataType = C.CL_UNORM_INT_101010
|
||||
ChannelDataTypeSignedInt8 ChannelDataType = C.CL_SIGNED_INT8
|
||||
ChannelDataTypeSignedInt16 ChannelDataType = C.CL_SIGNED_INT16
|
||||
ChannelDataTypeSignedInt32 ChannelDataType = C.CL_SIGNED_INT32
|
||||
ChannelDataTypeUnsignedInt8 ChannelDataType = C.CL_UNSIGNED_INT8
|
||||
ChannelDataTypeUnsignedInt16 ChannelDataType = C.CL_UNSIGNED_INT16
|
||||
ChannelDataTypeUnsignedInt32 ChannelDataType = C.CL_UNSIGNED_INT32
|
||||
ChannelDataTypeHalfFloat ChannelDataType = C.CL_HALF_FLOAT
|
||||
ChannelDataTypeFloat ChannelDataType = C.CL_FLOAT
|
||||
)
|
||||
|
||||
var channelDataTypeNameMap = map[ChannelDataType]string{
|
||||
ChannelDataTypeSNormInt8: "SNormInt8",
|
||||
ChannelDataTypeSNormInt16: "SNormInt16",
|
||||
ChannelDataTypeUNormInt8: "UNormInt8",
|
||||
ChannelDataTypeUNormInt16: "UNormInt16",
|
||||
ChannelDataTypeUNormShort565: "UNormShort565",
|
||||
ChannelDataTypeUNormShort555: "UNormShort555",
|
||||
ChannelDataTypeUNormInt101010: "UNormInt101010",
|
||||
ChannelDataTypeSignedInt8: "SignedInt8",
|
||||
ChannelDataTypeSignedInt16: "SignedInt16",
|
||||
ChannelDataTypeSignedInt32: "SignedInt32",
|
||||
ChannelDataTypeUnsignedInt8: "UnsignedInt8",
|
||||
ChannelDataTypeUnsignedInt16: "UnsignedInt16",
|
||||
ChannelDataTypeUnsignedInt32: "UnsignedInt32",
|
||||
ChannelDataTypeHalfFloat: "HalfFloat",
|
||||
ChannelDataTypeFloat: "Float",
|
||||
}
|
||||
|
||||
func (ct ChannelDataType) String() string {
|
||||
name := channelDataTypeNameMap[ct]
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("Unknown(%x)", int(ct))
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
type ImageFormat struct {
|
||||
ChannelOrder ChannelOrder
|
||||
ChannelDataType ChannelDataType
|
||||
}
|
||||
|
||||
func (f ImageFormat) toCl() C.cl_image_format {
|
||||
var format C.cl_image_format
|
||||
format.image_channel_order = C.cl_channel_order(f.ChannelOrder)
|
||||
format.image_channel_data_type = C.cl_channel_type(f.ChannelDataType)
|
||||
return format
|
||||
}
|
||||
|
||||
type ProfilingInfo int
|
||||
|
||||
const (
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event is enqueued in
|
||||
// a command-queue by the host.
|
||||
ProfilingInfoCommandQueued ProfilingInfo = C.CL_PROFILING_COMMAND_QUEUED
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event that has been
|
||||
// enqueued is submitted by the host to the device associated with the command-queue.
|
||||
ProfilingInfoCommandSubmit ProfilingInfo = C.CL_PROFILING_COMMAND_SUBMIT
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event starts execution on the device.
|
||||
ProfilingInfoCommandStart ProfilingInfo = C.CL_PROFILING_COMMAND_START
|
||||
// A 64-bit value that describes the current device time counter in
|
||||
// nanoseconds when the command identified by event has finished
|
||||
// execution on the device.
|
||||
ProfilingInfoCommandEnd ProfilingInfo = C.CL_PROFILING_COMMAND_END
|
||||
)
|
||||
|
||||
type CommmandExecStatus int
|
||||
|
||||
const (
|
||||
CommmandExecStatusComplete CommmandExecStatus = C.CL_COMPLETE
|
||||
CommmandExecStatusRunning CommmandExecStatus = C.CL_RUNNING
|
||||
CommmandExecStatusSubmitted CommmandExecStatus = C.CL_SUBMITTED
|
||||
CommmandExecStatusQueued CommmandExecStatus = C.CL_QUEUED
|
||||
)
|
||||
|
||||
type Event struct {
|
||||
clEvent C.cl_event
|
||||
}
|
||||
|
||||
func releaseEvent(ev *Event) {
|
||||
if ev.clEvent != nil {
|
||||
C.clReleaseEvent(ev.clEvent)
|
||||
ev.clEvent = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (e *Event) Release() {
|
||||
releaseEvent(e)
|
||||
}
|
||||
|
||||
func (e *Event) GetEventProfilingInfo(paramName ProfilingInfo) (int64, error) {
|
||||
var paramValue C.cl_ulong
|
||||
if err := C.clGetEventProfilingInfo(e.clEvent, C.cl_profiling_info(paramName), C.size_t(unsafe.Sizeof(paramValue)), unsafe.Pointer(¶mValue), nil); err != C.CL_SUCCESS {
|
||||
return 0, toError(err)
|
||||
}
|
||||
return int64(paramValue), nil
|
||||
}
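// A sketch of timing a command with GetEventProfilingInfo above. It assumes the event
// came from a queue created with cl.CommandQueueProfilingEnable (otherwise the query
// fails with a profiling-not-available error), that the command has already completed
// (e.g. after WaitForEvents), and the standard time package. The returned values are
// device timestamps in nanoseconds.
func commandDuration(ev *cl.Event) (time.Duration, error) {
    start, err := ev.GetEventProfilingInfo(cl.ProfilingInfoCommandStart)
    if err != nil {
        return 0, err
    }
    end, err := ev.GetEventProfilingInfo(cl.ProfilingInfoCommandEnd)
    if err != nil {
        return 0, err
    }
    return time.Duration(end-start) * time.Nanosecond, nil
}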
|
||||
|
||||
// Sets the execution status of a user event object.
|
||||
//
|
||||
// `status` specifies the new execution status to be set and
|
||||
// can be CL_COMPLETE or a negative integer value to indicate
|
||||
// an error. A negative integer value causes all enqueued commands
|
||||
// that wait on this user event to be terminated. clSetUserEventStatus
|
||||
// can only be called once to change the execution status of event.
|
||||
func (e *Event) SetUserEventStatus(status int) error {
|
||||
return toError(C.clSetUserEventStatus(e.clEvent, C.cl_int(status)))
|
||||
}
|
||||
|
||||
// Waits on the host thread for commands identified by event objects in
|
||||
// events to complete. A command is considered complete if its execution
|
||||
// status is CL_COMPLETE or a negative value. The events specified in
|
||||
// event_list act as synchronization points.
|
||||
//
|
||||
// If the cl_khr_gl_event extension is enabled, event objects can also be
|
||||
// used to reflect the status of an OpenGL sync object. The sync object
|
||||
// in turn refers to a fence command executing in an OpenGL command
|
||||
// stream. This provides another method of coordinating sharing of buffers
|
||||
// and images between OpenGL and OpenCL.
|
||||
func WaitForEvents(events []*Event) error {
|
||||
return toError(C.clWaitForEvents(C.cl_uint(len(events)), eventListPtr(events)))
|
||||
}
|
||||
|
||||
func newEvent(clEvent C.cl_event) *Event {
|
||||
ev := &Event{clEvent: clEvent}
|
||||
runtime.SetFinalizer(ev, releaseEvent)
|
||||
return ev
|
||||
}
|
||||
|
||||
func eventListPtr(el []*Event) *C.cl_event {
|
||||
if el == nil {
|
||||
return nil
|
||||
}
|
||||
elist := make([]C.cl_event, len(el))
|
||||
for i, e := range el {
|
||||
elist[i] = e.clEvent
|
||||
}
|
||||
return (*C.cl_event)(&elist[0])
|
||||
}
|
||||
|
||||
func clBool(b bool) C.cl_bool {
|
||||
if b {
|
||||
return C.CL_TRUE
|
||||
}
|
||||
return C.CL_FALSE
|
||||
}
|
||||
|
||||
func sizeT3(i3 [3]int) [3]C.size_t {
|
||||
var val [3]C.size_t
|
||||
val[0] = C.size_t(i3[0])
|
||||
val[1] = C.size_t(i3[1])
|
||||
val[2] = C.size_t(i3[2])
|
||||
return val
|
||||
}
|
||||
|
||||
type MappedMemObject struct {
|
||||
ptr unsafe.Pointer
|
||||
size int
|
||||
rowPitch int
|
||||
slicePitch int
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) ByteSlice() []byte {
|
||||
var byteSlice []byte
|
||||
sliceHeader := (*reflect.SliceHeader)(unsafe.Pointer(&byteSlice))
|
||||
sliceHeader.Cap = mb.size
|
||||
sliceHeader.Len = mb.size
|
||||
sliceHeader.Data = uintptr(mb.ptr)
|
||||
return byteSlice
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) Ptr() unsafe.Pointer {
|
||||
return mb.ptr
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) Size() int {
|
||||
return mb.size
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) RowPitch() int {
|
||||
return mb.rowPitch
|
||||
}
|
||||
|
||||
func (mb *MappedMemObject) SlicePitch() int {
|
||||
return mb.slicePitch
|
||||
}
|
71  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types12.go  (generated, vendored)
@ -1,71 +0,0 @@
|
||||
// +build cl12
|
||||
|
||||
package cl
|
||||
|
||||
// #ifdef __APPLE__
|
||||
// #include "OpenCL/opencl.h"
|
||||
// #else
|
||||
// #include "cl.h"
|
||||
// #endif
|
||||
import "C"
|
||||
|
||||
const (
|
||||
ChannelDataTypeUNormInt24 ChannelDataType = C.CL_UNORM_INT24
|
||||
ChannelOrderDepth ChannelOrder = C.CL_DEPTH
|
||||
ChannelOrderDepthStencil ChannelOrder = C.CL_DEPTH_STENCIL
|
||||
MemHostNoAccess MemFlag = C.CL_MEM_HOST_NO_ACCESS // OpenCL 1.2
|
||||
MemHostReadOnly MemFlag = C.CL_MEM_HOST_READ_ONLY // OpenCL 1.2
|
||||
MemHostWriteOnly MemFlag = C.CL_MEM_HOST_WRITE_ONLY // OpenCL 1.2
|
||||
MemObjectTypeImage1D MemObjectType = C.CL_MEM_OBJECT_IMAGE1D
|
||||
MemObjectTypeImage1DArray MemObjectType = C.CL_MEM_OBJECT_IMAGE1D_ARRAY
|
||||
MemObjectTypeImage1DBuffer MemObjectType = C.CL_MEM_OBJECT_IMAGE1D_BUFFER
|
||||
MemObjectTypeImage2DArray MemObjectType = C.CL_MEM_OBJECT_IMAGE2D_ARRAY
|
||||
// This flag specifies that the region being mapped in the memory object is being mapped for writing.
|
||||
//
|
||||
// The contents of the region being mapped are to be discarded. This is typically the case when the
|
||||
// region being mapped is overwritten by the host. This flag allows the implementation to no longer
|
||||
// guarantee that the pointer returned by clEnqueueMapBuffer or clEnqueueMapImage contains the
|
||||
// latest bits in the region being mapped which can be a significant performance enhancement.
|
||||
MapFlagWriteInvalidateRegion MapFlag = C.CL_MAP_WRITE_INVALIDATE_REGION
|
||||
)
|
||||
|
||||
func init() {
|
||||
errorMap[C.CL_COMPILE_PROGRAM_FAILURE] = ErrCompileProgramFailure
|
||||
errorMap[C.CL_DEVICE_PARTITION_FAILED] = ErrDevicePartitionFailed
|
||||
errorMap[C.CL_INVALID_COMPILER_OPTIONS] = ErrInvalidCompilerOptions
|
||||
errorMap[C.CL_INVALID_DEVICE_PARTITION_COUNT] = ErrInvalidDevicePartitionCount
|
||||
errorMap[C.CL_INVALID_IMAGE_DESCRIPTOR] = ErrInvalidImageDescriptor
|
||||
errorMap[C.CL_INVALID_LINKER_OPTIONS] = ErrInvalidLinkerOptions
|
||||
errorMap[C.CL_KERNEL_ARG_INFO_NOT_AVAILABLE] = ErrKernelArgInfoNotAvailable
|
||||
errorMap[C.CL_LINK_PROGRAM_FAILURE] = ErrLinkProgramFailure
|
||||
errorMap[C.CL_LINKER_NOT_AVAILABLE] = ErrLinkerNotAvailable
|
||||
channelOrderNameMap[ChannelOrderDepth] = "Depth"
|
||||
channelOrderNameMap[ChannelOrderDepthStencil] = "DepthStencil"
|
||||
channelDataTypeNameMap[ChannelDataTypeUNormInt24] = "UNormInt24"
|
||||
}
|
||||
|
||||
type ImageDescription struct {
|
||||
Type MemObjectType
|
||||
Width, Height, Depth int
|
||||
ArraySize, RowPitch, SlicePitch int
|
||||
NumMipLevels, NumSamples int
|
||||
Buffer *MemObject
|
||||
}
|
||||
|
||||
func (d ImageDescription) toCl() C.cl_image_desc {
|
||||
var desc C.cl_image_desc
|
||||
desc.image_type = C.cl_mem_object_type(d.Type)
|
||||
desc.image_width = C.size_t(d.Width)
|
||||
desc.image_height = C.size_t(d.Height)
|
||||
desc.image_depth = C.size_t(d.Depth)
|
||||
desc.image_array_size = C.size_t(d.ArraySize)
|
||||
desc.image_row_pitch = C.size_t(d.RowPitch)
|
||||
desc.image_slice_pitch = C.size_t(d.SlicePitch)
|
||||
desc.num_mip_levels = C.cl_uint(d.NumMipLevels)
|
||||
desc.num_samples = C.cl_uint(d.NumSamples)
|
||||
desc.buffer = nil
|
||||
if d.Buffer != nil {
|
||||
desc.buffer = d.Buffer.clMem
|
||||
}
|
||||
return desc
|
||||
}
|
45  Godeps/_workspace/src/github.com/Gustav-Simonsson/go-opencl/cl/types_darwin.go  (generated, vendored)
@@ -1,45 +0,0 @@
package cl

// #ifdef __APPLE__
// #include "OpenCL/opencl.h"
// #else
// #include "cl.h"
// #endif
import "C"

// Extension: cl_APPLE_fixed_alpha_channel_orders
//
// These selectors may be passed to clCreateImage2D() in the cl_image_format.image_channel_order field.
// They are like CL_BGRA and CL_ARGB except that the alpha channel is ignored. On calls to read_imagef,
// the alpha will be 0xff (1.0f) if the sample falls in the image and 0 if it does not fall in the image.
// On calls to write_imagef, the alpha value is ignored and 0xff (1.0f) is written. These formats are
// currently only available for the CL_UNORM_INT8 cl_channel_type. They are intended to support legacy
// image formats.
const (
    ChannelOrder1RGBApple ChannelOrder = C.CL_1RGB_APPLE // Introduced in MacOS X.7.
    ChannelOrderBGR1Apple ChannelOrder = C.CL_BGR1_APPLE // Introduced in MacOS X.7.
)

// Extension: cl_APPLE_biased_fixed_point_image_formats
//
// This selector may be passed to clCreateImage2D() in the cl_image_format.image_channel_data_type field.
// It defines a biased signed 1.14 fixed point storage format, with range [-1, 3). The conversion from
// float to this fixed point format is defined as follows:
//
//	ushort float_to_sfixed14( float x ){
//	    int i = convert_int_sat_rte( x * 0x1.0p14f ); // scale [-1, 3.0) to [-16384, 3*16384), round to nearest integer
//	    i = add_sat( i, 0x4000 );                     // apply bias, to convert to [0, 65535) range
//	    return convert_ushort_sat(i);                 // clamp to destination size
//	}
//
// The inverse conversion is the reverse process. The formats are currently only available on the CPU with
// the CL_RGBA channel layout.
const (
    ChannelDataTypeSFixed14Apple ChannelDataType = C.CL_SFIXED14_APPLE // Introduced in MacOS X.7.
)

func init() {
    channelOrderNameMap[ChannelOrder1RGBApple] = "1RGBApple"
    channelOrderNameMap[ChannelOrderBGR1Apple] = "BGR1Apple"
    channelDataTypeNameMap[ChannelDataTypeSFixed14Apple] = "SFixed14Apple"
}
19  Godeps/_workspace/src/github.com/codegangsta/cli/.travis.yml  (generated, vendored)
@@ -1,19 +0,0 @@
language: go
sudo: false

go:
- 1.0.3
- 1.1.2
- 1.2.2
- 1.3.3
- 1.4.2
- 1.5.1
- tip

matrix:
  allow_failures:
    - go: tip

script:
- go vet ./...
- go test -v ./...
21  Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE  (generated, vendored)
@@ -1,21 +0,0 @@
Copyright (C) 2013 Jeremy Saenz
All Rights Reserved.

MIT LICENSE

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
352  Godeps/_workspace/src/github.com/codegangsta/cli/README.md  (generated, vendored)
@ -1,352 +0,0 @@
|
||||
[](http://gocover.io/github.com/codegangsta/cli)
|
||||
[](https://travis-ci.org/codegangsta/cli)
|
||||
[](https://godoc.org/github.com/codegangsta/cli)
|
||||
|
||||
# cli.go
|
||||
|
||||
`cli.go` is a simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
|
||||
|
||||
## Overview
|
||||
|
||||
Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
|
||||
|
||||
**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive!
|
||||
|
||||
## Installation
|
||||
|
||||
Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html).
|
||||
|
||||
To install `cli.go`, simply run:
|
||||
```
|
||||
$ go get github.com/codegangsta/cli
|
||||
```
|
||||
|
||||
Make sure your `PATH` includes the `$GOPATH/bin` directory so your commands can be easily used:
|
||||
```
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`.
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cli.NewApp().Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation:
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "boom"
|
||||
app.Usage = "make an explosive entrance"
|
||||
app.Action = func(c *cli.Context) {
|
||||
println("boom! I say!")
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below.
|
||||
|
||||
## Example
|
||||
|
||||
Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness!
|
||||
|
||||
Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it:
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "greet"
|
||||
app.Usage = "fight the loneliness!"
|
||||
app.Action = func(c *cli.Context) {
|
||||
println("Hello friend!")
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
Install our command to the `$GOPATH/bin` directory:
|
||||
|
||||
```
|
||||
$ go install
|
||||
```
|
||||
|
||||
Finally run our new command:
|
||||
|
||||
```
|
||||
$ greet
|
||||
Hello friend!
|
||||
```
|
||||
|
||||
`cli.go` also generates neat help text:
|
||||
|
||||
```
|
||||
$ greet help
|
||||
NAME:
|
||||
greet - fight the loneliness!
|
||||
|
||||
USAGE:
|
||||
greet [global options] command [command options] [arguments...]
|
||||
|
||||
VERSION:
|
||||
0.0.0
|
||||
|
||||
COMMANDS:
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
GLOBAL OPTIONS:
|
||||
--version Shows version information
|
||||
```
|
||||
|
||||
### Arguments
|
||||
|
||||
You can lookup arguments by calling the `Args` function on `cli.Context`.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Action = func(c *cli.Context) {
|
||||
println("Hello", c.Args()[0])
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
### Flags
|
||||
|
||||
Setting and querying flags is simple.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) {
|
||||
name := "someone"
|
||||
if len(c.Args()) > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if c.String("lang") == "spanish" {
|
||||
println("Hola", name)
|
||||
} else {
|
||||
println("Hello", name)
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
You can also set a destination variable for a flag, into which the flag's value will be stored.
|
||||
|
||||
``` go
|
||||
...
|
||||
var language string
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
Destination: &language,
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) {
|
||||
name := "someone"
|
||||
if len(c.Args()) > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if language == "spanish" {
|
||||
println("Hola", name)
|
||||
} else {
|
||||
println("Hello", name)
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
See the full list of flags at http://godoc.org/github.com/codegangsta/cli
|
||||
|
||||
#### Alternate Names
|
||||
|
||||
You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error.
|
||||
|
||||
#### Values from the Environment
|
||||
|
||||
You can also have the default value set from the environment via `EnvVar`. e.g.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
EnvVar: "APP_LANG",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
|
||||
},
|
||||
}
|
||||
```

### Subcommands

Subcommands can be defined for a more git-like command line app.

```go
...
app.Commands = []cli.Command{
  {
    Name:    "add",
    Aliases: []string{"a"},
    Usage:   "add a task to the list",
    Action: func(c *cli.Context) {
      println("added task: ", c.Args().First())
    },
  },
  {
    Name:    "complete",
    Aliases: []string{"c"},
    Usage:   "complete a task on the list",
    Action: func(c *cli.Context) {
      println("completed task: ", c.Args().First())
    },
  },
  {
    Name:    "template",
    Aliases: []string{"r"},
    Usage:   "options for task templates",
    Subcommands: []cli.Command{
      {
        Name:  "add",
        Usage: "add a new template",
        Action: func(c *cli.Context) {
          println("new task template: ", c.Args().First())
        },
      },
      {
        Name:  "remove",
        Usage: "remove an existing template",
        Action: func(c *cli.Context) {
          println("removed task template: ", c.Args().First())
        },
      },
    },
  },
}
...
```
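
As a usage sketch (assuming the commands above are wired into a hypothetical binary called `todo`), top-level commands, their aliases, and nested subcommands are all dispatched by name:

```
$ todo add "buy groceries"
added task:  buy groceries
$ todo c "buy groceries"
completed task:  buy groceries
$ todo template remove weekly-review
removed task template:  weekly-review
```
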

### Bash Completion

You can enable completion commands by setting the `EnableBashCompletion`
flag on the `App` object. By default, this setting will only auto-complete to
show an app's subcommands, but you can write your own completion methods for
the App or its subcommands.

```go
...
var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
app := cli.NewApp()
app.EnableBashCompletion = true
app.Commands = []cli.Command{
  {
    Name:    "complete",
    Aliases: []string{"c"},
    Usage:   "complete a task on the list",
    Action: func(c *cli.Context) {
      println("completed task: ", c.Args().First())
    },
    BashComplete: func(c *cli.Context) {
      // This will complete if no args are passed
      if len(c.Args()) > 0 {
        return
      }
      for _, t := range tasks {
        fmt.Println(t)
      }
    },
  },
}
...
```

#### To Enable

Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
setting the `PROG` variable to the name of your program:

`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`

#### To Distribute

Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
it to the name of the program you wish to add autocomplete support for (or
automatically install it there if you are distributing a package). Don't forget
to source the file to make it active in the current shell.

```
sudo cp src/bash_autocomplete /etc/bash_completion.d/<myprogram>
source /etc/bash_completion.d/<myprogram>
```

Alternatively, you can just document that users should source the generic
`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
to the name of their program (as above).

## Contribution Guidelines

Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.

If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others' pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions feel free to link @codegangsta to the issue in question and we can review it together.

If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out.

349  Godeps/_workspace/src/github.com/codegangsta/cli/app.go  (generated, vendored)
@@ -1,349 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
// App is the main structure of a cli application. It is recommended that
|
||||
// an app be created with the cli.NewApp() function
|
||||
type App struct {
|
||||
// The name of the program. Defaults to path.Base(os.Args[0])
|
||||
Name string
|
||||
// Full name of command for help, defaults to Name
|
||||
HelpName string
|
||||
// Description of the program.
|
||||
Usage string
|
||||
// Text to override the USAGE section of help
|
||||
UsageText string
|
||||
// Description of the program argument format.
|
||||
ArgsUsage string
|
||||
// Version of the program
|
||||
Version string
|
||||
// List of commands to execute
|
||||
Commands []Command
|
||||
// List of flags to parse
|
||||
Flags []Flag
|
||||
// Boolean to enable bash completion commands
|
||||
EnableBashCompletion bool
|
||||
// Boolean to hide built-in help command
|
||||
HideHelp bool
|
||||
// Boolean to hide built-in version flag
|
||||
HideVersion bool
|
||||
// An action to execute when the bash-completion flag is set
|
||||
BashComplete func(context *Context)
|
||||
// An action to execute before any subcommands are run, but after the context is ready
|
||||
// If a non-nil error is returned, no subcommands are run
|
||||
Before func(context *Context) error
|
||||
// An action to execute after any subcommands are run, but after the subcommand has finished
|
||||
// It is run even if Action() panics
|
||||
After func(context *Context) error
|
||||
// The action to execute when no subcommands are specified
|
||||
Action func(context *Context)
|
||||
// Execute this function if the proper command cannot be found
|
||||
CommandNotFound func(context *Context, command string)
|
||||
// Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages.
|
||||
// This function is able to replace the original error messages.
|
||||
// If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted.
|
||||
OnUsageError func(context *Context, err error, isSubcommand bool) error
|
||||
// Compilation date
|
||||
Compiled time.Time
|
||||
// List of all authors who contributed
|
||||
Authors []Author
|
||||
// Copyright of the binary if any
|
||||
Copyright string
|
||||
// Name of Author (Note: Use App.Authors, this is deprecated)
|
||||
Author string
|
||||
// Email of Author (Note: Use App.Authors, this is deprecated)
|
||||
Email string
|
||||
// Writer writer to write output to
|
||||
Writer io.Writer
|
||||
}
|
||||
|
||||
// Tries to find out when this binary was compiled.
|
||||
// Returns the current time if it fails to find it.
|
||||
func compileTime() time.Time {
|
||||
info, err := os.Stat(os.Args[0])
|
||||
if err != nil {
|
||||
return time.Now()
|
||||
}
|
||||
return info.ModTime()
|
||||
}
|
||||
|
||||
// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action.
|
||||
func NewApp() *App {
|
||||
return &App{
|
||||
Name: path.Base(os.Args[0]),
|
||||
HelpName: path.Base(os.Args[0]),
|
||||
Usage: "A new cli application",
|
||||
UsageText: "",
|
||||
Version: "0.0.0",
|
||||
BashComplete: DefaultAppComplete,
|
||||
Action: helpCommand.Action,
|
||||
Compiled: compileTime(),
|
||||
Writer: os.Stdout,
|
||||
}
|
||||
}
|
||||
|
||||
// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination
|
||||
func (a *App) Run(arguments []string) (err error) {
|
||||
if a.Author != "" || a.Email != "" {
|
||||
a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
|
||||
}
|
||||
|
||||
newCmds := []Command{}
|
||||
for _, c := range a.Commands {
|
||||
if c.HelpName == "" {
|
||||
c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
|
||||
}
|
||||
newCmds = append(newCmds, c)
|
||||
}
|
||||
a.Commands = newCmds
|
||||
|
||||
// append help to commands
|
||||
if a.Command(helpCommand.Name) == nil && !a.HideHelp {
|
||||
a.Commands = append(a.Commands, helpCommand)
|
||||
if (HelpFlag != BoolFlag{}) {
|
||||
a.appendFlag(HelpFlag)
|
||||
}
|
||||
}
|
||||
|
||||
//append version/help flags
|
||||
if a.EnableBashCompletion {
|
||||
a.appendFlag(BashCompletionFlag)
|
||||
}
|
||||
|
||||
if !a.HideVersion {
|
||||
a.appendFlag(VersionFlag)
|
||||
}
|
||||
|
||||
// parse flags
|
||||
set := flagSet(a.Name, a.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
err = set.Parse(arguments[1:])
|
||||
nerr := normalizeFlags(a.Flags, set)
|
||||
context := NewContext(a, set, nil)
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(a.Writer, nerr)
|
||||
ShowAppHelp(context)
|
||||
return nerr
|
||||
}
|
||||
|
||||
if checkCompletions(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if a.OnUsageError != nil {
|
||||
err := a.OnUsageError(context, err, false)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
|
||||
ShowAppHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !a.HideHelp && checkHelp(context) {
|
||||
ShowAppHelp(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !a.HideVersion && checkVersion(context) {
|
||||
ShowVersion(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
if a.After != nil {
|
||||
defer func() {
|
||||
if afterErr := a.After(context); afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if a.Before != nil {
|
||||
err = a.Before(context)
|
||||
if err != nil {
|
||||
fmt.Fprintf(a.Writer, "%v\n\n", err)
|
||||
ShowAppHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
args := context.Args()
|
||||
if args.Present() {
|
||||
name := args.First()
|
||||
c := a.Command(name)
|
||||
if c != nil {
|
||||
return c.Run(context)
|
||||
}
|
||||
}
|
||||
|
||||
// Run default Action
|
||||
a.Action(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Another entry point to the cli app, takes care of passing arguments and error handling
|
||||
func (a *App) RunAndExitOnError() {
|
||||
if err := a.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags
|
||||
func (a *App) RunAsSubcommand(ctx *Context) (err error) {
|
||||
// append help to commands
|
||||
if len(a.Commands) > 0 {
|
||||
if a.Command(helpCommand.Name) == nil && !a.HideHelp {
|
||||
a.Commands = append(a.Commands, helpCommand)
|
||||
if (HelpFlag != BoolFlag{}) {
|
||||
a.appendFlag(HelpFlag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newCmds := []Command{}
|
||||
for _, c := range a.Commands {
|
||||
if c.HelpName == "" {
|
||||
c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
|
||||
}
|
||||
newCmds = append(newCmds, c)
|
||||
}
|
||||
a.Commands = newCmds
|
||||
|
||||
// append flags
|
||||
if a.EnableBashCompletion {
|
||||
a.appendFlag(BashCompletionFlag)
|
||||
}
|
||||
|
||||
// parse flags
|
||||
set := flagSet(a.Name, a.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
nerr := normalizeFlags(a.Flags, set)
|
||||
context := NewContext(a, set, ctx)
|
||||
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(a.Writer, nerr)
|
||||
fmt.Fprintln(a.Writer)
|
||||
if len(a.Commands) > 0 {
|
||||
ShowSubcommandHelp(context)
|
||||
} else {
|
||||
ShowCommandHelp(ctx, context.Args().First())
|
||||
}
|
||||
return nerr
|
||||
}
|
||||
|
||||
if checkCompletions(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if a.OnUsageError != nil {
|
||||
err = a.OnUsageError(context, err, true)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
|
||||
ShowSubcommandHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(a.Commands) > 0 {
|
||||
if checkSubcommandHelp(context) {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if checkCommandHelp(ctx, context.Args().First()) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if a.After != nil {
|
||||
defer func() {
|
||||
afterErr := a.After(context)
|
||||
if afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if a.Before != nil {
|
||||
err := a.Before(context)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
args := context.Args()
|
||||
if args.Present() {
|
||||
name := args.First()
|
||||
c := a.Command(name)
|
||||
if c != nil {
|
||||
return c.Run(context)
|
||||
}
|
||||
}
|
||||
|
||||
// Run default Action
|
||||
a.Action(context)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the named command on App. Returns nil if the command does not exist
|
||||
func (a *App) Command(name string) *Command {
|
||||
for _, c := range a.Commands {
|
||||
if c.HasName(name) {
|
||||
return &c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *App) hasFlag(flag Flag) bool {
|
||||
for _, f := range a.Flags {
|
||||
if flag == f {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *App) appendFlag(flag Flag) {
|
||||
if !a.hasFlag(flag) {
|
||||
a.Flags = append(a.Flags, flag)
|
||||
}
|
||||
}
|
||||
|
||||
// Author represents someone who has contributed to a cli project.
|
||||
type Author struct {
|
||||
Name string // The Authors name
|
||||
Email string // The Authors email
|
||||
}
|
||||
|
||||
// String makes Author comply to the Stringer interface, to allow an easy print in the templating process
|
||||
func (a Author) String() string {
|
||||
e := ""
|
||||
if a.Email != "" {
|
||||
e = "<" + a.Email + "> "
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v %v", a.Name, e)
|
||||
}
|

16  Godeps/_workspace/src/github.com/codegangsta/cli/appveyor.yml  (generated, vendored)
@@ -1,16 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

install:
  - go version
  - go env

build_script:
  - cd %APPVEYOR_BUILD_FOLDER%
  - go vet ./...
  - go test -v ./...

test: off

deploy: off

14  Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/bash_autocomplete  (generated, vendored)
@@ -1,14 +0,0 @@
#! /bin/bash

: ${PROG:=$(basename ${BASH_SOURCE})}

_cli_bash_autocomplete() {
  local cur opts base
  COMPREPLY=()
  cur="${COMP_WORDS[COMP_CWORD]}"
  opts=$( ${COMP_WORDS[@]:0:$COMP_CWORD} --generate-bash-completion )
  COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
  return 0
}

complete -F _cli_bash_autocomplete $PROG

5  Godeps/_workspace/src/github.com/codegangsta/cli/autocomplete/zsh_autocomplete  (generated, vendored)
@@ -1,5 +0,0 @@
autoload -U compinit && compinit
autoload -U bashcompinit && bashcompinit

script_dir=$(dirname $0)
source ${script_dir}/bash_autocomplete

40  Godeps/_workspace/src/github.com/codegangsta/cli/cli.go  (generated, vendored)
@@ -1,40 +0,0 @@
// Package cli provides a minimal framework for creating and organizing command line
// Go applications. cli is designed to be easy to understand and write, the most simple
// cli application can be written as follows:
//   func main() {
//     cli.NewApp().Run(os.Args)
//   }
//
// Of course this application does not do much, so let's make this an actual application:
//   func main() {
//     app := cli.NewApp()
//     app.Name = "greet"
//     app.Usage = "say a greeting"
//     app.Action = func(c *cli.Context) {
//       println("Greetings")
//     }
//
//     app.Run(os.Args)
//   }
package cli

import (
	"strings"
)

type MultiError struct {
	Errors []error
}

func NewMultiError(err ...error) MultiError {
	return MultiError{Errors: err}
}

func (m MultiError) Error() string {
	errs := make([]string, len(m.Errors))
	for i, err := range m.Errors {
		errs[i] = err.Error()
	}

	return strings.Join(errs, "\n")
}

250  Godeps/_workspace/src/github.com/codegangsta/cli/command.go  (generated, vendored)
@@ -1,250 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Command is a subcommand for a cli.App.
|
||||
type Command struct {
|
||||
// The name of the command
|
||||
Name string
|
||||
// short name of the command. Typically one character (deprecated, use `Aliases`)
|
||||
ShortName string
|
||||
// A list of aliases for the command
|
||||
Aliases []string
|
||||
// A short description of the usage of this command
|
||||
Usage string
|
||||
// Custom text to show on USAGE section of help
|
||||
UsageText string
|
||||
// A longer explanation of how the command works
|
||||
Description string
|
||||
// A short description of the arguments of this command
|
||||
ArgsUsage string
|
||||
// The function to call when checking for bash command completions
|
||||
BashComplete func(context *Context)
|
||||
// An action to execute before any sub-subcommands are run, but after the context is ready
|
||||
// If a non-nil error is returned, no sub-subcommands are run
|
||||
Before func(context *Context) error
|
||||
// An action to execute after any subcommands are run, but after the subcommand has finished
|
||||
// It is run even if Action() panics
|
||||
After func(context *Context) error
|
||||
// The function to call when this command is invoked
|
||||
Action func(context *Context)
|
||||
// Execute this function, if an usage error occurs. This is useful for displaying customized usage error messages.
|
||||
// This function is able to replace the original error messages.
|
||||
// If this function is not set, the "Incorrect usage" is displayed and the execution is interrupted.
|
||||
OnUsageError func(context *Context, err error) error
|
||||
// List of child commands
|
||||
Subcommands []Command
|
||||
// List of flags to parse
|
||||
Flags []Flag
|
||||
// Treat all flags as normal arguments if true
|
||||
SkipFlagParsing bool
|
||||
// Boolean to hide built-in help command
|
||||
HideHelp bool
|
||||
|
||||
// Full name of command for help, defaults to full command name, including parent commands.
|
||||
HelpName string
|
||||
commandNamePath []string
|
||||
}
|
||||
|
||||
// Returns the full name of the command.
|
||||
// For subcommands this ensures that parent commands are part of the command path
|
||||
func (c Command) FullName() string {
|
||||
if c.commandNamePath == nil {
|
||||
return c.Name
|
||||
}
|
||||
return strings.Join(c.commandNamePath, " ")
|
||||
}
|
||||
|
||||
// Invokes the command given the context, parses ctx.Args() to generate command-specific flags
|
||||
func (c Command) Run(ctx *Context) (err error) {
|
||||
if len(c.Subcommands) > 0 {
|
||||
return c.startApp(ctx)
|
||||
}
|
||||
|
||||
if !c.HideHelp && (HelpFlag != BoolFlag{}) {
|
||||
// append help to flags
|
||||
c.Flags = append(
|
||||
c.Flags,
|
||||
HelpFlag,
|
||||
)
|
||||
}
|
||||
|
||||
if ctx.App.EnableBashCompletion {
|
||||
c.Flags = append(c.Flags, BashCompletionFlag)
|
||||
}
|
||||
|
||||
set := flagSet(c.Name, c.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
|
||||
if !c.SkipFlagParsing {
|
||||
firstFlagIndex := -1
|
||||
terminatorIndex := -1
|
||||
for index, arg := range ctx.Args() {
|
||||
if arg == "--" {
|
||||
terminatorIndex = index
|
||||
break
|
||||
} else if arg == "-" {
|
||||
// Do nothing. A dash alone is not really a flag.
|
||||
continue
|
||||
} else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
|
||||
firstFlagIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
if firstFlagIndex > -1 {
|
||||
args := ctx.Args()
|
||||
regularArgs := make([]string, len(args[1:firstFlagIndex]))
|
||||
copy(regularArgs, args[1:firstFlagIndex])
|
||||
|
||||
var flagArgs []string
|
||||
if terminatorIndex > -1 {
|
||||
flagArgs = args[firstFlagIndex:terminatorIndex]
|
||||
regularArgs = append(regularArgs, args[terminatorIndex:]...)
|
||||
} else {
|
||||
flagArgs = args[firstFlagIndex:]
|
||||
}
|
||||
|
||||
err = set.Parse(append(flagArgs, regularArgs...))
|
||||
} else {
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
}
|
||||
} else {
|
||||
if c.SkipFlagParsing {
|
||||
err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...))
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if c.OnUsageError != nil {
|
||||
err := c.OnUsageError(ctx, err)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.")
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
nerr := normalizeFlags(c.Flags, set)
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(ctx.App.Writer, nerr)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return nerr
|
||||
}
|
||||
context := NewContext(ctx.App, set, ctx)
|
||||
|
||||
if checkCommandCompletions(context, c.Name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if checkCommandHelp(context, c.Name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.After != nil {
|
||||
defer func() {
|
||||
afterErr := c.After(context)
|
||||
if afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if c.Before != nil {
|
||||
err := c.Before(context)
|
||||
if err != nil {
|
||||
fmt.Fprintln(ctx.App.Writer, err)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
context.Command = c
|
||||
c.Action(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c Command) Names() []string {
|
||||
names := []string{c.Name}
|
||||
|
||||
if c.ShortName != "" {
|
||||
names = append(names, c.ShortName)
|
||||
}
|
||||
|
||||
return append(names, c.Aliases...)
|
||||
}
|
||||
|
||||
// Returns true if Command.Name or Command.ShortName matches given name
|
||||
func (c Command) HasName(name string) bool {
|
||||
for _, n := range c.Names() {
|
||||
if n == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c Command) startApp(ctx *Context) error {
|
||||
app := NewApp()
|
||||
|
||||
// set the name and usage
|
||||
app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
|
||||
if c.HelpName == "" {
|
||||
app.HelpName = c.HelpName
|
||||
} else {
|
||||
app.HelpName = app.Name
|
||||
}
|
||||
|
||||
if c.Description != "" {
|
||||
app.Usage = c.Description
|
||||
} else {
|
||||
app.Usage = c.Usage
|
||||
}
|
||||
|
||||
// set CommandNotFound
|
||||
app.CommandNotFound = ctx.App.CommandNotFound
|
||||
|
||||
// set the flags and commands
|
||||
app.Commands = c.Subcommands
|
||||
app.Flags = c.Flags
|
||||
app.HideHelp = c.HideHelp
|
||||
|
||||
app.Version = ctx.App.Version
|
||||
app.HideVersion = ctx.App.HideVersion
|
||||
app.Compiled = ctx.App.Compiled
|
||||
app.Author = ctx.App.Author
|
||||
app.Email = ctx.App.Email
|
||||
app.Writer = ctx.App.Writer
|
||||
|
||||
// bash completion
|
||||
app.EnableBashCompletion = ctx.App.EnableBashCompletion
|
||||
if c.BashComplete != nil {
|
||||
app.BashComplete = c.BashComplete
|
||||
}
|
||||
|
||||
// set the actions
|
||||
app.Before = c.Before
|
||||
app.After = c.After
|
||||
if c.Action != nil {
|
||||
app.Action = c.Action
|
||||
} else {
|
||||
app.Action = helpSubcommand.Action
|
||||
}
|
||||
|
||||
for index, cc := range app.Commands {
|
||||
app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
|
||||
}
|
||||
|
||||
return app.RunAsSubcommand(ctx)
|
||||
}
|

388  Godeps/_workspace/src/github.com/codegangsta/cli/context.go  (generated, vendored)
@@ -1,388 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Context is a type that is passed through to
|
||||
// each Handler action in a cli application. Context
|
||||
// can be used to retrieve context-specific Args and
|
||||
// parsed command-line options.
|
||||
type Context struct {
|
||||
App *App
|
||||
Command Command
|
||||
flagSet *flag.FlagSet
|
||||
setFlags map[string]bool
|
||||
globalSetFlags map[string]bool
|
||||
parentContext *Context
|
||||
}
|
||||
|
||||
// Creates a new context. For use in when invoking an App or Command action.
|
||||
func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
|
||||
return &Context{App: app, flagSet: set, parentContext: parentCtx}
|
||||
}
|
||||
|
||||
// Looks up the value of a local int flag, returns 0 if no int flag exists
|
||||
func (c *Context) Int(name string) int {
|
||||
return lookupInt(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists
|
||||
func (c *Context) Duration(name string) time.Duration {
|
||||
return lookupDuration(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists
|
||||
func (c *Context) Float64(name string) float64 {
|
||||
return lookupFloat64(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local bool flag, returns false if no bool flag exists
|
||||
func (c *Context) Bool(name string) bool {
|
||||
return lookupBool(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local boolT flag, returns false if no bool flag exists
|
||||
func (c *Context) BoolT(name string) bool {
|
||||
return lookupBoolT(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local string flag, returns "" if no string flag exists
|
||||
func (c *Context) String(name string) string {
|
||||
return lookupString(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local string slice flag, returns nil if no string slice flag exists
|
||||
func (c *Context) StringSlice(name string) []string {
|
||||
return lookupStringSlice(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local int slice flag, returns nil if no int slice flag exists
|
||||
func (c *Context) IntSlice(name string) []int {
|
||||
return lookupIntSlice(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local generic flag, returns nil if no generic flag exists
|
||||
func (c *Context) Generic(name string) interface{} {
|
||||
return lookupGeneric(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global int flag, returns 0 if no int flag exists
|
||||
func (c *Context) GlobalInt(name string) int {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupInt(name, fs)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists
|
||||
func (c *Context) GlobalDuration(name string) time.Duration {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupDuration(name, fs)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Looks up the value of a global bool flag, returns false if no bool flag exists
|
||||
func (c *Context) GlobalBool(name string) bool {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupBool(name, fs)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Looks up the value of a global string flag, returns "" if no string flag exists
|
||||
func (c *Context) GlobalString(name string) string {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupString(name, fs)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Looks up the value of a global string slice flag, returns nil if no string slice flag exists
|
||||
func (c *Context) GlobalStringSlice(name string) []string {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupStringSlice(name, fs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Looks up the value of a global int slice flag, returns nil if no int slice flag exists
|
||||
func (c *Context) GlobalIntSlice(name string) []int {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupIntSlice(name, fs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Looks up the value of a global generic flag, returns nil if no generic flag exists
|
||||
func (c *Context) GlobalGeneric(name string) interface{} {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupGeneric(name, fs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the number of flags set
|
||||
func (c *Context) NumFlags() int {
|
||||
return c.flagSet.NFlag()
|
||||
}
|
||||
|
||||
// Determines if the flag was actually set
|
||||
func (c *Context) IsSet(name string) bool {
|
||||
if c.setFlags == nil {
|
||||
c.setFlags = make(map[string]bool)
|
||||
c.flagSet.Visit(func(f *flag.Flag) {
|
||||
c.setFlags[f.Name] = true
|
||||
})
|
||||
}
|
||||
return c.setFlags[name] == true
|
||||
}
|
||||
|
||||
// Determines if the global flag was actually set
|
||||
func (c *Context) GlobalIsSet(name string) bool {
|
||||
if c.globalSetFlags == nil {
|
||||
c.globalSetFlags = make(map[string]bool)
|
||||
ctx := c
|
||||
if ctx.parentContext != nil {
|
||||
ctx = ctx.parentContext
|
||||
}
|
||||
for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext {
|
||||
ctx.flagSet.Visit(func(f *flag.Flag) {
|
||||
c.globalSetFlags[f.Name] = true
|
||||
})
|
||||
}
|
||||
}
|
||||
return c.globalSetFlags[name]
|
||||
}
|
||||
|
||||
// Returns a slice of flag names used in this context.
|
||||
func (c *Context) FlagNames() (names []string) {
|
||||
for _, flag := range c.Command.Flags {
|
||||
name := strings.Split(flag.GetName(), ",")[0]
|
||||
if name == "help" {
|
||||
continue
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Returns a slice of global flag names used by the app.
|
||||
func (c *Context) GlobalFlagNames() (names []string) {
|
||||
for _, flag := range c.App.Flags {
|
||||
name := strings.Split(flag.GetName(), ",")[0]
|
||||
if name == "help" || name == "version" {
|
||||
continue
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Returns the parent context, if any
|
||||
func (c *Context) Parent() *Context {
|
||||
return c.parentContext
|
||||
}
|
||||
|
||||
type Args []string
|
||||
|
||||
// Returns the command line arguments associated with the context.
|
||||
func (c *Context) Args() Args {
|
||||
args := Args(c.flagSet.Args())
|
||||
return args
|
||||
}
|
||||
|
||||
// Returns the nth argument, or else a blank string
|
||||
func (a Args) Get(n int) string {
|
||||
if len(a) > n {
|
||||
return a[n]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns the first argument, or else a blank string
|
||||
func (a Args) First() string {
|
||||
return a.Get(0)
|
||||
}
|
||||
|
||||
// Return the rest of the arguments (not the first one)
|
||||
// or else an empty string slice
|
||||
func (a Args) Tail() []string {
|
||||
if len(a) >= 2 {
|
||||
return []string(a)[1:]
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// Checks if there are any arguments present
|
||||
func (a Args) Present() bool {
|
||||
return len(a) != 0
|
||||
}
|
||||
|
||||
// Swaps arguments at the given indexes
|
||||
func (a Args) Swap(from, to int) error {
|
||||
if from >= len(a) || to >= len(a) {
|
||||
return errors.New("index out of range")
|
||||
}
|
||||
a[from], a[to] = a[to], a[from]
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
|
||||
if ctx.parentContext != nil {
|
||||
ctx = ctx.parentContext
|
||||
}
|
||||
for ; ctx != nil; ctx = ctx.parentContext {
|
||||
if f := ctx.flagSet.Lookup(name); f != nil {
|
||||
return ctx.flagSet
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupInt(name string, set *flag.FlagSet) int {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.Atoi(f.Value.String())
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func lookupDuration(name string, set *flag.FlagSet) time.Duration {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := time.ParseDuration(f.Value.String())
|
||||
if err == nil {
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func lookupFloat64(name string, set *flag.FlagSet) float64 {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.ParseFloat(f.Value.String(), 64)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func lookupString(name string, set *flag.FlagSet) string {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return f.Value.String()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func lookupStringSlice(name string, set *flag.FlagSet) []string {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return (f.Value.(*StringSlice)).Value()
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupIntSlice(name string, set *flag.FlagSet) []int {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return (f.Value.(*IntSlice)).Value()
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupGeneric(name string, set *flag.FlagSet) interface{} {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return f.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupBool(name string, set *flag.FlagSet) bool {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.ParseBool(f.Value.String())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func lookupBoolT(name string, set *flag.FlagSet) bool {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.ParseBool(f.Value.String())
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
|
||||
switch ff.Value.(type) {
|
||||
case *StringSlice:
|
||||
default:
|
||||
set.Set(name, ff.Value.String())
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
|
||||
visited := make(map[string]bool)
|
||||
set.Visit(func(f *flag.Flag) {
|
||||
visited[f.Name] = true
|
||||
})
|
||||
for _, f := range flags {
|
||||
parts := strings.Split(f.GetName(), ",")
|
||||
if len(parts) == 1 {
|
||||
continue
|
||||
}
|
||||
var ff *flag.Flag
|
||||
for _, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
if visited[name] {
|
||||
if ff != nil {
|
||||
return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
|
||||
}
|
||||
ff = set.Lookup(name)
|
||||
}
|
||||
}
|
||||
if ff == nil {
|
||||
continue
|
||||
}
|
||||
for _, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
if !visited[name] {
|
||||
copyFlag(name, ff, set)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|

546  Godeps/_workspace/src/github.com/codegangsta/cli/flag.go  (generated, vendored)
@@ -1,546 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This flag enables bash-completion for all commands and subcommands
|
||||
var BashCompletionFlag = BoolFlag{
|
||||
Name: "generate-bash-completion",
|
||||
}
|
||||
|
||||
// This flag prints the version for the application
|
||||
var VersionFlag = BoolFlag{
|
||||
Name: "version, v",
|
||||
Usage: "print the version",
|
||||
}
|
||||
|
||||
// This flag prints the help for all commands and subcommands
|
||||
// Set to the zero value (BoolFlag{}) to disable flag -- keeps subcommand
|
||||
// unless HideHelp is set to true)
|
||||
var HelpFlag = BoolFlag{
|
||||
Name: "help, h",
|
||||
Usage: "show help",
|
||||
}
|
||||
|
||||
// Flag is a common interface related to parsing flags in cli.
|
||||
// For more advanced flag parsing techniques, it is recommended that
|
||||
// this interface be implemented.
|
||||
type Flag interface {
|
||||
fmt.Stringer
|
||||
// Apply Flag settings to the given flag set
|
||||
Apply(*flag.FlagSet)
|
||||
GetName() string
|
||||
}
|
||||
|
||||
func flagSet(name string, flags []Flag) *flag.FlagSet {
|
||||
set := flag.NewFlagSet(name, flag.ContinueOnError)
|
||||
|
||||
for _, f := range flags {
|
||||
f.Apply(set)
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
func eachName(longName string, fn func(string)) {
|
||||
parts := strings.Split(longName, ",")
|
||||
for _, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
fn(name)
|
||||
}
|
||||
}
|
||||
|
||||
// Generic is a generic parseable type identified by a specific flag
|
||||
type Generic interface {
|
||||
Set(value string) error
|
||||
String() string
|
||||
}
|
||||
|
||||
// GenericFlag is the flag type for types implementing Generic
|
||||
type GenericFlag struct {
|
||||
Name string
|
||||
Value Generic
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the string representation of the generic flag to display the
|
||||
// help text to the user (uses the String() method of the generic flag to show
|
||||
// the value)
|
||||
func (f GenericFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
|
||||
}
|
||||
|
||||
func (f GenericFlag) FormatValueHelp() string {
|
||||
if f.Value == nil {
|
||||
return ""
|
||||
}
|
||||
s := f.Value.String()
|
||||
if len(s) == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("\"%s\"", s)
|
||||
}
|
||||
|
||||
// Apply takes the flagset and calls Set on the generic flag with the value
|
||||
// provided by the user for parsing by the flag
|
||||
func (f GenericFlag) Apply(set *flag.FlagSet) {
|
||||
val := f.Value
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
val.Set(envVal)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f GenericFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringSlice is an opaque type for []string to satisfy flag.Value
|
||||
type StringSlice []string
|
||||
|
||||
// Set appends the string value to the list of values
|
||||
func (f *StringSlice) Set(value string) error {
|
||||
*f = append(*f, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f *StringSlice) String() string {
|
||||
return fmt.Sprintf("%s", *f)
|
||||
}
|
||||
|
||||
// Value returns the slice of strings set by this flag
|
||||
func (f *StringSlice) Value() []string {
|
||||
return *f
|
||||
}
|
||||
|
||||
// StringSlice is a string flag that can be specified multiple times on the
|
||||
// command-line
|
||||
type StringSliceFlag struct {
|
||||
Name string
|
||||
Value *StringSlice
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f StringSliceFlag) String() string {
|
||||
firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
|
||||
pref := prefixFor(firstName)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f StringSliceFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
newVal := &StringSlice{}
|
||||
for _, s := range strings.Split(envVal, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
newVal.Set(s)
|
||||
}
|
||||
f.Value = newVal
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Value == nil {
|
||||
f.Value = &StringSlice{}
|
||||
}
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f StringSliceFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringSlice is an opaque type for []int to satisfy flag.Value
|
||||
type IntSlice []int
|
||||
|
||||
// Set parses the value into an integer and appends it to the list of values
|
||||
func (f *IntSlice) Set(value string) error {
|
||||
tmp, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
*f = append(*f, tmp)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f *IntSlice) String() string {
|
||||
return fmt.Sprintf("%d", *f)
|
||||
}
|
||||
|
||||
// Value returns the slice of ints set by this flag
|
||||
func (f *IntSlice) Value() []int {
|
||||
return *f
|
||||
}
|
||||
|
||||
// IntSliceFlag is an int flag that can be specified multiple times on the
|
||||
// command-line
|
||||
type IntSliceFlag struct {
|
||||
Name string
|
||||
Value *IntSlice
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f IntSliceFlag) String() string {
|
||||
firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
|
||||
pref := prefixFor(firstName)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f IntSliceFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
newVal := &IntSlice{}
|
||||
for _, s := range strings.Split(envVal, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
err := newVal.Set(s)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, err.Error())
|
||||
}
|
||||
}
|
||||
f.Value = newVal
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Value == nil {
|
||||
f.Value = &IntSlice{}
|
||||
}
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f IntSliceFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// BoolFlag is a switch that defaults to false
|
||||
type BoolFlag struct {
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *bool
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f BoolFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f BoolFlag) Apply(set *flag.FlagSet) {
|
||||
val := false
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValBool, err := strconv.ParseBool(envVal)
|
||||
if err == nil {
|
||||
val = envValBool
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.BoolVar(f.Destination, name, val, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Bool(name, val, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f BoolFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// BoolTFlag this represents a boolean flag that is true by default, but can
|
||||
// still be set to false by --some-flag=false
|
||||
type BoolTFlag struct {
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *bool
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f BoolTFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f BoolTFlag) Apply(set *flag.FlagSet) {
|
||||
val := true
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValBool, err := strconv.ParseBool(envVal)
|
||||
if err == nil {
|
||||
val = envValBool
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.BoolVar(f.Destination, name, val, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Bool(name, val, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f BoolTFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringFlag represents a flag that takes as string value
|
||||
type StringFlag struct {
|
||||
Name string
|
||||
Value string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f StringFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
|
||||
}
|
||||
|
||||
func (f StringFlag) FormatValueHelp() string {
|
||||
s := f.Value
|
||||
if len(s) == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("\"%s\"", s)
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f StringFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
f.Value = envVal
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.StringVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.String(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f StringFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// IntFlag is a flag that takes an integer
|
||||
// Errors if the value provided cannot be parsed
|
||||
type IntFlag struct {
|
||||
Name string
|
||||
Value int
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *int
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f IntFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f IntFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValInt, err := strconv.ParseInt(envVal, 0, 64)
|
||||
if err == nil {
|
||||
f.Value = int(envValInt)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.IntVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Int(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f IntFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// DurationFlag is a flag that takes a duration specified in Go's duration
|
||||
// format: https://golang.org/pkg/time/#ParseDuration
|
||||
type DurationFlag struct {
|
||||
Name string
|
||||
Value time.Duration
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *time.Duration
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f DurationFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f DurationFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValDuration, err := time.ParseDuration(envVal)
|
||||
if err == nil {
|
||||
f.Value = envValDuration
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.DurationVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Duration(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f DurationFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// Float64Flag is a flag that takes an float value
|
||||
// Errors if the value provided cannot be parsed
|
||||
type Float64Flag struct {
|
||||
Name string
|
||||
Value float64
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *float64
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f Float64Flag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f Float64Flag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValFloat, err := strconv.ParseFloat(envVal, 10)
|
||||
if err == nil {
|
||||
f.Value = float64(envValFloat)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.Float64Var(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Float64(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f Float64Flag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
func prefixFor(name string) (prefix string) {
|
||||
if len(name) == 1 {
|
||||
prefix = "-"
|
||||
} else {
|
||||
prefix = "--"
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func prefixedNames(fullName string) (prefixed string) {
|
||||
parts := strings.Split(fullName, ",")
|
||||
for i, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
prefixed += prefixFor(name) + name
|
||||
if i < len(parts)-1 {
|
||||
prefixed += ", "
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func withEnvHint(envVar, str string) string {
|
||||
envText := ""
|
||||
if envVar != "" {
|
||||
prefix := "$"
|
||||
suffix := ""
|
||||
sep := ", $"
|
||||
if runtime.GOOS == "windows" {
|
||||
prefix = "%"
|
||||
suffix = "%"
|
||||
sep = "%, %"
|
||||
}
|
||||
envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix)
|
||||
}
|
||||
return str + envText
|
||||
}
|

248  Godeps/_workspace/src/github.com/codegangsta/cli/help.go  (generated, vendored)
@@ -1,248 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// The text template for the Default help topic.
|
||||
// cli.go uses text/template to render templates. You can
|
||||
// render custom help text by setting this variable.
|
||||
var AppHelpTemplate = `NAME:
|
||||
{{.Name}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
|
||||
{{if .Version}}
|
||||
VERSION:
|
||||
{{.Version}}
|
||||
{{end}}{{if len .Authors}}
|
||||
AUTHOR(S):
|
||||
{{range .Authors}}{{ . }}{{end}}
|
||||
{{end}}{{if .Commands}}
COMMANDS:
   {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
   {{end}}{{end}}{{if .Flags}}
GLOBAL OPTIONS:
   {{range .Flags}}{{.}}
   {{end}}{{end}}{{if .Copyright }}
COPYRIGHT:
   {{.Copyright}}
   {{end}}
`

// The text template for the command help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var CommandHelpTemplate = `NAME:
   {{.HelpName}} - {{.Usage}}

USAGE:
   {{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Description}}

DESCRIPTION:
   {{.Description}}{{end}}{{if .Flags}}

OPTIONS:
   {{range .Flags}}{{.}}
   {{end}}{{ end }}
`

// The text template for the subcommand help topic.
// cli.go uses text/template to render templates. You can
// render custom help text by setting this variable.
var SubcommandHelpTemplate = `NAME:
   {{.HelpName}} - {{.Usage}}

USAGE:
   {{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}

COMMANDS:
   {{range .Commands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
   {{end}}{{if .Flags}}
OPTIONS:
   {{range .Flags}}{{.}}
   {{end}}{{end}}
`

var helpCommand = Command{
	Name:      "help",
	Aliases:   []string{"h"},
	Usage:     "Shows a list of commands or help for one command",
	ArgsUsage: "[command]",
	Action: func(c *Context) {
		args := c.Args()
		if args.Present() {
			ShowCommandHelp(c, args.First())
		} else {
			ShowAppHelp(c)
		}
	},
}

var helpSubcommand = Command{
	Name:      "help",
	Aliases:   []string{"h"},
	Usage:     "Shows a list of commands or help for one command",
	ArgsUsage: "[command]",
	Action: func(c *Context) {
		args := c.Args()
		if args.Present() {
			ShowCommandHelp(c, args.First())
		} else {
			ShowSubcommandHelp(c)
		}
	},
}

// Prints help for the App or Command
type helpPrinter func(w io.Writer, templ string, data interface{})

var HelpPrinter helpPrinter = printHelp

// Prints version for the App
var VersionPrinter = printVersion

func ShowAppHelp(c *Context) {
	HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
}

// Prints the list of subcommands as the default app completion method
func DefaultAppComplete(c *Context) {
	for _, command := range c.App.Commands {
		for _, name := range command.Names() {
			fmt.Fprintln(c.App.Writer, name)
		}
	}
}

// Prints help for the given command
func ShowCommandHelp(ctx *Context, command string) {
	// show the subcommand help for a command with subcommands
	if command == "" {
		HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
		return
	}

	for _, c := range ctx.App.Commands {
		if c.HasName(command) {
			HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
			return
		}
	}

	if ctx.App.CommandNotFound != nil {
		ctx.App.CommandNotFound(ctx, command)
	} else {
		fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command)
	}
}

// Prints help for the given subcommand
func ShowSubcommandHelp(c *Context) {
	ShowCommandHelp(c, c.Command.Name)
}

// Prints the version number of the App
func ShowVersion(c *Context) {
	VersionPrinter(c)
}

func printVersion(c *Context) {
	fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
}

// Prints the lists of commands within a given context
func ShowCompletions(c *Context) {
	a := c.App
	if a != nil && a.BashComplete != nil {
		a.BashComplete(c)
	}
}

// Prints the custom completions for a given command
func ShowCommandCompletions(ctx *Context, command string) {
	c := ctx.App.Command(command)
	if c != nil && c.BashComplete != nil {
		c.BashComplete(ctx)
	}
}

func printHelp(out io.Writer, templ string, data interface{}) {
	funcMap := template.FuncMap{
		"join": strings.Join,
	}

	w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0)
	t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
	err := t.Execute(w, data)
	if err != nil {
		// If the writer is closed, t.Execute will fail, and there's nothing
		// we can do to recover. We could send this to os.Stderr if we need.
		return
	}
	w.Flush()
}

func checkVersion(c *Context) bool {
	found := false
	if VersionFlag.Name != "" {
		eachName(VersionFlag.Name, func(name string) {
			if c.GlobalBool(name) || c.Bool(name) {
				found = true
			}
		})
	}
	return found
}

func checkHelp(c *Context) bool {
	found := false
	if HelpFlag.Name != "" {
		eachName(HelpFlag.Name, func(name string) {
			if c.GlobalBool(name) || c.Bool(name) {
				found = true
			}
		})
	}
	return found
}

func checkCommandHelp(c *Context, name string) bool {
	if c.Bool("h") || c.Bool("help") {
		ShowCommandHelp(c, name)
		return true
	}

	return false
}

func checkSubcommandHelp(c *Context) bool {
	if c.GlobalBool("h") || c.GlobalBool("help") {
		ShowSubcommandHelp(c)
		return true
	}

	return false
}

func checkCompletions(c *Context) bool {
	if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion {
		ShowCompletions(c)
		return true
	}

	return false
}

func checkCommandCompletions(c *Context, name string) bool {
	if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion {
		ShowCommandCompletions(c, name)
		return true
	}

	return false
}
13 Godeps/_workspace/src/github.com/davecgh/go-spew/LICENSE (generated, vendored)
@@ -1,13 +0,0 @@
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
341 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go (generated, vendored)
@@ -1,341 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisify an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len and values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
297 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go (generated, vendored)
@@ -1,297 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisify an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "disableunsafe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Printf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
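The ConfigState options removed in config.go above (Indent, MaxDepth, SortKeys, SpewKeys and friends) are easiest to see in use. A minimal sketch follows, not part of this diff; it assumes the upstream import path github.com/davecgh/go-spew/spew rather than this vendored copy.

// Illustrative sketch only: exercising the ConfigState fields documented in config.go.
package main

import "github.com/davecgh/go-spew/spew" // assumed upstream path of the vendored package

func main() {
	cfg := spew.ConfigState{
		Indent:   "\t", // one tab per nesting level instead of the default single space
		MaxDepth: 3,    // stop descending after three levels of nesting
		SortKeys: true, // sort map keys for deterministic, diffable output
	}

	m := map[string]int{"b": 2, "a": 1, "c": 3}
	cfg.Dump(m)        // dump to stdout with keys in sorted order
	s := cfg.Sdump(m)  // or capture the same output as a string
	_ = s
}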
202 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go (generated, vendored)
@@ -1,202 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types are as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
419 Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go (generated, vendored)
@@ -1,419 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by derferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound == true:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
|
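format.go above wires spew into the standard fmt package through NewFormatter and the formatState type. A minimal sketch of that integration follows, again not part of this diff and assuming the upstream import path github.com/davecgh/go-spew/spew; the node type is purely illustrative.

// Illustrative sketch only: the fmt.Formatter integration implemented in format.go.
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew" // assumed upstream path of the vendored package
)

type node struct {
	val  int
	next *node
}

func main() {
	n := &node{val: 1}
	n.next = n // circular reference; spew reports it as <shown> instead of recursing forever

	// Wrap the value so the plain fmt verbs gain pointer following and type info.
	fmt.Printf("%+v\n", spew.NewFormatter(n))

	// The package-level convenience wrappers convert every argument automatically.
	spew.Printf("%#v\n", n)
}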
12 Godeps/_workspace/src/github.com/ethereum/ethash/.gitignore (generated, vendored)
@@ -1,12 +0,0 @@
.idea/
.DS_Store
*/**/*un~
.vagrant/
*.pyc
build/
pyethash.egg-info/
*.so
*~
*.swp
MANIFEST
dist/
23 Godeps/_workspace/src/github.com/ethereum/ethash/.travis.yml (generated, vendored)
@@ -1,23 +0,0 @@
language: go
go:
  - 1.4.2

before_install:
  # for g++4.8 and C++11
  - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test

  # Set up go-ethereum
  - sudo apt-get update -y -qq
  - sudo apt-get install -yqq libgmp3-dev
  - git clone --depth=10 https://github.com/ethereum/go-ethereum ${GOPATH}/src/github.com/ethereum/go-ethereum
  # use canned dependencies from the go-ethereum repository
  - export GOPATH=$GOPATH:$GOPATH/src/github.com/ethereum/go-ethereum/Godeps/_workspace/
  - echo $GOPATH

install:
  # need to explicitly request version 1.48 since by default we get 1.46 which does not work with C++11
  - sudo apt-get install -qq --yes --force-yes g++-4.8
  - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-4.8 50
  - sudo apt-get install -qq wget cmake bash libboost-test1.48-dev libboost-system1.48-dev libboost-filesystem1.48-dev nodejs python-pip python-dev valgrind
  - sudo pip install virtualenv -q
script: "./test/test.sh"
14 Godeps/_workspace/src/github.com/ethereum/ethash/CMakeLists.txt (generated, vendored)
@@ -1,14 +0,0 @@
cmake_minimum_required(VERSION 2.8.7)
project(ethash)

set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
set(ETHHASH_LIBS ethash)

if (WIN32 AND WANT_CRYPTOPP)
	add_subdirectory(cryptopp)
endif()

add_subdirectory(src/libethash)

add_subdirectory(src/benchmark EXCLUDE_FROM_ALL)
add_subdirectory(test/c)
17 Godeps/_workspace/src/github.com/ethereum/ethash/MANIFEST.in (generated, vendored)
@@ -1,17 +0,0 @@
include setup.py

# C sources
include src/libethash/internal.c
include src/libethash/sha3.c
include src/libethash/util.c
include src/python/core.c

# Headers
include src/libethash/compiler.h
include src/libethash/data_sizes.h
include src/libethash/endian.h
include src/libethash/ethash.h
include src/libethash/fnv.h
include src/libethash/internal.h
include src/libethash/sha3.h
include src/libethash/util.h
6  Godeps/_workspace/src/github.com/ethereum/ethash/Makefile  generated vendored
@@ -1,6 +0,0 @@
.PHONY: clean test
test:
	./test/test.sh

clean:
	rm -rf *.so pyethash.egg-info/ build/ test/python/python-virtual-env/ test/c/build/ pyethash.so test/python/*.pyc dist/ MANIFEST
22  Godeps/_workspace/src/github.com/ethereum/ethash/README.md  generated vendored
@@ -1,22 +0,0 @@
[](https://travis-ci.org/ethereum/ethash)
[](https://ci.appveyor.com/project/debris/ethash-nr37r/branch/master)

# Ethash

For details on this project, please see the Ethereum wiki:
https://github.com/ethereum/wiki/wiki/Ethash

### Coding Style for C++ code:

Follow the same exact style as in [cpp-ethereum](https://github.com/ethereum/cpp-ethereum/blob/develop/CodingStandards.txt)

### Coding Style for C code:

The main thing above all is code consistency.

- Tabs for indentation. A tab is 4 spaces
- Try to stick to the [K&R](http://en.wikipedia.org/wiki/Indent_style#K.26R_style),
  especially for the C code.
- Keep the line lengths reasonable. No hard limit on 80 characters but don't go further
  than 110. Some people work with multiple buffers next to each other.
  Make them like you :)
7  Godeps/_workspace/src/github.com/ethereum/ethash/Vagrantfile  generated vendored
@@ -1,7 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure(2) do |config|
  config.vm.box = "Ubuntu 12.04"
  config.vm.box_url = "https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box"
end
43  Godeps/_workspace/src/github.com/ethereum/ethash/appveyor.yml  generated vendored
@@ -1,43 +0,0 @@
version: 1.0.0.{build}

environment:
  BOOST_ROOT: "c:/projects/ethash/deps/boost"

branches:
  only:
    - master
    - develop

os: Windows Server 2012 R2

clone_folder: c:\projects\ethash

#platform: Any CPU
#configuration: Debug

install:
  # by default, all script lines are interpreted as batch

# scripts to run before build
before_build:
  - echo "Downloading boost..."
  - mkdir c:\projects\ethash\deps
  - cd c:\projects\ethash\deps
  - curl -O https://build.ethdev.com/builds/windows-precompiled/boost.tar.gz
  - echo "Unzipping boost..."
  - 7z x boost.tar.gz > nul
  - 7z x boost.tar > nul
  - ls
  - echo "Running cmake..."
  - cd c:\projects\ethash
  - cmake .

build:
  project: ALL_BUILD.vcxproj # path to Visual Studio solution or project

after_build:
  - echo "Running tests..."
  - cd c:\projects\ethash\test\c\Debug
  - Test.exe
  - echo "Finished!"

441  Godeps/_workspace/src/github.com/ethereum/ethash/ethash.go  generated vendored
@@ -1,441 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// Copyright 2015 Lefteris Karapetsas <lefteris@refu.co>
|
||||
// Copyright 2015 Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
/*
|
||||
#include "src/libethash/internal.h"
|
||||
|
||||
int ethashGoCallback_cgo(unsigned);
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/logger"
|
||||
"github.com/ethereum/go-ethereum/logger/glog"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
var (
|
||||
maxUint256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
|
||||
sharedLight = new(Light)
|
||||
)
|
||||
|
||||
const (
|
||||
epochLength uint64 = 30000
|
||||
cacheSizeForTesting C.uint64_t = 1024
|
||||
dagSizeForTesting C.uint64_t = 1024 * 32
|
||||
)
|
||||
|
||||
var DefaultDir = defaultDir()
|
||||
|
||||
func defaultDir() string {
|
||||
home := os.Getenv("HOME")
|
||||
if user, err := user.Current(); err == nil {
|
||||
home = user.HomeDir
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
return filepath.Join(home, "AppData", "Ethash")
|
||||
}
|
||||
return filepath.Join(home, ".ethash")
|
||||
}
|
||||
|
||||
// cache wraps an ethash_light_t with some metadata
|
||||
// and automatic memory management.
|
||||
type cache struct {
|
||||
epoch uint64
|
||||
used time.Time
|
||||
test bool
|
||||
|
||||
gen sync.Once // ensures cache is only generated once.
|
||||
ptr *C.struct_ethash_light
|
||||
}
|
||||
|
||||
// generate creates the actual cache. it can be called from multiple
|
||||
// goroutines. the first call will generate the cache, subsequent
|
||||
// calls wait until it is generated.
|
||||
func (cache *cache) generate() {
|
||||
cache.gen.Do(func() {
|
||||
started := time.Now()
|
||||
seedHash := makeSeedHash(cache.epoch)
|
||||
glog.V(logger.Debug).Infof("Generating cache for epoch %d (%x)", cache.epoch, seedHash)
|
||||
size := C.ethash_get_cachesize(C.uint64_t(cache.epoch * epochLength))
|
||||
if cache.test {
|
||||
size = cacheSizeForTesting
|
||||
}
|
||||
cache.ptr = C.ethash_light_new_internal(size, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
|
||||
runtime.SetFinalizer(cache, freeCache)
|
||||
glog.V(logger.Debug).Infof("Done generating cache for epoch %d, it took %v", cache.epoch, time.Since(started))
|
||||
})
|
||||
}
|
||||
|
||||
func freeCache(cache *cache) {
|
||||
C.ethash_light_delete(cache.ptr)
|
||||
cache.ptr = nil
|
||||
}
|
||||
|
||||
func (cache *cache) compute(dagSize uint64, hash common.Hash, nonce uint64) (ok bool, mixDigest, result common.Hash) {
|
||||
ret := C.ethash_light_compute_internal(cache.ptr, C.uint64_t(dagSize), hashToH256(hash), C.uint64_t(nonce))
|
||||
// Make sure cache is live until after the C call.
|
||||
// This is important because a GC might happen and execute
|
||||
// the finalizer before the call completes.
|
||||
_ = cache
|
||||
return bool(ret.success), h256ToHash(ret.mix_hash), h256ToHash(ret.result)
|
||||
}
|
||||
|
||||
// Light implements the Verify half of the proof of work. It uses a few small
|
||||
// in-memory caches to verify the nonces found by Full.
|
||||
type Light struct {
|
||||
test bool // If set, use a smaller cache size
|
||||
|
||||
mu sync.Mutex // Protects the per-epoch map of verification caches
|
||||
caches map[uint64]*cache // Currently maintained verification caches
|
||||
future *cache // Pre-generated cache for the estimated future DAG
|
||||
|
||||
NumCaches int // Maximum number of caches to keep before eviction (only init, don't modify)
|
||||
}
|
||||
|
||||
// Verify checks whether the block's nonce is valid.
|
||||
func (l *Light) Verify(block pow.Block) bool {
|
||||
// TODO: do ethash_quick_verify before getCache in order
|
||||
// to prevent DOS attacks.
|
||||
blockNum := block.NumberU64()
|
||||
if blockNum >= epochLength*2048 {
|
||||
glog.V(logger.Debug).Infof("block number %d too high, limit is %d", blockNum, epochLength*2048)
|
||||
return false
|
||||
}
|
||||
|
||||
difficulty := block.Difficulty()
|
||||
/* Cannot happen if block header diff is validated prior to PoW, but can
|
||||
happen if PoW is checked first due to parallel PoW checking.
|
||||
We could check the minimum valid difficulty, but for SoC (separation of concerns) we avoid (duplicating)
Ethereum protocol consensus rules here, which are not in scope of Ethash.
|
||||
*/
|
||||
if difficulty.Cmp(common.Big0) == 0 {
|
||||
glog.V(logger.Debug).Infof("invalid block difficulty")
|
||||
return false
|
||||
}
|
||||
|
||||
cache := l.getCache(blockNum)
|
||||
dagSize := C.ethash_get_datasize(C.uint64_t(blockNum))
|
||||
if l.test {
|
||||
dagSize = dagSizeForTesting
|
||||
}
|
||||
// Recompute the hash using the cache.
|
||||
ok, mixDigest, result := cache.compute(uint64(dagSize), block.HashNoNonce(), block.Nonce())
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
// avoid mix digest malleability as it's not included in a block's "hashNoNonce"
|
||||
if block.MixDigest() != mixDigest {
|
||||
return false
|
||||
}
|
||||
|
||||
// The actual check.
|
||||
target := new(big.Int).Div(maxUint256, difficulty)
|
||||
return result.Big().Cmp(target) <= 0
|
||||
}
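For readers skimming the diff, the final comparison in Verify above is the standard Ethash acceptance rule: the 256-bit result, read as an integer, must not exceed 2^256 / difficulty. A small, self-contained sketch of just that check (the helper name validPoW is illustrative, not from the original code):

```go
package main

import (
	"fmt"
	"math/big"
)

// validPoW sketches the target comparison used at the end of Verify:
// the result hash is accepted when result <= 2^256 / difficulty.
func validPoW(result [32]byte, difficulty *big.Int) bool {
	maxUint256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	target := new(big.Int).Div(maxUint256, difficulty)
	return new(big.Int).SetBytes(result[:]).Cmp(target) <= 0
}

func main() {
	var res [32]byte // the all-zero hash trivially satisfies any target
	fmt.Println(validPoW(res, big.NewInt(131072))) // true
}
```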
|
||||
|
||||
func h256ToHash(in C.ethash_h256_t) common.Hash {
|
||||
return *(*common.Hash)(unsafe.Pointer(&in.b))
|
||||
}
|
||||
|
||||
func hashToH256(in common.Hash) C.ethash_h256_t {
|
||||
return C.ethash_h256_t{b: *(*[32]C.uint8_t)(unsafe.Pointer(&in[0]))}
|
||||
}
|
||||
|
||||
func (l *Light) getCache(blockNum uint64) *cache {
|
||||
var c *cache
|
||||
epoch := blockNum / epochLength
|
||||
|
||||
// If we have a PoW for that epoch, use that
|
||||
l.mu.Lock()
|
||||
if l.caches == nil {
|
||||
l.caches = make(map[uint64]*cache)
|
||||
}
|
||||
if l.NumCaches == 0 {
|
||||
l.NumCaches = 3
|
||||
}
|
||||
c = l.caches[epoch]
|
||||
if c == nil {
|
||||
// No cached DAG, evict the oldest if the cache limit was reached
|
||||
if len(l.caches) >= l.NumCaches {
|
||||
var evict *cache
|
||||
for _, cache := range l.caches {
|
||||
if evict == nil || evict.used.After(cache.used) {
|
||||
evict = cache
|
||||
}
|
||||
}
|
||||
glog.V(logger.Debug).Infof("Evicting DAG for epoch %d in favour of epoch %d", evict.epoch, epoch)
|
||||
delete(l.caches, evict.epoch)
|
||||
}
|
||||
// If we have the new DAG pre-generated, use that, otherwise create a new one
|
||||
if l.future != nil && l.future.epoch == epoch {
|
||||
glog.V(logger.Debug).Infof("Using pre-generated DAG for epoch %d", epoch)
|
||||
c, l.future = l.future, nil
|
||||
} else {
|
||||
glog.V(logger.Debug).Infof("No pre-generated DAG available, creating new for epoch %d", epoch)
|
||||
c = &cache{epoch: epoch, test: l.test}
|
||||
}
|
||||
l.caches[epoch] = c
|
||||
|
||||
// If we just used up the future cache, or need a refresh, regenerate
|
||||
if l.future == nil || l.future.epoch <= epoch {
|
||||
glog.V(logger.Debug).Infof("Pre-generating DAG for epoch %d", epoch+1)
|
||||
l.future = &cache{epoch: epoch + 1, test: l.test}
|
||||
go l.future.generate()
|
||||
}
|
||||
}
|
||||
c.used = time.Now()
|
||||
l.mu.Unlock()
|
||||
|
||||
// Wait for generation finish and return the cache
|
||||
c.generate()
|
||||
return c
|
||||
}
|
||||
|
||||
// dag wraps an ethash_full_t with some metadata
|
||||
// and automatic memory management.
|
||||
type dag struct {
|
||||
epoch uint64
|
||||
test bool
|
||||
dir string
|
||||
|
||||
gen sync.Once // ensures DAG is only generated once.
|
||||
ptr *C.struct_ethash_full
|
||||
}
|
||||
|
||||
// generate creates the actual DAG. it can be called from multiple
|
||||
// goroutines. the first call will generate the DAG, subsequent
|
||||
// calls wait until it is generated.
|
||||
func (d *dag) generate() {
|
||||
d.gen.Do(func() {
|
||||
var (
|
||||
started = time.Now()
|
||||
seedHash = makeSeedHash(d.epoch)
|
||||
blockNum = C.uint64_t(d.epoch * epochLength)
|
||||
cacheSize = C.ethash_get_cachesize(blockNum)
|
||||
dagSize = C.ethash_get_datasize(blockNum)
|
||||
)
|
||||
if d.test {
|
||||
cacheSize = cacheSizeForTesting
|
||||
dagSize = dagSizeForTesting
|
||||
}
|
||||
if d.dir == "" {
|
||||
d.dir = DefaultDir
|
||||
}
|
||||
glog.V(logger.Info).Infof("Generating DAG for epoch %d (size %d) (%x)", d.epoch, dagSize, seedHash)
|
||||
// Generate a temporary cache.
|
||||
// TODO: this could share the cache with Light
|
||||
cache := C.ethash_light_new_internal(cacheSize, (*C.ethash_h256_t)(unsafe.Pointer(&seedHash[0])))
|
||||
defer C.ethash_light_delete(cache)
|
||||
// Generate the actual DAG.
|
||||
d.ptr = C.ethash_full_new_internal(
|
||||
C.CString(d.dir),
|
||||
hashToH256(seedHash),
|
||||
dagSize,
|
||||
cache,
|
||||
(C.ethash_callback_t)(unsafe.Pointer(C.ethashGoCallback_cgo)),
|
||||
)
|
||||
if d.ptr == nil {
|
||||
panic("ethash_full_new IO or memory error")
|
||||
}
|
||||
runtime.SetFinalizer(d, freeDAG)
|
||||
glog.V(logger.Info).Infof("Done generating DAG for epoch %d, it took %v", d.epoch, time.Since(started))
|
||||
})
|
||||
}
|
||||
|
||||
func freeDAG(d *dag) {
|
||||
C.ethash_full_delete(d.ptr)
|
||||
d.ptr = nil
|
||||
}
|
||||
|
||||
func (d *dag) Ptr() unsafe.Pointer {
|
||||
return unsafe.Pointer(d.ptr.data)
|
||||
}
|
||||
|
||||
//export ethashGoCallback
|
||||
func ethashGoCallback(percent C.unsigned) C.int {
|
||||
glog.V(logger.Info).Infof("Generating DAG: %d%%", percent)
|
||||
return 0
|
||||
}
|
||||
|
||||
// MakeDAG pre-generates a DAG file for the given block number in the
|
||||
// given directory. If dir is the empty string, the default directory
|
||||
// is used.
|
||||
func MakeDAG(blockNum uint64, dir string) error {
|
||||
d := &dag{epoch: blockNum / epochLength, dir: dir}
|
||||
if blockNum >= epochLength*2048 {
|
||||
return fmt.Errorf("block number too high, limit is %d", epochLength*2048)
|
||||
}
|
||||
d.generate()
|
||||
if d.ptr == nil {
|
||||
return errors.New("failed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Full implements the Search half of the proof of work.
|
||||
type Full struct {
|
||||
Dir string // use this to specify a non-default DAG directory
|
||||
|
||||
test bool // if set use a smaller DAG size
|
||||
turbo bool
|
||||
hashRate int32
|
||||
|
||||
mu sync.Mutex // protects dag
|
||||
current *dag // current full DAG
|
||||
}
|
||||
|
||||
func (pow *Full) getDAG(blockNum uint64) (d *dag) {
|
||||
epoch := blockNum / epochLength
|
||||
pow.mu.Lock()
|
||||
if pow.current != nil && pow.current.epoch == epoch {
|
||||
d = pow.current
|
||||
} else {
|
||||
d = &dag{epoch: epoch, test: pow.test, dir: pow.Dir}
|
||||
pow.current = d
|
||||
}
|
||||
pow.mu.Unlock()
|
||||
// wait for it to finish generating.
|
||||
d.generate()
|
||||
return d
|
||||
}
|
||||
|
||||
func (pow *Full) Search(block pow.Block, stop <-chan struct{}, index int) (nonce uint64, mixDigest []byte) {
|
||||
dag := pow.getDAG(block.NumberU64())
|
||||
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
diff := block.Difficulty()
|
||||
|
||||
i := int64(0)
|
||||
starti := i
|
||||
start := time.Now().UnixNano()
|
||||
previousHashrate := int32(0)
|
||||
|
||||
nonce = uint64(r.Int63())
|
||||
hash := hashToH256(block.HashNoNonce())
|
||||
target := new(big.Int).Div(maxUint256, diff)
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
atomic.AddInt32(&pow.hashRate, -previousHashrate)
|
||||
return 0, nil
|
||||
default:
|
||||
i++
|
||||
|
||||
// we don't have to update hash rate on every nonce, so update after
|
||||
// first nonce check and then after 2^X nonces
|
||||
if i == 2 || ((i % (1 << 16)) == 0) {
|
||||
elapsed := time.Now().UnixNano() - start
|
||||
hashes := (float64(1e9) / float64(elapsed)) * float64(i-starti)
|
||||
hashrateDiff := int32(hashes) - previousHashrate
|
||||
previousHashrate = int32(hashes)
|
||||
atomic.AddInt32(&pow.hashRate, hashrateDiff)
|
||||
}
|
||||
|
||||
ret := C.ethash_full_compute(dag.ptr, hash, C.uint64_t(nonce))
|
||||
result := h256ToHash(ret.result).Big()
|
||||
|
||||
// TODO: disagrees with the spec https://github.com/ethereum/wiki/wiki/Ethash#mining
|
||||
if ret.success && result.Cmp(target) <= 0 {
|
||||
mixDigest = C.GoBytes(unsafe.Pointer(&ret.mix_hash), C.int(32))
|
||||
atomic.AddInt32(&pow.hashRate, -previousHashrate)
|
||||
return nonce, mixDigest
|
||||
}
|
||||
nonce += 1
|
||||
}
|
||||
|
||||
if !pow.turbo {
|
||||
time.Sleep(20 * time.Microsecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pow *Full) GetHashrate() int64 {
|
||||
return int64(atomic.LoadInt32(&pow.hashRate))
|
||||
}
|
||||
|
||||
func (pow *Full) Turbo(on bool) {
|
||||
// TODO: this needs to use an atomic operation.
|
||||
pow.turbo = on
|
||||
}
|
||||
|
||||
// Ethash combines block verification with Light and
|
||||
// nonce searching with Full into a single proof of work.
|
||||
type Ethash struct {
|
||||
*Light
|
||||
*Full
|
||||
}
|
||||
|
||||
// New creates an instance of the proof of work.
|
||||
func New() *Ethash {
|
||||
return &Ethash{new(Light), &Full{turbo: true}}
|
||||
}
|
||||
|
||||
// NewShared creates an instance of the proof of work, where a single instance
|
||||
// of the Light cache is shared across all instances created with NewShared.
|
||||
func NewShared() *Ethash {
|
||||
return &Ethash{sharedLight, &Full{turbo: true}}
|
||||
}
|
||||
|
||||
// NewForTesting creates a proof of work for use in unit tests.
|
||||
// It uses a smaller DAG and cache size to keep test times low.
|
||||
// DAG files are stored in a temporary directory.
|
||||
//
|
||||
// Nonces found by a testing instance are not verifiable with a
|
||||
// regular-size cache.
|
||||
func NewForTesting() (*Ethash, error) {
|
||||
dir, err := ioutil.TempDir("", "ethash-test")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Ethash{&Light{test: true}, &Full{Dir: dir, test: true}}, nil
|
||||
}
|
||||
|
||||
func GetSeedHash(blockNum uint64) ([]byte, error) {
|
||||
if blockNum >= epochLength*2048 {
|
||||
return nil, fmt.Errorf("block number too high, limit is %d", epochLength*2048)
|
||||
}
|
||||
sh := makeSeedHash(blockNum / epochLength)
|
||||
return sh[:], nil
|
||||
}
|
||||
|
||||
func makeSeedHash(epoch uint64) (sh common.Hash) {
|
||||
for ; epoch > 0; epoch-- {
|
||||
sh = crypto.Sha3Hash(sh[:])
|
||||
}
|
||||
return sh
|
||||
}
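A hedged, standalone illustration of the seed-hash rule implemented by makeSeedHash above: the seed for epoch N is Keccak-256 applied N times to a 32-byte zero value (assumption: the legacy Keccak-256 from golang.org/x/crypto/sha3 matches the crypto.Sha3Hash used here):

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// seedHash mirrors makeSeedHash: apply Keccak-256 to a zero hash, epoch times.
func seedHash(epoch uint64) [32]byte {
	var sh [32]byte
	for ; epoch > 0; epoch-- {
		h := sha3.NewLegacyKeccak256()
		h.Write(sh[:])
		h.Sum(sh[:0]) // overwrite sh with the 32-byte digest
	}
	return sh
}

func main() {
	// Block 30000 starts epoch 1, so its seed is a single Keccak-256 of zeros.
	fmt.Printf("%x\n", seedHash(30000/30000))
}
```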
|
628  Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl.go  generated vendored
@@ -1,628 +0,0 @@
|
||||
// Copyright 2014 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build opencl
|
||||
|
||||
package ethash
|
||||
|
||||
//#cgo LDFLAGS: -w
|
||||
//#include <stdint.h>
|
||||
//#include <string.h>
|
||||
//#include "src/libethash/internal.h"
|
||||
import "C"
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
mrand "math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/Gustav-Simonsson/go-opencl/cl"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/pow"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
This code has two main entry points:
|
||||
|
||||
1. The initCL(...) function configures one or more OpenCL devices
(for now only GPUs) and loads the Ethash DAG onto device memory
|
||||
|
||||
2. The Search(...) function loads an Ethash nonce into device(s) memory and
|
||||
executes the Ethash OpenCL kernel.
|
||||
|
||||
Throughout the code, we refer to "host memory" and "device memory".
|
||||
For most systems (e.g. regular PC GPU miner) the host memory is RAM and
|
||||
device memory is the GPU global memory (e.g. GDDR5).
|
||||
|
||||
References mentioned in code comments:
|
||||
|
||||
1. https://github.com/ethereum/wiki/wiki/Ethash
|
||||
2. https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner.cpp
|
||||
3. https://www.khronos.org/registry/cl/sdk/1.2/docs/man/xhtml/
|
||||
4. http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_OpenCL_Programming_User_Guide.pdf
|
||||
|
||||
*/
|
||||
|
||||
type OpenCLDevice struct {
|
||||
deviceId int
|
||||
device *cl.Device
|
||||
openCL11 bool // OpenCL version 1.1 and 1.2 are handled a bit different
|
||||
openCL12 bool
|
||||
|
||||
dagBuf *cl.MemObject // Ethash full DAG in device mem
|
||||
headerBuf *cl.MemObject // Hash of block-to-mine in device mem
|
||||
searchBuffers []*cl.MemObject
|
||||
|
||||
searchKernel *cl.Kernel
|
||||
hashKernel *cl.Kernel
|
||||
|
||||
queue *cl.CommandQueue
|
||||
ctx *cl.Context
|
||||
workGroupSize int
|
||||
|
||||
nonceRand *mrand.Rand // seeded by crypto/rand, see comments where it's initialised
|
||||
result common.Hash
|
||||
}
|
||||
|
||||
type OpenCLMiner struct {
|
||||
mu sync.Mutex
|
||||
|
||||
ethash *Ethash // Ethash full DAG & cache in host mem
|
||||
|
||||
deviceIds []int
|
||||
devices []*OpenCLDevice
|
||||
|
||||
dagSize uint64
|
||||
|
||||
hashRate int32 // Go atomics & uint64 have some issues; int32 is supported on all platforms
|
||||
}
|
||||
|
||||
type pendingSearch struct {
|
||||
bufIndex uint32
|
||||
startNonce uint64
|
||||
}
|
||||
|
||||
const (
|
||||
SIZEOF_UINT32 = 4
|
||||
|
||||
// See [1]
|
||||
ethashMixBytesLen = 128
|
||||
ethashAccesses = 64
|
||||
|
||||
// See [4]
|
||||
workGroupSize = 32 // must be multiple of 8
|
||||
maxSearchResults = 63
|
||||
searchBufSize = 2
|
||||
globalWorkSize = 1024 * 256
|
||||
)
|
||||
|
||||
func NewCL(deviceIds []int) *OpenCLMiner {
|
||||
ids := make([]int, len(deviceIds))
|
||||
copy(ids, deviceIds)
|
||||
return &OpenCLMiner{
|
||||
ethash: New(),
|
||||
dagSize: 0, // to see if we need to update DAG.
|
||||
deviceIds: ids,
|
||||
}
|
||||
}
|
||||
|
||||
func PrintDevices() {
|
||||
fmt.Println("=============================================")
|
||||
fmt.Println("============ OpenCL Device Info =============")
|
||||
fmt.Println("=============================================")
|
||||
|
||||
var found []*cl.Device
|
||||
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
fmt.Println("Platform error (check your OpenCL installation):", err)
|
||||
return
|
||||
}
|
||||
|
||||
for i, p := range platforms {
|
||||
fmt.Println("Platform id ", i)
|
||||
fmt.Println("Platform Name ", p.Name())
|
||||
fmt.Println("Platform Vendor ", p.Vendor())
|
||||
fmt.Println("Platform Version ", p.Version())
|
||||
fmt.Println("Platform Extensions ", p.Extensions())
|
||||
fmt.Println("Platform Profile ", p.Profile())
|
||||
fmt.Println("")
|
||||
|
||||
devices, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
fmt.Println("Device error (check your GPU drivers) :", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, d := range devices {
|
||||
fmt.Println("Device OpenCL id ", i)
|
||||
fmt.Println("Device id for mining ", len(found))
|
||||
fmt.Println("Device Name ", d.Name())
|
||||
fmt.Println("Vendor ", d.Vendor())
|
||||
fmt.Println("Version ", d.Version())
|
||||
fmt.Println("Driver version ", d.DriverVersion())
|
||||
fmt.Println("Address bits ", d.AddressBits())
|
||||
fmt.Println("Max clock freq ", d.MaxClockFrequency())
|
||||
fmt.Println("Global mem size ", d.GlobalMemSize())
|
||||
fmt.Println("Max constant buffer size", d.MaxConstantBufferSize())
|
||||
fmt.Println("Max mem alloc size ", d.MaxMemAllocSize())
|
||||
fmt.Println("Max compute units ", d.MaxComputeUnits())
|
||||
fmt.Println("Max work group size ", d.MaxWorkGroupSize())
|
||||
fmt.Println("Max work item sizes ", d.MaxWorkItemSizes())
|
||||
fmt.Println("=============================================")
|
||||
|
||||
found = append(found, d)
|
||||
}
|
||||
}
|
||||
if len(found) == 0 {
|
||||
fmt.Println("Found no GPU(s). Check that your OS can see the GPU(s)")
|
||||
} else {
|
||||
var idsFormat string
|
||||
for i := 0; i < len(found); i++ {
|
||||
idsFormat += strconv.Itoa(i)
|
||||
if i != len(found)-1 {
|
||||
idsFormat += ","
|
||||
}
|
||||
}
|
||||
fmt.Printf("Found %v devices. Benchmark first GPU: geth gpubench 0\n", len(found))
|
||||
fmt.Printf("Mine using all GPUs: geth --minegpu %v\n", idsFormat)
|
||||
}
|
||||
}
|
||||
|
||||
// See [2]. We basically do the same here, but the Go OpenCL bindings
|
||||
// are at a slightly higher abstraction level.
|
||||
func InitCL(blockNum uint64, c *OpenCLMiner) error {
|
||||
platforms, err := cl.GetPlatforms()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Platform error: %v\nCheck your OpenCL installation and then run geth gpuinfo", err)
|
||||
}
|
||||
|
||||
var devices []*cl.Device
|
||||
for _, p := range platforms {
|
||||
ds, err := cl.GetDevices(p, cl.DeviceTypeGPU)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Devices error: %v\nCheck your GPU drivers and then run geth gpuinfo", err)
|
||||
}
|
||||
for _, d := range ds {
|
||||
devices = append(devices, d)
|
||||
}
|
||||
}
|
||||
|
||||
pow := New()
|
||||
_ = pow.getDAG(blockNum) // generates DAG if we don't have it
|
||||
pow.Light.getCache(blockNum) // and cache
|
||||
|
||||
c.ethash = pow
|
||||
dagSize := uint64(C.ethash_get_datasize(C.uint64_t(blockNum)))
|
||||
c.dagSize = dagSize
|
||||
|
||||
for _, id := range c.deviceIds {
|
||||
if id > len(devices)-1 {
|
||||
return fmt.Errorf("Device id not found. See available device ids with: geth gpuinfo")
|
||||
} else {
|
||||
err := initCLDevice(id, devices[id], c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(c.devices) == 0 {
|
||||
return fmt.Errorf("No GPU devices found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func initCLDevice(deviceId int, device *cl.Device, c *OpenCLMiner) error {
|
||||
devMaxAlloc := uint64(device.MaxMemAllocSize())
|
||||
devGlobalMem := uint64(device.GlobalMemSize())
|
||||
|
||||
// TODO: more fine grained version logic
|
||||
if device.Version() == "OpenCL 1.0" {
|
||||
fmt.Println("Device OpenCL version not supported: ", device.Version())
|
||||
return fmt.Errorf("opencl version not supported")
|
||||
}
|
||||
|
||||
var cl11, cl12 bool
|
||||
if device.Version() == "OpenCL 1.1" {
|
||||
cl11 = true
|
||||
}
|
||||
if device.Version() == "OpenCL 1.2" {
|
||||
cl12 = true
|
||||
}
|
||||
|
||||
// log warnings but carry on; some device drivers report inaccurate values
|
||||
if c.dagSize > devGlobalMem {
|
||||
fmt.Printf("WARNING: device memory may be insufficient: %v. DAG size: %v.\n", devGlobalMem, c.dagSize)
|
||||
}
|
||||
|
||||
if c.dagSize > devMaxAlloc {
|
||||
fmt.Printf("WARNING: DAG size (%v) larger than device max memory allocation size (%v).\n", c.dagSize, devMaxAlloc)
|
||||
fmt.Printf("You probably have to export GPU_MAX_ALLOC_PERCENT=95\n")
|
||||
}
|
||||
|
||||
fmt.Printf("Initialising device %v: %v\n", deviceId, device.Name())
|
||||
|
||||
context, err := cl.CreateContext([]*cl.Device{device})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating context: %v", err)
|
||||
}
|
||||
|
||||
// TODO: test running with CL_QUEUE_PROFILING_ENABLE for profiling?
|
||||
queue, err := context.CreateCommandQueue(device, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("command queue err: %v", err)
|
||||
}
|
||||
|
||||
// See [4] section 3.2 and [3] "clBuildProgram".
|
||||
// The OpenCL kernel code is compiled at run-time.
|
||||
kvs := make(map[string]string, 4)
|
||||
kvs["GROUP_SIZE"] = strconv.FormatUint(workGroupSize, 10)
|
||||
kvs["DAG_SIZE"] = strconv.FormatUint(c.dagSize/ethashMixBytesLen, 10)
|
||||
kvs["ACCESSES"] = strconv.FormatUint(ethashAccesses, 10)
|
||||
kvs["MAX_OUTPUTS"] = strconv.FormatUint(maxSearchResults, 10)
|
||||
kernelCode := replaceWords(kernel, kvs)
|
||||
|
||||
program, err := context.CreateProgramWithSource([]string{kernelCode})
|
||||
if err != nil {
|
||||
return fmt.Errorf("program err: %v", err)
|
||||
}
|
||||
|
||||
/* if using AMD OpenCL impl, you can set this to debug on x86 CPU device.
|
||||
see AMD OpenCL programming guide section 4.2
|
||||
|
||||
export in shell before running:
|
||||
export AMD_OCL_BUILD_OPTIONS_APPEND="-g -O0"
|
||||
export CPU_MAX_COMPUTE_UNITS=1
|
||||
|
||||
buildOpts := "-g -cl-opt-disable"
|
||||
|
||||
*/
|
||||
buildOpts := ""
|
||||
err = program.BuildProgram([]*cl.Device{device}, buildOpts)
|
||||
if err != nil {
|
||||
return fmt.Errorf("program build err: %v", err)
|
||||
}
|
||||
|
||||
var searchKernelName, hashKernelName string
|
||||
searchKernelName = "ethash_search"
|
||||
hashKernelName = "ethash_hash"
|
||||
|
||||
searchKernel, err := program.CreateKernel(searchKernelName)
|
||||
hashKernel, err := program.CreateKernel(hashKernelName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("kernel err: %v", err)
|
||||
}
|
||||
|
||||
// TODO: when this DAG size appears, patch the Go bindings
|
||||
// (context.go) to work with uint64 as size_t
|
||||
if c.dagSize > math.MaxInt32 {
|
||||
fmt.Println("DAG too large for allocation.")
|
||||
return fmt.Errorf("DAG too large for alloc")
|
||||
}
|
||||
|
||||
// TODO: patch up Go bindings to work with size_t, will overflow if > maxint32
|
||||
// TODO: fuck. shit's gonna overflow around 2017-06-09 12:17:02
|
||||
dagBuf := *(new(*cl.MemObject))
|
||||
dagBuf, err = context.CreateEmptyBuffer(cl.MemReadOnly, int(c.dagSize))
|
||||
if err != nil {
|
||||
return fmt.Errorf("allocating dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
// write DAG to device mem
|
||||
dagPtr := unsafe.Pointer(c.ethash.Full.current.ptr.data)
|
||||
_, err = queue.EnqueueWriteBuffer(dagBuf, true, 0, int(c.dagSize), dagPtr, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("writing to dag buf failed: %v", err)
|
||||
}
|
||||
|
||||
searchBuffers := make([]*cl.MemObject, searchBufSize)
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
searchBuff, err := context.CreateEmptyBuffer(cl.MemWriteOnly, (1+maxSearchResults)*SIZEOF_UINT32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("search buffer err: %v", err)
|
||||
}
|
||||
searchBuffers[i] = searchBuff
|
||||
}
|
||||
|
||||
headerBuf, err := context.CreateEmptyBuffer(cl.MemReadOnly, 32)
|
||||
if err != nil {
|
||||
return fmt.Errorf("header buffer err: %v", err)
|
||||
}
|
||||
|
||||
// Unique, random nonces are crucial for mining efficiency.
|
||||
// While we do not need cryptographically secure PRNG for nonces,
|
||||
// we want to have uniform distribution and minimal repetition of nonces.
|
||||
// We could guarantee strict uniqueness of nonces by generating unique ranges,
|
||||
// but an int64 seed from crypto/rand should be good enough.
|
||||
// we then use math/rand for speed and to avoid draining OS entropy pool
|
||||
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nonceRand := mrand.New(mrand.NewSource(seed.Int64()))
|
||||
|
||||
deviceStruct := &OpenCLDevice{
|
||||
deviceId: deviceId,
|
||||
device: device,
|
||||
openCL11: cl11,
|
||||
openCL12: cl12,
|
||||
|
||||
dagBuf: dagBuf,
|
||||
headerBuf: headerBuf,
|
||||
searchBuffers: searchBuffers,
|
||||
|
||||
searchKernel: searchKernel,
|
||||
hashKernel: hashKernel,
|
||||
|
||||
queue: queue,
|
||||
ctx: context,
|
||||
|
||||
workGroupSize: workGroupSize,
|
||||
|
||||
nonceRand: nonceRand,
|
||||
}
|
||||
c.devices = append(c.devices, deviceStruct)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) {
|
||||
c.mu.Lock()
|
||||
newDagSize := uint64(C.ethash_get_datasize(C.uint64_t(block.NumberU64())))
|
||||
if newDagSize > c.dagSize {
|
||||
// TODO: clean up buffers from previous DAG?
|
||||
err := InitCL(block.NumberU64(), c)
|
||||
if err != nil {
|
||||
fmt.Println("OpenCL init error: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
defer c.mu.Unlock()
|
||||
|
||||
// Avoid unneeded OpenCL initialisation if we received stop while running InitCL
|
||||
select {
|
||||
case <-stop:
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
headerHash := block.HashNoNonce()
|
||||
diff := block.Difficulty()
|
||||
target256 := new(big.Int).Div(maxUint256, diff)
|
||||
target64 := new(big.Int).Rsh(target256, 192).Uint64()
|
||||
var zero uint32 = 0
|
||||
|
||||
d := c.devices[index]
|
||||
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.headerBuf, false, 0, 32, unsafe.Pointer(&headerHash[0]), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
for i := 0; i < searchBufSize; i++ {
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[i], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueWriterBuffer : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
// wait for all search buffers to complete
|
||||
err = d.queue.Finish()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clFinish : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(1, d.headerBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(2, d.dagBuf)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
err = d.searchKernel.SetArg(4, target64)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(5, uint32(math.MaxUint32))
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// wait on this before returning
|
||||
var preReturnEvent *cl.Event
|
||||
if d.openCL12 {
|
||||
preReturnEvent, err = d.ctx.CreateUserEvent()
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search create CL user event : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
|
||||
pending := make([]pendingSearch, 0, searchBufSize)
|
||||
var p *pendingSearch
|
||||
searchBufIndex := uint32(0)
|
||||
var checkNonce uint64
|
||||
loops := int64(0)
|
||||
prevHashRate := int32(0)
|
||||
start := time.Now().UnixNano()
|
||||
// we grab a single random nonce and set it as an argument to the kernel search function
// the device will then add each local thread's gid to the nonce, creating a unique nonce
|
||||
// for each device computing unit executing in parallel
|
||||
initNonce := uint64(d.nonceRand.Int63())
|
||||
for nonce := initNonce; ; nonce += uint64(globalWorkSize) {
|
||||
select {
|
||||
case <-stop:
|
||||
|
||||
/*
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
atomic.AddInt32(&c.hashRate, -prevHashRate)
|
||||
return 0, []byte{0}
|
||||
default:
|
||||
}
|
||||
|
||||
if (loops % (1 << 7)) == 0 {
|
||||
elapsed := time.Now().UnixNano() - start
|
||||
// TODO: verify if this is correct hash rate calculation
|
||||
hashes := (float64(1e9) / float64(elapsed)) * float64(loops*1024*256)
|
||||
hashrateDiff := int32(hashes) - prevHashRate
|
||||
prevHashRate = int32(hashes)
|
||||
atomic.AddInt32(&c.hashRate, hashrateDiff)
|
||||
}
|
||||
loops++
|
||||
|
||||
err = d.searchKernel.SetArg(0, d.searchBuffers[searchBufIndex])
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
err = d.searchKernel.SetArg(3, nonce)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clSetKernelArg : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
// execute kernel
|
||||
_, err := d.queue.EnqueueNDRangeKernel(
|
||||
d.searchKernel,
|
||||
[]int{0},
|
||||
[]int{globalWorkSize},
|
||||
[]int{d.workGroupSize},
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueNDRangeKernel : ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
pending = append(pending, pendingSearch{bufIndex: searchBufIndex, startNonce: nonce})
|
||||
searchBufIndex = (searchBufIndex + 1) % searchBufSize
|
||||
|
||||
if len(pending) == searchBufSize {
|
||||
p = &(pending[searchBufIndex])
|
||||
cres, _, err := d.queue.EnqueueMapBuffer(d.searchBuffers[p.bufIndex], true,
|
||||
cl.MapFlagRead, 0, (1+maxSearchResults)*SIZEOF_UINT32,
|
||||
nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueMapBuffer: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
results := cres.ByteSlice()
|
||||
nfound := binary.LittleEndian.Uint32(results)
|
||||
nfound = uint32(math.Min(float64(nfound), float64(maxSearchResults)))
|
||||
// OpenCL returns the offsets from the start nonce
|
||||
for i := uint32(0); i < nfound; i++ {
|
||||
lo := (i + 1) * SIZEOF_UINT32
|
||||
hi := (i + 2) * SIZEOF_UINT32
|
||||
upperNonce := uint64(binary.LittleEndian.Uint32(results[lo:hi]))
|
||||
checkNonce = p.startNonce + upperNonce
|
||||
if checkNonce != 0 {
|
||||
// We verify that the nonce is indeed a solution by
|
||||
// executing the Ethash verification function (on the CPU).
|
||||
cache := c.ethash.Light.getCache(block.NumberU64())
|
||||
ok, mixDigest, result := cache.compute(c.dagSize, headerHash, checkNonce)
|
||||
|
||||
// TODO: return result first
|
||||
if ok && result.Big().Cmp(target256) <= 0 {
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnmapMemObject: ", err)
|
||||
}
|
||||
if d.openCL12 {
|
||||
err = cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search WaitForEvents: ", err)
|
||||
}
|
||||
}
|
||||
return checkNonce, mixDigest.Bytes()
|
||||
}
|
||||
_, err := d.queue.EnqueueWriteBuffer(d.searchBuffers[p.bufIndex], false, 0, 4, unsafe.Pointer(&zero), nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search cl: EnqueueWriteBuffer", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = d.queue.EnqueueUnmapMemObject(d.searchBuffers[p.bufIndex], cres, nil)
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clEnqueueUnMapMemObject: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
pending = append(pending[:searchBufIndex], pending[searchBufIndex+1:]...)
|
||||
}
|
||||
}
|
||||
if d.openCL12 {
|
||||
err := cl.WaitForEvents([]*cl.Event{preReturnEvent})
|
||||
if err != nil {
|
||||
fmt.Println("Error in Search clWaitForEvents: ", err)
|
||||
return 0, []byte{0}
|
||||
}
|
||||
}
|
||||
return 0, []byte{0}
|
||||
}
|
||||
|
||||
func (c *OpenCLMiner) Verify(block pow.Block) bool {
|
||||
return c.ethash.Light.Verify(block)
|
||||
}
|
||||
func (c *OpenCLMiner) GetHashrate() int64 {
|
||||
return int64(atomic.LoadInt32(&c.hashRate))
|
||||
}
|
||||
func (c *OpenCLMiner) Turbo(on bool) {
|
||||
// This is GPU mining. Always be turbo.
|
||||
}
|
||||
|
||||
func replaceWords(text string, kvs map[string]string) string {
|
||||
for k, v := range kvs {
|
||||
text = strings.Replace(text, k, v, -1)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func logErr(err error) {
|
||||
if err != nil {
|
||||
fmt.Println("Error in OpenCL call:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func argErr(err error) error {
|
||||
return fmt.Errorf("arg err: %v", err)
|
||||
}
|
600  Godeps/_workspace/src/github.com/ethereum/ethash/ethash_opencl_kernel_go_str.go  generated vendored
@@ -1,600 +0,0 @@
|
||||
package ethash
|
||||
|
||||
/* DO NOT EDIT!!!
|
||||
|
||||
This code is version controlled at
|
||||
https://github.com/ethereum/cpp-ethereum/blob/develop/libethash-cl/ethash_cl_miner_kernel.cl
|
||||
|
||||
If needed change it there first, then copy over here.
|
||||
*/
|
||||
|
||||
const kernel = `
|
||||
// author Tim Hughes <tim@twistedfury.com>
|
||||
// Tested on Radeon HD 7850
|
||||
// Hashrate: 15940347 hashes/s
|
||||
// Bandwidth: 124533 MB/s
|
||||
// search kernel should fit in <= 84 VGPRS (3 wavefronts)
|
||||
|
||||
#define THREADS_PER_HASH (128 / 16)
|
||||
#define HASHES_PER_LOOP (GROUP_SIZE / THREADS_PER_HASH)
|
||||
|
||||
#define FNV_PRIME 0x01000193
|
||||
|
||||
__constant uint2 const Keccak_f1600_RC[24] = {
|
||||
(uint2)(0x00000001, 0x00000000),
|
||||
(uint2)(0x00008082, 0x00000000),
|
||||
(uint2)(0x0000808a, 0x80000000),
|
||||
(uint2)(0x80008000, 0x80000000),
|
||||
(uint2)(0x0000808b, 0x00000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008009, 0x80000000),
|
||||
(uint2)(0x0000008a, 0x00000000),
|
||||
(uint2)(0x00000088, 0x00000000),
|
||||
(uint2)(0x80008009, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x00000000),
|
||||
(uint2)(0x8000808b, 0x00000000),
|
||||
(uint2)(0x0000008b, 0x80000000),
|
||||
(uint2)(0x00008089, 0x80000000),
|
||||
(uint2)(0x00008003, 0x80000000),
|
||||
(uint2)(0x00008002, 0x80000000),
|
||||
(uint2)(0x00000080, 0x80000000),
|
||||
(uint2)(0x0000800a, 0x00000000),
|
||||
(uint2)(0x8000000a, 0x80000000),
|
||||
(uint2)(0x80008081, 0x80000000),
|
||||
(uint2)(0x00008080, 0x80000000),
|
||||
(uint2)(0x80000001, 0x00000000),
|
||||
(uint2)(0x80008008, 0x80000000),
|
||||
};
|
||||
|
||||
void keccak_f1600_round(uint2* a, uint r, uint out_size)
|
||||
{
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
|
||||
uint2 b[25];
|
||||
uint2 t;
|
||||
|
||||
// Theta
|
||||
b[0] = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20];
|
||||
b[1] = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21];
|
||||
b[2] = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22];
|
||||
b[3] = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23];
|
||||
b[4] = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24];
|
||||
t = b[4] ^ (uint2)(b[1].x << 1 | b[1].y >> 31, b[1].y << 1 | b[1].x >> 31);
|
||||
a[0] ^= t;
|
||||
a[5] ^= t;
|
||||
a[10] ^= t;
|
||||
a[15] ^= t;
|
||||
a[20] ^= t;
|
||||
t = b[0] ^ (uint2)(b[2].x << 1 | b[2].y >> 31, b[2].y << 1 | b[2].x >> 31);
|
||||
a[1] ^= t;
|
||||
a[6] ^= t;
|
||||
a[11] ^= t;
|
||||
a[16] ^= t;
|
||||
a[21] ^= t;
|
||||
t = b[1] ^ (uint2)(b[3].x << 1 | b[3].y >> 31, b[3].y << 1 | b[3].x >> 31);
|
||||
a[2] ^= t;
|
||||
a[7] ^= t;
|
||||
a[12] ^= t;
|
||||
a[17] ^= t;
|
||||
a[22] ^= t;
|
||||
t = b[2] ^ (uint2)(b[4].x << 1 | b[4].y >> 31, b[4].y << 1 | b[4].x >> 31);
|
||||
a[3] ^= t;
|
||||
a[8] ^= t;
|
||||
a[13] ^= t;
|
||||
a[18] ^= t;
|
||||
a[23] ^= t;
|
||||
t = b[3] ^ (uint2)(b[0].x << 1 | b[0].y >> 31, b[0].y << 1 | b[0].x >> 31);
|
||||
a[4] ^= t;
|
||||
a[9] ^= t;
|
||||
a[14] ^= t;
|
||||
a[19] ^= t;
|
||||
a[24] ^= t;
|
||||
|
||||
// Rho Pi
|
||||
b[0] = a[0];
|
||||
b[10] = (uint2)(a[1].x << 1 | a[1].y >> 31, a[1].y << 1 | a[1].x >> 31);
|
||||
b[7] = (uint2)(a[10].x << 3 | a[10].y >> 29, a[10].y << 3 | a[10].x >> 29);
|
||||
b[11] = (uint2)(a[7].x << 6 | a[7].y >> 26, a[7].y << 6 | a[7].x >> 26);
|
||||
b[17] = (uint2)(a[11].x << 10 | a[11].y >> 22, a[11].y << 10 | a[11].x >> 22);
|
||||
b[18] = (uint2)(a[17].x << 15 | a[17].y >> 17, a[17].y << 15 | a[17].x >> 17);
|
||||
b[3] = (uint2)(a[18].x << 21 | a[18].y >> 11, a[18].y << 21 | a[18].x >> 11);
|
||||
b[5] = (uint2)(a[3].x << 28 | a[3].y >> 4, a[3].y << 28 | a[3].x >> 4);
|
||||
b[16] = (uint2)(a[5].y << 4 | a[5].x >> 28, a[5].x << 4 | a[5].y >> 28);
|
||||
b[8] = (uint2)(a[16].y << 13 | a[16].x >> 19, a[16].x << 13 | a[16].y >> 19);
|
||||
b[21] = (uint2)(a[8].y << 23 | a[8].x >> 9, a[8].x << 23 | a[8].y >> 9);
|
||||
b[24] = (uint2)(a[21].x << 2 | a[21].y >> 30, a[21].y << 2 | a[21].x >> 30);
|
||||
b[4] = (uint2)(a[24].x << 14 | a[24].y >> 18, a[24].y << 14 | a[24].x >> 18);
|
||||
b[15] = (uint2)(a[4].x << 27 | a[4].y >> 5, a[4].y << 27 | a[4].x >> 5);
|
||||
b[23] = (uint2)(a[15].y << 9 | a[15].x >> 23, a[15].x << 9 | a[15].y >> 23);
|
||||
b[19] = (uint2)(a[23].y << 24 | a[23].x >> 8, a[23].x << 24 | a[23].y >> 8);
|
||||
b[13] = (uint2)(a[19].x << 8 | a[19].y >> 24, a[19].y << 8 | a[19].x >> 24);
|
||||
b[12] = (uint2)(a[13].x << 25 | a[13].y >> 7, a[13].y << 25 | a[13].x >> 7);
|
||||
b[2] = (uint2)(a[12].y << 11 | a[12].x >> 21, a[12].x << 11 | a[12].y >> 21);
|
||||
b[20] = (uint2)(a[2].y << 30 | a[2].x >> 2, a[2].x << 30 | a[2].y >> 2);
|
||||
b[14] = (uint2)(a[20].x << 18 | a[20].y >> 14, a[20].y << 18 | a[20].x >> 14);
|
||||
b[22] = (uint2)(a[14].y << 7 | a[14].x >> 25, a[14].x << 7 | a[14].y >> 25);
|
||||
b[9] = (uint2)(a[22].y << 29 | a[22].x >> 3, a[22].x << 29 | a[22].y >> 3);
|
||||
b[6] = (uint2)(a[9].x << 20 | a[9].y >> 12, a[9].y << 20 | a[9].x >> 12);
|
||||
b[1] = (uint2)(a[6].y << 12 | a[6].x >> 20, a[6].x << 12 | a[6].y >> 20);
|
||||
|
||||
// Chi
|
||||
a[0] = bitselect(b[0] ^ b[2], b[0], b[1]);
|
||||
a[1] = bitselect(b[1] ^ b[3], b[1], b[2]);
|
||||
a[2] = bitselect(b[2] ^ b[4], b[2], b[3]);
|
||||
a[3] = bitselect(b[3] ^ b[0], b[3], b[4]);
|
||||
if (out_size >= 4)
|
||||
{
|
||||
a[4] = bitselect(b[4] ^ b[1], b[4], b[0]);
|
||||
a[5] = bitselect(b[5] ^ b[7], b[5], b[6]);
|
||||
a[6] = bitselect(b[6] ^ b[8], b[6], b[7]);
|
||||
a[7] = bitselect(b[7] ^ b[9], b[7], b[8]);
|
||||
a[8] = bitselect(b[8] ^ b[5], b[8], b[9]);
|
||||
if (out_size >= 8)
|
||||
{
|
||||
a[9] = bitselect(b[9] ^ b[6], b[9], b[5]);
|
||||
a[10] = bitselect(b[10] ^ b[12], b[10], b[11]);
|
||||
a[11] = bitselect(b[11] ^ b[13], b[11], b[12]);
|
||||
a[12] = bitselect(b[12] ^ b[14], b[12], b[13]);
|
||||
a[13] = bitselect(b[13] ^ b[10], b[13], b[14]);
|
||||
a[14] = bitselect(b[14] ^ b[11], b[14], b[10]);
|
||||
a[15] = bitselect(b[15] ^ b[17], b[15], b[16]);
|
||||
a[16] = bitselect(b[16] ^ b[18], b[16], b[17]);
|
||||
a[17] = bitselect(b[17] ^ b[19], b[17], b[18]);
|
||||
a[18] = bitselect(b[18] ^ b[15], b[18], b[19]);
|
||||
a[19] = bitselect(b[19] ^ b[16], b[19], b[15]);
|
||||
a[20] = bitselect(b[20] ^ b[22], b[20], b[21]);
|
||||
a[21] = bitselect(b[21] ^ b[23], b[21], b[22]);
|
||||
a[22] = bitselect(b[22] ^ b[24], b[22], b[23]);
|
||||
a[23] = bitselect(b[23] ^ b[20], b[23], b[24]);
|
||||
a[24] = bitselect(b[24] ^ b[21], b[24], b[20]);
|
||||
}
|
||||
}
|
||||
|
||||
// Iota
|
||||
a[0] ^= Keccak_f1600_RC[r];
|
||||
|
||||
#if !__ENDIAN_LITTLE__
|
||||
for (uint i = 0; i != 25; ++i)
|
||||
a[i] = a[i].yx;
|
||||
#endif
|
||||
}
|
||||
|
||||
void keccak_f1600_no_absorb(ulong* a, uint in_size, uint out_size, uint isolate)
|
||||
{
|
||||
for (uint i = in_size; i != 25; ++i)
|
||||
{
|
||||
a[i] = 0;
|
||||
}
|
||||
#if __ENDIAN_LITTLE__
|
||||
a[in_size] ^= 0x0000000000000001;
|
||||
a[24-out_size*2] ^= 0x8000000000000000;
|
||||
#else
|
||||
a[in_size] ^= 0x0100000000000000;
|
||||
a[24-out_size*2] ^= 0x0000000000000080;
|
||||
#endif
|
||||
|
||||
// Originally I unrolled the first and last rounds to interface
|
||||
// better with surrounding code, however I haven't done this
|
||||
// without causing the AMD compiler to blow up the VGPR usage.
|
||||
uint r = 0;
|
||||
do
|
||||
{
|
||||
// This dynamic branch stops the AMD compiler unrolling the loop
|
||||
// and additionally saves about 33% of the VGPRs, enough to gain another
|
||||
// wavefront. Ideally we'd get 4 in flight, but 3 is the best I can
|
||||
// massage out of the compiler. It doesn't really seem to matter how
|
||||
// much we try and help the compiler save VGPRs because it seems to throw
|
||||
// that information away, hence the implementation of keccak here
|
||||
// doesn't bother.
|
||||
if (isolate)
|
||||
{
|
||||
keccak_f1600_round((uint2*)a, r++, 25);
|
||||
}
|
||||
}
|
||||
while (r < 23);
|
||||
|
||||
// final round optimised for digest size
|
||||
keccak_f1600_round((uint2*)a, r++, out_size);
|
||||
}
|
||||
|
||||
#define copy(dst, src, count) for (uint i = 0; i != count; ++i) { (dst)[i] = (src)[i]; }
|
||||
|
||||
#define countof(x) (sizeof(x) / sizeof(x[0]))
|
||||
|
||||
uint fnv(uint x, uint y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint4 fnv4(uint4 x, uint4 y)
|
||||
{
|
||||
return x * FNV_PRIME ^ y;
|
||||
}
|
||||
|
||||
uint fnv_reduce(uint4 v)
|
||||
{
|
||||
return fnv(fnv(fnv(v.x, v.y), v.z), v.w);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[32 / sizeof(ulong)];
|
||||
uint uints[32 / sizeof(uint)];
|
||||
} hash32_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
ulong ulongs[64 / sizeof(ulong)];
|
||||
uint4 uint4s[64 / sizeof(uint4)];
|
||||
} hash64_t;
|
||||
|
||||
typedef union
|
||||
{
|
||||
uint uints[128 / sizeof(uint)];
|
||||
uint4 uint4s[128 / sizeof(uint4)];
|
||||
} hash128_t;
|
||||
|
||||
hash64_t init_hash(__constant hash32_t const* header, ulong nonce, uint isolate)
|
||||
{
|
||||
hash64_t init;
|
||||
uint const init_size = countof(init.ulongs);
|
||||
uint const hash_size = countof(header->ulongs);
|
||||
|
||||
// sha3_512(header .. nonce)
|
||||
ulong state[25];
|
||||
copy(state, header->ulongs, hash_size);
|
||||
state[hash_size] = nonce;
|
||||
keccak_f1600_no_absorb(state, hash_size + 1, init_size, isolate);
|
||||
|
||||
copy(init.ulongs, state, init_size);
|
||||
return init;
|
||||
}
|
||||
|
||||
uint inner_loop_chunks(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, __global hash128_t const* g_dag1, __global hash128_t const* g_dag2, __global hash128_t const* g_dag3, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, *share>=3 * DAG_SIZE / 4 ? g_dag3[*share - 3 * DAG_SIZE / 4].uint4s[thread_id] : *share>=DAG_SIZE / 2 ? g_dag2[*share - DAG_SIZE / 2].uint4s[thread_id] : *share>=DAG_SIZE / 4 ? g_dag1[*share - DAG_SIZE / 4].uint4s[thread_id]:g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
} while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
|
||||
uint inner_loop(uint4 init, uint thread_id, __local uint* share, __global hash128_t const* g_dag, uint isolate)
|
||||
{
|
||||
uint4 mix = init;
|
||||
|
||||
// share init0
|
||||
if (thread_id == 0)
|
||||
*share = mix.x;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
uint init0 = *share;
|
||||
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
bool update_share = thread_id == (a/4) % THREADS_PER_HASH;
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != 4; ++i)
|
||||
{
|
||||
if (update_share)
|
||||
{
|
||||
uint m[4] = { mix.x, mix.y, mix.z, mix.w };
|
||||
*share = fnv(init0 ^ (a+i), m[i]) % DAG_SIZE;
|
||||
}
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
mix = fnv4(mix, g_dag[*share].uint4s[thread_id]);
|
||||
}
|
||||
}
|
||||
while ((a += 4) != (ACCESSES & isolate));
|
||||
|
||||
return fnv_reduce(mix);
|
||||
}
|
||||
|
||||
|
||||
hash32_t final_hash(hash64_t const* init, hash32_t const* mix, uint isolate)
|
||||
{
|
||||
ulong state[25];
|
||||
|
||||
hash32_t hash;
|
||||
uint const hash_size = countof(hash.ulongs);
|
||||
uint const init_size = countof(init->ulongs);
|
||||
uint const mix_size = countof(mix->ulongs);
|
||||
|
||||
// keccak_256(keccak_512(header..nonce) .. mix);
|
||||
copy(state, init->ulongs, init_size);
|
||||
copy(state + init_size, mix->ulongs, mix_size);
|
||||
keccak_f1600_no_absorb(state, init_size+mix_size, hash_size, isolate);
|
||||
|
||||
// copy out
|
||||
copy(hash.ulongs, state, hash_size);
|
||||
return hash;
|
||||
}
|
||||
|
||||
hash32_t compute_hash_simple(
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
hash128_t mix;
|
||||
for (uint i = 0; i != countof(mix.uint4s); ++i)
|
||||
{
|
||||
mix.uint4s[i] = init.uint4s[i % countof(init.uint4s)];
|
||||
}
|
||||
|
||||
uint mix_val = mix.uints[0];
|
||||
uint init0 = mix.uints[0];
|
||||
uint a = 0;
|
||||
do
|
||||
{
|
||||
uint pi = fnv(init0 ^ a, mix_val) % DAG_SIZE;
|
||||
uint n = (a+1) % countof(mix.uints);
|
||||
|
||||
#pragma unroll
|
||||
for (uint i = 0; i != countof(mix.uints); ++i)
|
||||
{
|
||||
mix.uints[i] = fnv(mix.uints[i], g_dag[pi].uints[i]);
|
||||
mix_val = i == n ? mix.uints[i] : mix_val;
|
||||
}
|
||||
}
|
||||
while (++a != (ACCESSES & isolate));
|
||||
|
||||
// reduce to output
|
||||
hash32_t fnv_mix;
|
||||
for (uint i = 0; i != countof(fnv_mix.uints); ++i)
|
||||
{
|
||||
fnv_mix.uints[i] = fnv_reduce(mix.uint4s[i]);
|
||||
}
|
||||
|
||||
return final_hash(&init, &fnv_mix, isolate);
|
||||
}
|
||||
|
||||
typedef union
|
||||
{
|
||||
struct
|
||||
{
|
||||
hash64_t init;
|
||||
uint pad; // avoid lds bank conflicts
|
||||
};
|
||||
hash32_t mix;
|
||||
} compute_hash_share;
|
||||
|
||||
|
||||
hash32_t compute_hash(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint thread_mix = inner_loop(thread_init, thread_id, share[hash_id].mix.uints, g_dag, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
|
||||
hash32_t compute_hash_chunks(
|
||||
__local compute_hash_share* share,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
|
||||
// Compute one init hash per work item.
|
||||
hash64_t init = init_hash(g_header, nonce, isolate);
|
||||
|
||||
// Threads work together in this phase in groups of 8.
|
||||
uint const thread_id = gid % THREADS_PER_HASH;
|
||||
uint const hash_id = (gid % GROUP_SIZE) / THREADS_PER_HASH;
|
||||
|
||||
hash32_t mix;
|
||||
uint i = 0;
|
||||
do
|
||||
{
|
||||
// share init with other threads
|
||||
if (i == thread_id)
|
||||
share[hash_id].init = init;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
uint4 thread_init = share[hash_id].init.uint4s[thread_id % (64 / sizeof(uint4))];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
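// Same mixing step as compute_hash, but the DAG is read from four separate buffers (g_dag..g_dag3).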
uint thread_mix = inner_loop_chunks(thread_init, thread_id, share[hash_id].mix.uints, g_dag, g_dag1, g_dag2, g_dag3, isolate);
|
||||
|
||||
share[hash_id].mix.uints[thread_id] = thread_mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
if (i == thread_id)
|
||||
mix = share[hash_id].mix;
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
}
|
||||
while (++i != (THREADS_PER_HASH & isolate));
|
||||
|
||||
return final_hash(&init, &mix, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_simple(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_simple(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_simple(g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
if (hash.ulongs[countof(hash.ulongs)-1] < target)
|
||||
{
|
||||
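// g_output[0] holds the result count; clamp the slot index so extra results reuse the last slot instead of writing past MAX_OUTPUTS.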
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash(share, g_header, g_dag, start_nonce + gid, isolate);
|
||||
|
||||
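// Byte-swap the first 64 bits of the hash so the little-endian words compare as a big-endian value against the target.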
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min((uint)MAX_OUTPUTS, atomic_inc(&g_output[0]) + 1);
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_hash_chunks(
|
||||
__global hash32_t* g_hashes,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
g_hashes[gid] = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3, start_nonce + gid, isolate);
|
||||
}
|
||||
|
||||
__attribute__((reqd_work_group_size(GROUP_SIZE, 1, 1)))
|
||||
__kernel void ethash_search_chunks(
|
||||
__global volatile uint* restrict g_output,
|
||||
__constant hash32_t const* g_header,
|
||||
__global hash128_t const* g_dag,
|
||||
__global hash128_t const* g_dag1,
|
||||
__global hash128_t const* g_dag2,
|
||||
__global hash128_t const* g_dag3,
|
||||
ulong start_nonce,
|
||||
ulong target,
|
||||
uint isolate
|
||||
)
|
||||
{
|
||||
__local compute_hash_share share[HASHES_PER_LOOP];
|
||||
|
||||
uint const gid = get_global_id(0);
|
||||
hash32_t hash = compute_hash_chunks(share, g_header, g_dag, g_dag1, g_dag2, g_dag3, start_nonce + gid, isolate);
|
||||
|
||||
if (as_ulong(as_uchar8(hash.ulongs[0]).s76543210) < target)
|
||||
{
|
||||
uint slot = min(convert_uint(MAX_OUTPUTS), convert_uint(atomic_inc(&g_output[0]) + 1));
|
||||
g_output[slot] = gid;
|
||||
}
|
||||
}
|
||||
`
|
51 Godeps/_workspace/src/github.com/ethereum/ethash/ethashc.go (generated, vendored)
@@ -1,51 +0,0 @@
|
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package ethash
|
||||
|
||||
/*
|
||||
-mno-stack-arg-probe disables stack probing which avoids the function
|
||||
__chkstk_ms being linked. this avoids a clash of this symbol as we also
|
||||
separately link the secp256k1 lib which ends up defining this symbol
|
||||
|
||||
1. https://gcc.gnu.org/onlinedocs/gccint/Stack-Checking.html
|
||||
2. https://groups.google.com/forum/#!msg/golang-dev/v1bziURSQ4k/88fXuJ24e-gJ
|
||||
3. https://groups.google.com/forum/#!topic/golang-nuts/VNP6Mwz_B6o
|
||||
|
||||
*/
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -std=gnu99 -Wall
|
||||
#cgo windows CFLAGS: -mno-stack-arg-probe
|
||||
#cgo LDFLAGS: -lm
|
||||
|
||||
#include "src/libethash/internal.c"
|
||||
#include "src/libethash/sha3.c"
|
||||
#include "src/libethash/io.c"
|
||||
|
||||
#ifdef _WIN32
|
||||
# include "src/libethash/io_win32.c"
|
||||
# include "src/libethash/mmap_win32.c"
|
||||
#else
|
||||
# include "src/libethash/io_posix.c"
|
||||
#endif
|
||||
|
||||
// 'gateway function' for calling back into go.
|
||||
extern int ethashGoCallback(unsigned);
|
||||
int ethashGoCallback_cgo(unsigned percent) { return ethashGoCallback(percent); }
|
||||
|
||||
*/
|
||||
import "C"
|
47 Godeps/_workspace/src/github.com/ethereum/ethash/setup.py (generated, vendored)
@@ -1,47 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import os
|
||||
from distutils.core import setup, Extension
|
||||
sources = [
|
||||
'src/python/core.c',
|
||||
'src/libethash/io.c',
|
||||
'src/libethash/internal.c',
|
||||
'src/libethash/sha3.c']
|
||||
if os.name == 'nt':
|
||||
sources += [
|
||||
'src/libethash/util_win32.c',
|
||||
'src/libethash/io_win32.c',
|
||||
'src/libethash/mmap_win32.c',
|
||||
]
|
||||
else:
|
||||
sources += [
|
||||
'src/libethash/io_posix.c'
|
||||
]
|
||||
depends = [
|
||||
'src/libethash/ethash.h',
|
||||
'src/libethash/compiler.h',
|
||||
'src/libethash/data_sizes.h',
|
||||
'src/libethash/endian.h',
|
||||
'src/libethash/ethash.h',
|
||||
'src/libethash/io.h',
|
||||
'src/libethash/fnv.h',
|
||||
'src/libethash/internal.h',
|
||||
'src/libethash/sha3.h',
|
||||
'src/libethash/util.h',
|
||||
]
|
||||
pyethash = Extension('pyethash',
|
||||
sources=sources,
|
||||
depends=depends,
|
||||
extra_compile_args=["-Isrc/", "-std=gnu99", "-Wall"])
|
||||
|
||||
setup(
|
||||
name='pyethash',
|
||||
author="Matthew Wampler-Doty",
|
||||
author_email="matthew.wampler.doty@gmail.com",
|
||||
license='GPL',
|
||||
version='0.1.23',
|
||||
url='https://github.com/ethereum/ethash',
|
||||
download_url='https://github.com/ethereum/ethash/tarball/v23',
|
||||
description=('Python wrappers for ethash, the ethereum proof of work'
|
||||
'hashing function'),
|
||||
ext_modules=[pyethash],
|
||||
)
|
3 Godeps/_workspace/src/github.com/fatih/color/.travis.yml (generated, vendored)
@@ -1,3 +0,0 @@
language: go
go: 1.3
151 Godeps/_workspace/src/github.com/fatih/color/README.md (generated, vendored)
@@ -1,151 +0,0 @@
|
||||
# Color [](http://godoc.org/github.com/fatih/color) [](https://travis-ci.org/fatih/color)
|
||||
|
||||
|
||||
|
||||
Color lets you use colorized outputs in terms of [ANSI Escape Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It has support for Windows too! The API can be used in several ways, pick one that suits you.
|
||||
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
## Install
|
||||
|
||||
```bash
|
||||
go get github.com/fatih/color
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Standard colors
|
||||
|
||||
```go
|
||||
// Print with default helper functions
|
||||
color.Cyan("Prints text in cyan.")
|
||||
|
||||
// A newline will be appended automatically
|
||||
color.Blue("Prints %s in blue.", "text")
|
||||
|
||||
// These are using the default foreground colors
|
||||
color.Red("We have red")
|
||||
color.Magenta("And many others ..")
|
||||
|
||||
```
|
||||
|
||||
### Mix and reuse colors
|
||||
|
||||
```go
|
||||
// Create a new color object
|
||||
c := color.New(color.FgCyan).Add(color.Underline)
|
||||
c.Println("Prints cyan text with an underline.")
|
||||
|
||||
// Or just add them to New()
|
||||
d := color.New(color.FgCyan, color.Bold)
|
||||
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||
|
||||
// Mix up foreground and background colors, create new mixes!
|
||||
red := color.New(color.FgRed)
|
||||
|
||||
boldRed := red.Add(color.Bold)
|
||||
boldRed.Println("This will print text in bold red.")
|
||||
|
||||
whiteBackground := red.Add(color.BgWhite)
|
||||
whiteBackground.Println("Red text with white background.")
|
||||
```
|
||||
|
||||
### Custom print functions (PrintFunc)
|
||||
|
||||
```go
|
||||
// Create a custom print function for convenience
|
||||
red := color.New(color.FgRed).PrintfFunc()
|
||||
red("Warning")
|
||||
red("Error: %s", err)
|
||||
|
||||
// Mix up multiple attributes
|
||||
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||
notice("Don't forget this...")
|
||||
```
|
||||
|
||||
### Insert into noncolor strings (SprintFunc)
|
||||
|
||||
```go
|
||||
// Create SprintXxx functions to mix strings with other non-colorized strings:
|
||||
yellow := color.New(color.FgYellow).SprintFunc()
|
||||
red := color.New(color.FgRed).SprintFunc()
|
||||
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||
|
||||
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
|
||||
fmt.Printf("This %s rocks!\n", info("package"))
|
||||
|
||||
// Use helper functions
|
||||
fmt.Printf("This", color.RedString("warning"), "should be not neglected.")
|
||||
fmt.Println(color.GreenString("Info:"), "an important message.")
|
||||
|
||||
// Windows supported too! Just don't forget to change the output to color.Output
|
||||
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||
```
|
||||
|
||||
### Plug into existing code
|
||||
|
||||
```go
|
||||
// Use handy standard colors
|
||||
color.Set(color.FgYellow)
|
||||
|
||||
fmt.Println("Existing text will now be in yellow")
|
||||
fmt.Printf("This one %s\n", "too")
|
||||
|
||||
color.Unset() // Don't forget to unset
|
||||
|
||||
// You can mix up parameters
|
||||
color.Set(color.FgMagenta, color.Bold)
|
||||
defer color.Unset() // Use it in your function
|
||||
|
||||
fmt.Println("All text will now be bold magenta.")
|
||||
```
|
||||
|
||||
### Disable color
|
||||
|
||||
There might be a case where you want to disable color output (for example to
|
||||
pipe the standard output of your app to somewhere else). `Color` has support to
|
||||
disable colors both globally and for single color definition. For example
|
||||
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
|
||||
the color output with:
|
||||
|
||||
```go
|
||||
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
|
||||
if *flagNoColor {
|
||||
color.NoColor = true // disables colorized output
|
||||
}
|
||||
```
|
||||
|
||||
It also has support for single color definitions (local). You can
|
||||
disable/enable color output on the fly:
|
||||
|
||||
```go
|
||||
c := color.New(color.FgCyan)
|
||||
c.Println("Prints cyan text")
|
||||
|
||||
c.DisableColor()
|
||||
c.Println("This is printed without any color")
|
||||
|
||||
c.EnableColor()
|
||||
c.Println("This prints again cyan...")
|
||||
```
|
||||
|
||||
## Todo
|
||||
|
||||
* Save/Return previous values
|
||||
* Evaluate fmt.Formatter interface
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
* [Fatih Arslan](https://github.com/fatih)
|
||||
* Windows support via @shiena: [ansicolor](https://github.com/shiena/ansicolor)
|
||||
|
||||
## License
|
||||
|
||||
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
|
||||
|
402 Godeps/_workspace/src/github.com/fatih/color/color.go (generated, vendored)
@@ -1,402 +0,0 @@
|
||||
package color
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mattn/go-colorable"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// NoColor defines if the output is colorized or not. It's dynamically set to
|
||||
// false or true based on the stdout's file descriptor referring to a terminal
|
||||
// or not. This is a global option and affects all colors. For more control
|
||||
// over each color block use the methods DisableColor() individually.
|
||||
var NoColor = !isatty.IsTerminal(os.Stdout.Fd())
|
||||
|
||||
// Color defines a custom color object which is defined by SGR parameters.
|
||||
type Color struct {
|
||||
params []Attribute
|
||||
noColor *bool
|
||||
}
|
||||
|
||||
// Attribute defines a single SGR Code
|
||||
type Attribute int
|
||||
|
||||
const escape = "\x1b"
|
||||
|
||||
// Base attributes
|
||||
const (
|
||||
Reset Attribute = iota
|
||||
Bold
|
||||
Faint
|
||||
Italic
|
||||
Underline
|
||||
BlinkSlow
|
||||
BlinkRapid
|
||||
ReverseVideo
|
||||
Concealed
|
||||
CrossedOut
|
||||
)
|
||||
|
||||
// Foreground text colors
|
||||
const (
|
||||
FgBlack Attribute = iota + 30
|
||||
FgRed
|
||||
FgGreen
|
||||
FgYellow
|
||||
FgBlue
|
||||
FgMagenta
|
||||
FgCyan
|
||||
FgWhite
|
||||
)
|
||||
|
||||
// Foreground Hi-Intensity text colors
|
||||
const (
|
||||
FgHiBlack Attribute = iota + 90
|
||||
FgHiRed
|
||||
FgHiGreen
|
||||
FgHiYellow
|
||||
FgHiBlue
|
||||
FgHiMagenta
|
||||
FgHiCyan
|
||||
FgHiWhite
|
||||
)
|
||||
|
||||
// Background text colors
|
||||
const (
|
||||
BgBlack Attribute = iota + 40
|
||||
BgRed
|
||||
BgGreen
|
||||
BgYellow
|
||||
BgBlue
|
||||
BgMagenta
|
||||
BgCyan
|
||||
BgWhite
|
||||
)
|
||||
|
||||
// Background Hi-Intensity text colors
|
||||
const (
|
||||
BgHiBlack Attribute = iota + 100
|
||||
BgHiRed
|
||||
BgHiGreen
|
||||
BgHiYellow
|
||||
BgHiBlue
|
||||
BgHiMagenta
|
||||
BgHiCyan
|
||||
BgHiWhite
|
||||
)
|
||||
|
||||
// New returns a newly created color object.
|
||||
func New(value ...Attribute) *Color {
|
||||
c := &Color{params: make([]Attribute, 0)}
|
||||
c.Add(value...)
|
||||
return c
|
||||
}
|
||||
|
||||
// Set sets the given parameters immediately. It will change the color of
|
||||
// output with the given SGR parameters until color.Unset() is called.
|
||||
func Set(p ...Attribute) *Color {
|
||||
c := New(p...)
|
||||
c.Set()
|
||||
return c
|
||||
}
|
||||
|
||||
// Unset resets all escape attributes and clears the output. Usually should
|
||||
// be called after Set().
|
||||
func Unset() {
|
||||
if NoColor {
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
|
||||
}
|
||||
|
||||
// Set sets the SGR sequence.
|
||||
func (c *Color) Set() *Color {
|
||||
if c.isNoColorSet() {
|
||||
return c
|
||||
}
|
||||
|
||||
fmt.Fprintf(Output, c.format())
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Color) unset() {
|
||||
if c.isNoColorSet() {
|
||||
return
|
||||
}
|
||||
|
||||
Unset()
|
||||
}
|
||||
|
||||
// Add is used to chain SGR parameters. Use as many parameters as you want to combine
|
||||
// and create custom color objects. Example: Add(color.FgRed, color.Underline).
|
||||
func (c *Color) Add(value ...Attribute) *Color {
|
||||
c.params = append(c.params, value...)
|
||||
return c
|
||||
}
|
||||
|
||||
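// prepend inserts the given attribute at the front of the params list.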
func (c *Color) prepend(value Attribute) {
|
||||
c.params = append(c.params, 0)
|
||||
copy(c.params[1:], c.params[0:])
|
||||
c.params[0] = value
|
||||
}
|
||||
|
||||
// Output defines the standard output of the print functions. By default
|
||||
// os.Stdout is used.
|
||||
var Output = colorable.NewColorableStdout()
|
||||
|
||||
// Print formats using the default formats for its operands and writes to
|
||||
// standard output. Spaces are added between operands when neither is a
|
||||
// string. It returns the number of bytes written and any write error
|
||||
// encountered. This is the standard fmt.Print() method wrapped with the given
|
||||
// color.
|
||||
func (c *Color) Print(a ...interface{}) (n int, err error) {
|
||||
c.Set()
|
||||
defer c.unset()
|
||||
|
||||
return fmt.Fprint(Output, a...)
|
||||
}
|
||||
|
||||
// Printf formats according to a format specifier and writes to standard output.
|
||||
// It returns the number of bytes written and any write error encountered.
|
||||
// This is the standard fmt.Printf() method wrapped with the given color.
|
||||
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
c.Set()
|
||||
defer c.unset()
|
||||
|
||||
return fmt.Fprintf(Output, format, a...)
|
||||
}
|
||||
|
||||
// Println formats using the default formats for its operands and writes to
|
||||
// standard output. Spaces are always added between operands and a newline is
|
||||
// appended. It returns the number of bytes written and any write error
|
||||
// encountered. This is the standard fmt.Print() method wrapped with the given
|
||||
// color.
|
||||
func (c *Color) Println(a ...interface{}) (n int, err error) {
|
||||
c.Set()
|
||||
defer c.unset()
|
||||
|
||||
return fmt.Fprintln(Output, a...)
|
||||
}
|
||||
|
||||
// PrintFunc returns a new function that prints the passed arguments as
|
||||
// colorized with color.Print().
|
||||
func (c *Color) PrintFunc() func(a ...interface{}) {
|
||||
return func(a ...interface{}) { c.Print(a...) }
|
||||
}
|
||||
|
||||
// PrintfFunc returns a new function that prints the passed arguments as
|
||||
// colorized with color.Printf().
|
||||
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
|
||||
return func(format string, a ...interface{}) { c.Printf(format, a...) }
|
||||
}
|
||||
|
||||
// PrintlnFunc returns a new function that prints the passed arguments as
|
||||
// colorized with color.Println().
|
||||
func (c *Color) PrintlnFunc() func(a ...interface{}) {
|
||||
return func(a ...interface{}) { c.Println(a...) }
|
||||
}
|
||||
|
||||
// SprintFunc returns a new function that returns colorized strings for the
|
||||
// given arguments with fmt.Sprint(). Useful to put into or mix into other
|
||||
// string. Windows users should use this in conjunction with color.Output, example:
|
||||
//
|
||||
// put := New(FgYellow).SprintFunc()
|
||||
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
|
||||
func (c *Color) SprintFunc() func(a ...interface{}) string {
|
||||
return func(a ...interface{}) string {
|
||||
return c.wrap(fmt.Sprint(a...))
|
||||
}
|
||||
}
|
||||
|
||||
// SprintfFunc returns a new function that returns colorized strings for the
|
||||
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
|
||||
// string. Windows users should use this in conjunction with color.Output.
|
||||
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
|
||||
return func(format string, a ...interface{}) string {
|
||||
return c.wrap(fmt.Sprintf(format, a...))
|
||||
}
|
||||
}
|
||||
|
||||
// SprintlnFunc returns a new function that returns colorized strings for the
|
||||
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
|
||||
// string. Windows users should use this in conjunction with color.Output.
|
||||
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
|
||||
return func(a ...interface{}) string {
|
||||
return c.wrap(fmt.Sprintln(a...))
|
||||
}
|
||||
}
|
||||
|
||||
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
|
||||
// an example output might be: "1;36" -> bold cyan
|
||||
func (c *Color) sequence() string {
|
||||
format := make([]string, len(c.params))
|
||||
for i, v := range c.params {
|
||||
format[i] = strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
return strings.Join(format, ";")
|
||||
}
|
||||
|
||||
// wrap wraps the s string with the colors attributes. The string is ready to
|
||||
// be printed.
|
||||
func (c *Color) wrap(s string) string {
|
||||
if c.isNoColorSet() {
|
||||
return s
|
||||
}
|
||||
|
||||
return c.format() + s + c.unformat()
|
||||
}
|
||||
|
||||
func (c *Color) format() string {
|
||||
return fmt.Sprintf("%s[%sm", escape, c.sequence())
|
||||
}
|
||||
|
||||
func (c *Color) unformat() string {
|
||||
return fmt.Sprintf("%s[%dm", escape, Reset)
|
||||
}
|
||||
|
||||
// DisableColor disables the color output. Useful for keeping existing code
// unchanged while still producing output. Can be used for flags like
// "--no-color". To enable it again, use the EnableColor() method.
|
||||
func (c *Color) DisableColor() {
|
||||
c.noColor = boolPtr(true)
|
||||
}
|
||||
|
||||
// EnableColor enables the color output. Use it in conjunction with
|
||||
// DisableColor(). Otherwise this method has no side effects.
|
||||
func (c *Color) EnableColor() {
|
||||
c.noColor = boolPtr(false)
|
||||
}
|
||||
|
||||
func (c *Color) isNoColorSet() bool {
|
||||
// check first whether the user has set it explicitly
|
||||
if c.noColor != nil {
|
||||
return *c.noColor
|
||||
}
|
||||
|
||||
// if not return the global option, which is disabled by default
|
||||
return NoColor
|
||||
}
|
||||
|
||||
// Equals returns a boolean value indicating whether two colors are equal.
|
||||
func (c *Color) Equals(c2 *Color) bool {
|
||||
if len(c.params) != len(c2.params) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, attr := range c.params {
|
||||
if !c2.attrExists(attr) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Color) attrExists(a Attribute) bool {
|
||||
for _, attr := range c.params {
|
||||
if attr == a {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func boolPtr(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Black is a convenient helper function to print with black foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Black(format string, a ...interface{}) { printColor(format, FgBlack, a...) }
|
||||
|
||||
// Red is a convenient helper function to print with red foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Red(format string, a ...interface{}) { printColor(format, FgRed, a...) }
|
||||
|
||||
// Green is a convenient helper function to print with green foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Green(format string, a ...interface{}) { printColor(format, FgGreen, a...) }
|
||||
|
||||
// Yellow is a convenient helper function to print with yellow foreground.
|
||||
// A newline is appended to format by default.
|
||||
func Yellow(format string, a ...interface{}) { printColor(format, FgYellow, a...) }
|
||||
|
||||
// Blue is a convenient helper function to print with blue foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Blue(format string, a ...interface{}) { printColor(format, FgBlue, a...) }
|
||||
|
||||
// Magenta is a convenient helper function to print with magenta foreground.
|
||||
// A newline is appended to format by default.
|
||||
func Magenta(format string, a ...interface{}) { printColor(format, FgMagenta, a...) }
|
||||
|
||||
// Cyan is a convenient helper function to print with cyan foreground. A
|
||||
// newline is appended to format by default.
|
||||
func Cyan(format string, a ...interface{}) { printColor(format, FgCyan, a...) }
|
||||
|
||||
// White is a convenient helper function to print with white foreground. A
|
||||
// newline is appended to format by default.
|
||||
func White(format string, a ...interface{}) { printColor(format, FgWhite, a...) }
|
||||
|
||||
func printColor(format string, p Attribute, a ...interface{}) {
|
||||
if !strings.HasSuffix(format, "\n") {
|
||||
format += "\n"
|
||||
}
|
||||
|
||||
c := &Color{params: []Attribute{p}}
|
||||
c.Printf(format, a...)
|
||||
}
|
||||
|
||||
// BlackString is a convenient helper function to return a string with black
|
||||
// foreground.
|
||||
func BlackString(format string, a ...interface{}) string {
|
||||
return New(FgBlack).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// RedString is a convenient helper function to return a string with red
|
||||
// foreground.
|
||||
func RedString(format string, a ...interface{}) string {
|
||||
return New(FgRed).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// GreenString is a convenient helper function to return a string with green
|
||||
// foreground.
|
||||
func GreenString(format string, a ...interface{}) string {
|
||||
return New(FgGreen).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// YellowString is a convenient helper function to return a string with yellow
|
||||
// foreground.
|
||||
func YellowString(format string, a ...interface{}) string {
|
||||
return New(FgYellow).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// BlueString is a convenient helper function to return a string with blue
|
||||
// foreground.
|
||||
func BlueString(format string, a ...interface{}) string {
|
||||
return New(FgBlue).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// MagentaString is a convenient helper function to return a string with magenta
|
||||
// foreground.
|
||||
func MagentaString(format string, a ...interface{}) string {
|
||||
return New(FgMagenta).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// CyanString is a convenient helper function to return a string with cyan
|
||||
// foreground.
|
||||
func CyanString(format string, a ...interface{}) string {
|
||||
return New(FgCyan).SprintfFunc()(format, a...)
|
||||
}
|
||||
|
||||
// WhiteString is a convenient helper function to return a string with white
|
||||
// foreground.
|
||||
func WhiteString(format string, a ...interface{}) string {
|
||||
return New(FgWhite).SprintfFunc()(format, a...)
|
||||
}
|
114 Godeps/_workspace/src/github.com/fatih/color/doc.go (generated, vendored)
@@ -1,114 +0,0 @@
|
||||
/*
|
||||
Package color is an ANSI color package to output colorized or SGR defined
|
||||
output to the standard output. The API can be used in several way, pick one
|
||||
that suits you.
|
||||
|
||||
Use simple and default helper functions with predefined foreground colors:
|
||||
|
||||
color.Cyan("Prints text in cyan.")
|
||||
|
||||
// a newline will be appended automatically
|
||||
color.Blue("Prints %s in blue.", "text")
|
||||
|
||||
// More default foreground colors..
|
||||
color.Red("We have red")
|
||||
color.Yellow("Yellow color too!")
|
||||
color.Magenta("And many others ..")
|
||||
|
||||
However there are times where custom color mixes are required. Below are some
|
||||
examples to create custom color objects and use the print functions of each
|
||||
separate color object.
|
||||
|
||||
// Create a new color object
|
||||
c := color.New(color.FgCyan).Add(color.Underline)
|
||||
c.Println("Prints cyan text with an underline.")
|
||||
|
||||
// Or just add them to New()
|
||||
d := color.New(color.FgCyan, color.Bold)
|
||||
d.Printf("This prints bold cyan %s\n", "too!.")
|
||||
|
||||
|
||||
// Mix up foreground and background colors, create new mixes!
|
||||
red := color.New(color.FgRed)
|
||||
|
||||
boldRed := red.Add(color.Bold)
|
||||
boldRed.Println("This will print text in bold red.")
|
||||
|
||||
whiteBackground := red.Add(color.BgWhite)
|
||||
whiteBackground.Println("Red text with White background.")
|
||||
|
||||
|
||||
You can create PrintXxx functions to simplify even more:
|
||||
|
||||
// Create a custom print function for convenience
|
||||
red := color.New(color.FgRed).PrintfFunc()
|
||||
red("warning")
|
||||
red("error: %s", err)
|
||||
|
||||
// Mix up multiple attributes
|
||||
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
|
||||
notice("don't forget this...")
|
||||
|
||||
|
||||
Or create SprintXxx functions to mix strings with other non-colorized strings:
|
||||
|
||||
yellow := New(FgYellow).SprintFunc()
|
||||
red := New(FgRed).SprintFunc()
|
||||
|
||||
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
|
||||
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Printf("this %s rocks!\n", info("package"))
|
||||
|
||||
Windows support is enabled by default. All Print functions work as intended.
However, for the color.SprintXXX functions only, users should use fmt.FprintXXX and
set the output to color.Output:
|
||||
|
||||
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
|
||||
|
||||
info := New(FgWhite, BgGreen).SprintFunc()
|
||||
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
|
||||
|
||||
Using with existing code is possible. Just use the Set() method to set the
|
||||
standard output to the given parameters. That way a rewrite of an existing
|
||||
code is not required.
|
||||
|
||||
// Use handy standard colors.
|
||||
color.Set(color.FgYellow)
|
||||
|
||||
fmt.Println("Existing text will be now in Yellow")
|
||||
fmt.Printf("This one %s\n", "too")
|
||||
|
||||
color.Unset() // don't forget to unset
|
||||
|
||||
// You can mix up parameters
|
||||
color.Set(color.FgMagenta, color.Bold)
|
||||
defer color.Unset() // use it in your function
|
||||
|
||||
fmt.Println("All text will be now bold magenta.")
|
||||
|
||||
There might be a case where you want to disable color output (for example to
|
||||
pipe the standard output of your app to somewhere else). `Color` has support to
|
||||
disable colors both globally and for single color definition. For example
|
||||
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
|
||||
the color output with:
|
||||
|
||||
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
|
||||
|
||||
if *flagNoColor {
|
||||
color.NoColor = true // disables colorized output
|
||||
}
|
||||
|
||||
It also has support for single color definitions (local). You can
|
||||
disable/enable color output on the fly:
|
||||
|
||||
c := color.New(color.FgCyan)
|
||||
c.Println("Prints cyan text")
|
||||
|
||||
c.DisableColor()
|
||||
c.Println("This is printed without any color")
|
||||
|
||||
c.EnableColor()
|
||||
c.Println("This prints again cyan...")
|
||||
*/
|
||||
package color
|
25 Godeps/_workspace/src/github.com/gizak/termui/.gitignore (generated, vendored)
@@ -1,25 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
.DS_Store
6 Godeps/_workspace/src/github.com/gizak/termui/.travis.yml (generated, vendored)
@@ -1,6 +0,0 @@
language: go

go:
- tip

script: go test -v ./
148 Godeps/_workspace/src/github.com/gizak/termui/README.md (generated, vendored)
@@ -1,148 +0,0 @@
|
||||
# termui [](https://travis-ci.org/gizak/termui) [](https://godoc.org/github.com/gizak/termui)
|
||||
|
||||
<img src="./_example/dashboard.gif" alt="demo cast under osx 10.10; Terminal.app; Menlo Regular 12pt.)" width="80%">
|
||||
|
||||
`termui` is a cross-platform, easy-to-compile, and fully-customizable terminal dashboard. It is inspired by [blessed-contrib](https://github.com/yaronn/blessed-contrib), but purely in Go.
|
||||
|
||||
Version v2 has arrived! It brings a new event system, a new theme system, a new `Buffer` interface and coloured text rendering. (Some docs are still missing, but they will be completed soon!)
|
||||
|
||||
## Installation
|
||||
|
||||
`master` mirrors v2 branch, to install:
|
||||
|
||||
go get -u github.com/gizak/termui
|
||||
|
||||
For compatibility, you can instead install the legacy version of `termui`:
|
||||
|
||||
go get gopkg.in/gizak/termui.v1
|
||||
|
||||
## Usage
|
||||
|
||||
### Layout
|
||||
|
||||
To use `termui`, the very first thing you may want to know is how to manage layout. `termui` offers two ways of doing this, known as absolute layout and grid layout.
|
||||
|
||||
__Absolute layout__
|
||||
|
||||
Each widget has an underlying block structure which is basically a box model. It has border, label and padding properties. A widget's border can be hidden or displayed (together with its border label), and you can pick different foreground/background colours for the border as well. To display such a widget at a specific location in the terminal window, assign `.X`, `.Y`, `.Height` and `.Width` values to each widget before sending it to `.Render`. Let's demonstrate this with a code snippet:
|
||||
|
||||
`````go
|
||||
import ui "github.com/gizak/termui" // <- ui shortcut, optional
|
||||
|
||||
func main() {
|
||||
err := ui.Init()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer ui.Close()
|
||||
|
||||
p := ui.NewPar(":PRESS q TO QUIT DEMO")
|
||||
p.Height = 3
|
||||
p.Width = 50
|
||||
p.TextFgColor = ui.ColorWhite
|
||||
p.BorderLabel = "Text Box"
|
||||
p.BorderFg = ui.ColorCyan
|
||||
|
||||
g := ui.NewGauge()
|
||||
g.Percent = 50
|
||||
g.Width = 50
|
||||
g.Height = 3
|
||||
g.Y = 11
|
||||
g.BorderLabel = "Gauge"
|
||||
g.BarColor = ui.ColorRed
|
||||
g.BorderFg = ui.ColorWhite
|
||||
g.BorderLabelFg = ui.ColorCyan
|
||||
|
||||
ui.Render(p, g) // feel free to call Render, it's async and non-block
|
||||
|
||||
// event handler...
|
||||
}
|
||||
`````
|
||||
|
||||
Note that components can overlap (I'd rather call this a feature...): `Render(rs ...Renderer)` draws its arguments from left to right, so later components are painted on top of earlier ones where they intersect.
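A minimal sketch of that ordering, reusing the `p` and `g` widgets from the snippet above (the coordinates here are made up purely for illustration):

```go
// Both widgets now overlap on columns 25..49; g is passed last,
// so it is drawn on top of p inside the overlapping cells.
p.X, p.Y = 0, 0  // p spans columns 0..49
g.X, g.Y = 25, 0 // g spans columns 25..74

ui.Render(p, g)
```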
|
||||
|
||||
__Grid layout:__
|
||||
|
||||
<img src="./_example/grid.gif" alt="grid" width="60%">
|
||||
|
||||
Grid layout uses [12 columns grid system](http://www.w3schools.com/bootstrap/bootstrap_grid_system.asp) with expressive syntax. To use `Grid`, all we need to do is build a widget tree consisting of `Row`s and Cols (Actually a Col is also a `Row` but with a widget endpoint attached).
|
||||
|
||||
```go
|
||||
import ui "github.com/gizak/termui"
|
||||
// init and create widgets...
|
||||
|
||||
// build
|
||||
ui.Body.AddRows(
|
||||
ui.NewRow(
|
||||
ui.NewCol(6, 0, widget0),
|
||||
ui.NewCol(6, 0, widget1)),
|
||||
ui.NewRow(
|
||||
ui.NewCol(3, 0, widget2),
|
||||
ui.NewCol(3, 0, widget30, widget31, widget32),
|
||||
ui.NewCol(6, 0, widget4)))
|
||||
|
||||
// calculate layout
|
||||
ui.Body.Align()
|
||||
|
||||
ui.Render(ui.Body)
|
||||
```
|
||||
|
||||
### Events
|
||||
|
||||
`termui` ships with an http-like event mux handling system. All events are channeled up from different sources (typing, clicks, window resize, custom events) and then encoded as a universal `Event` object. `Event.Path` indicates the event type and `Event.Data` stores the event data struct. Adding a handler for a certain event is as easy as below:
|
||||
|
||||
```go
|
||||
// handle key q pressing
|
||||
ui.Handle("/sys/kbd/q", func(ui.Event) {
|
||||
// press q to quit
|
||||
ui.StopLoop()
|
||||
})
|
||||
|
||||
ui.Handle("/sys/kbd/C-x", func(ui.Event) {
|
||||
// handle Ctrl + x combination
|
||||
})
|
||||
|
||||
ui.Handle("/sys/kbd", func(ui.Event) {
|
||||
// handle all other key pressing
|
||||
})
|
||||
|
||||
// handle a 1s timer
|
||||
ui.Handle("/timer/1s", func(e ui.Event) {
|
||||
t := e.Data.(ui.EvtTimer)
|
||||
// t is a EvtTimer
|
||||
if t.Count%2 ==0 {
|
||||
// do something
|
||||
}
|
||||
})
|
||||
|
||||
ui.Loop() // block until StopLoop is called
|
||||
```
|
||||
|
||||
### Widgets
|
||||
|
||||
Click an image to see the corresponding demo code.
|
||||
|
||||
[<img src="./_example/par.png" alt="par" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/par.go)
|
||||
[<img src="./_example/list.png" alt="list" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/list.go)
|
||||
[<img src="./_example/gauge.png" alt="gauge" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/gauge.go)
|
||||
[<img src="./_example/linechart.png" alt="linechart" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/linechart.go)
|
||||
[<img src="./_example/barchart.png" alt="barchart" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/barchart.go)
|
||||
[<img src="./_example/mbarchart.png" alt="barchart" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/mbarchart.go)
|
||||
[<img src="./_example/sparklines.png" alt="sparklines" type="image/png" width="45%">](https://github.com/gizak/termui/blob/master/_example/sparklines.go)
|
||||
|
||||
## GoDoc
|
||||
|
||||
[godoc](https://godoc.org/github.com/gizak/termui)
|
||||
|
||||
## TODO
|
||||
|
||||
- [x] Grid layout
|
||||
- [x] Event system
|
||||
- [x] Canvas widget
|
||||
- [x] Refine APIs
|
||||
- [ ] Focusable widgets
|
||||
|
||||
## Changelog
|
||||
|
||||
## License
|
||||
This library is under the [MIT License](http://opensource.org/licenses/MIT)
|
240 Godeps/_workspace/src/github.com/gizak/termui/block.go (generated, vendored)
@@ -1,240 +0,0 @@
|
||||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
package termui
|
||||
|
||||
import "image"
|
||||
|
||||
// Hline is a horizontal line.
|
||||
type Hline struct {
|
||||
X int
|
||||
Y int
|
||||
Len int
|
||||
Fg Attribute
|
||||
Bg Attribute
|
||||
}
|
||||
|
||||
// Vline is a vertical line.
|
||||
type Vline struct {
|
||||
X int
|
||||
Y int
|
||||
Len int
|
||||
Fg Attribute
|
||||
Bg Attribute
|
||||
}
|
||||
|
||||
// Buffer draws a horizontal line.
|
||||
func (l Hline) Buffer() Buffer {
|
||||
if l.Len <= 0 {
|
||||
return NewBuffer()
|
||||
}
|
||||
return NewFilledBuffer(l.X, l.Y, l.X+l.Len, l.Y+1, HORIZONTAL_LINE, l.Fg, l.Bg)
|
||||
}
|
||||
|
||||
// Buffer draws a vertical line.
|
||||
func (l Vline) Buffer() Buffer {
|
||||
if l.Len <= 0 {
|
||||
return NewBuffer()
|
||||
}
|
||||
return NewFilledBuffer(l.X, l.Y, l.X+1, l.Y+l.Len, VERTICAL_LINE, l.Fg, l.Bg)
|
||||
}
|
||||
|
||||
// Buffer draws a box border.
|
||||
func (b Block) drawBorder(buf Buffer) {
|
||||
if !b.Border {
|
||||
return
|
||||
}
|
||||
|
||||
min := b.area.Min
|
||||
max := b.area.Max
|
||||
|
||||
x0 := min.X
|
||||
y0 := min.Y
|
||||
x1 := max.X - 1
|
||||
y1 := max.Y - 1
|
||||
|
||||
// draw lines
|
||||
if b.BorderTop {
|
||||
buf.Merge(Hline{x0, y0, x1 - x0, b.BorderFg, b.BorderBg}.Buffer())
|
||||
}
|
||||
if b.BorderBottom {
|
||||
buf.Merge(Hline{x0, y1, x1 - x0, b.BorderFg, b.BorderBg}.Buffer())
|
||||
}
|
||||
if b.BorderLeft {
|
||||
buf.Merge(Vline{x0, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer())
|
||||
}
|
||||
if b.BorderRight {
|
||||
buf.Merge(Vline{x1, y0, y1 - y0, b.BorderFg, b.BorderBg}.Buffer())
|
||||
}
|
||||
|
||||
// draw corners
|
||||
if b.BorderTop && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 0 {
|
||||
buf.Set(x0, y0, Cell{TOP_LEFT, b.BorderFg, b.BorderBg})
|
||||
}
|
||||
if b.BorderTop && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 0 {
|
||||
buf.Set(x1, y0, Cell{TOP_RIGHT, b.BorderFg, b.BorderBg})
|
||||
}
|
||||
if b.BorderBottom && b.BorderLeft && b.area.Dx() > 0 && b.area.Dy() > 1 {
|
||||
buf.Set(x0, y1, Cell{BOTTOM_LEFT, b.BorderFg, b.BorderBg})
|
||||
}
|
||||
if b.BorderBottom && b.BorderRight && b.area.Dx() > 1 && b.area.Dy() > 1 {
|
||||
buf.Set(x1, y1, Cell{BOTTOM_RIGHT, b.BorderFg, b.BorderBg})
|
||||
}
|
||||
}
|
||||
|
||||
func (b Block) drawBorderLabel(buf Buffer) {
|
||||
maxTxtW := b.area.Dx() - 2
|
||||
tx := DTrimTxCls(DefaultTxBuilder.Build(b.BorderLabel, b.BorderLabelFg, b.BorderLabelBg), maxTxtW)
|
||||
|
||||
for i, w := 0, 0; i < len(tx); i++ {
|
||||
buf.Set(b.area.Min.X+1+w, b.area.Min.Y, tx[i])
|
||||
w += tx[i].Width()
|
||||
}
|
||||
}
|
||||
|
||||
// Block is a base struct for all other upper level widgets,
|
||||
// consider it as css: display:block.
|
||||
// Normally you do not need to create it manually.
|
||||
type Block struct {
|
||||
area image.Rectangle
|
||||
innerArea image.Rectangle
|
||||
X int
|
||||
Y int
|
||||
Border bool
|
||||
BorderFg Attribute
|
||||
BorderBg Attribute
|
||||
BorderLeft bool
|
||||
BorderRight bool
|
||||
BorderTop bool
|
||||
BorderBottom bool
|
||||
BorderLabel string
|
||||
BorderLabelFg Attribute
|
||||
BorderLabelBg Attribute
|
||||
Display bool
|
||||
Bg Attribute
|
||||
Width int
|
||||
Height int
|
||||
PaddingTop int
|
||||
PaddingBottom int
|
||||
PaddingLeft int
|
||||
PaddingRight int
|
||||
id string
|
||||
Float Align
|
||||
}
|
||||
|
||||
// NewBlock returns a *Block which inherits styles from current theme.
|
||||
func NewBlock() *Block {
|
||||
b := Block{}
|
||||
b.Display = true
|
||||
b.Border = true
|
||||
b.BorderLeft = true
|
||||
b.BorderRight = true
|
||||
b.BorderTop = true
|
||||
b.BorderBottom = true
|
||||
b.BorderBg = ThemeAttr("border.bg")
|
||||
b.BorderFg = ThemeAttr("border.fg")
|
||||
b.BorderLabelBg = ThemeAttr("label.bg")
|
||||
b.BorderLabelFg = ThemeAttr("label.fg")
|
||||
b.Bg = ThemeAttr("block.bg")
|
||||
b.Width = 2
|
||||
b.Height = 2
|
||||
b.id = GenId()
|
||||
b.Float = AlignNone
|
||||
return &b
|
||||
}
|
||||
|
||||
func (b Block) Id() string {
|
||||
return b.id
|
||||
}
|
||||
|
||||
// Align computes box model
|
||||
func (b *Block) Align() {
|
||||
// outer
|
||||
b.area.Min.X = 0
|
||||
b.area.Min.Y = 0
|
||||
b.area.Max.X = b.Width
|
||||
b.area.Max.Y = b.Height
|
||||
|
||||
// float
|
||||
b.area = AlignArea(TermRect(), b.area, b.Float)
|
||||
b.area = MoveArea(b.area, b.X, b.Y)
|
||||
|
||||
// inner
|
||||
b.innerArea.Min.X = b.area.Min.X + b.PaddingLeft
|
||||
b.innerArea.Min.Y = b.area.Min.Y + b.PaddingTop
|
||||
b.innerArea.Max.X = b.area.Max.X - b.PaddingRight
|
||||
b.innerArea.Max.Y = b.area.Max.Y - b.PaddingBottom
|
||||
|
||||
if b.Border {
|
||||
if b.BorderLeft {
|
||||
b.innerArea.Min.X++
|
||||
}
|
||||
if b.BorderRight {
|
||||
b.innerArea.Max.X--
|
||||
}
|
||||
if b.BorderTop {
|
||||
b.innerArea.Min.Y++
|
||||
}
|
||||
if b.BorderBottom {
|
||||
b.innerArea.Max.Y--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// InnerBounds returns the internal bounds of the block after aligning and
|
||||
// calculating the padding and border, if any.
|
||||
func (b *Block) InnerBounds() image.Rectangle {
|
||||
b.Align()
|
||||
return b.innerArea
|
||||
}
|
||||
|
||||
// Buffer implements Bufferer interface.
|
||||
// Draw background and border (if any).
|
||||
func (b *Block) Buffer() Buffer {
|
||||
b.Align()
|
||||
|
||||
buf := NewBuffer()
|
||||
buf.SetArea(b.area)
|
||||
buf.Fill(' ', ColorDefault, b.Bg)
|
||||
|
||||
b.drawBorder(buf)
|
||||
b.drawBorderLabel(buf)
|
||||
|
||||
return buf
|
||||
}
|
||||
|
||||
// GetHeight implements GridBufferer.
|
||||
// It returns current height of the block.
|
||||
func (b Block) GetHeight() int {
|
||||
return b.Height
|
||||
}
|
||||
|
||||
// SetX implements GridBufferer interface, which sets block's x position.
|
||||
func (b *Block) SetX(x int) {
|
||||
b.X = x
|
||||
}
|
||||
|
||||
// SetY implements GridBufferer interface, it sets y position for block.
|
||||
func (b *Block) SetY(y int) {
|
||||
b.Y = y
|
||||
}
|
||||
|
||||
// SetWidth implements GridBuffer interface, it sets block's width.
|
||||
func (b *Block) SetWidth(w int) {
|
||||
b.Width = w
|
||||
}
|
||||
|
||||
func (b Block) InnerWidth() int {
|
||||
return b.innerArea.Dx()
|
||||
}
|
||||
|
||||
func (b Block) InnerHeight() int {
|
||||
return b.innerArea.Dy()
|
||||
}
|
||||
|
||||
func (b Block) InnerX() int {
|
||||
return b.innerArea.Min.X
|
||||
}
|
||||
|
||||
func (b Block) InnerY() int { return b.innerArea.Min.Y }
|
106 Godeps/_workspace/src/github.com/gizak/termui/buffer.go (generated, vendored)
@@ -1,106 +0,0 @@
|
||||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
package termui
|
||||
|
||||
import "image"
|
||||
|
||||
// Cell is a rune with assigned Fg and Bg
|
||||
type Cell struct {
|
||||
Ch rune
|
||||
Fg Attribute
|
||||
Bg Attribute
|
||||
}
|
||||
|
||||
// Buffer is a renderable rectangle cell data container.
|
||||
type Buffer struct {
|
||||
Area image.Rectangle // selected drawing area
|
||||
CellMap map[image.Point]Cell
|
||||
}
|
||||
|
||||
// At returns the cell at (x,y).
|
||||
func (b Buffer) At(x, y int) Cell {
|
||||
return b.CellMap[image.Pt(x, y)]
|
||||
}
|
||||
|
||||
// Set assigns a char to (x,y)
|
||||
func (b Buffer) Set(x, y int, c Cell) {
|
||||
b.CellMap[image.Pt(x, y)] = c
|
||||
}
|
||||
|
||||
// Bounds returns the domain for which At can return non-zero color.
|
||||
func (b Buffer) Bounds() image.Rectangle {
|
||||
x0, y0, x1, y1 := 0, 0, 0, 0
|
||||
for p := range b.CellMap {
|
||||
if p.X > x1 {
|
||||
x1 = p.X
|
||||
}
|
||||
if p.X < x0 {
|
||||
x0 = p.X
|
||||
}
|
||||
if p.Y > y1 {
|
||||
y1 = p.Y
|
||||
}
|
||||
if p.Y < y0 {
|
||||
y0 = p.Y
|
||||
}
|
||||
}
|
||||
return image.Rect(x0, y0, x1, y1)
|
||||
}
|
||||
|
||||
// SetArea assigns a new rect area to Buffer b.
|
||||
func (b *Buffer) SetArea(r image.Rectangle) {
|
||||
b.Area.Max = r.Max
|
||||
b.Area.Min = r.Min
|
||||
}
|
||||
|
||||
// Sync sets drawing area to the buffer's bound
|
||||
func (b Buffer) Sync() {
|
||||
b.SetArea(b.Bounds())
|
||||
}
|
||||
|
||||
// NewCell returns a new cell
|
||||
func NewCell(ch rune, fg, bg Attribute) Cell {
|
||||
return Cell{ch, fg, bg}
|
||||
}
|
||||
|
||||
// Merge merges bs Buffers onto b
|
||||
func (b *Buffer) Merge(bs ...Buffer) {
|
||||
for _, buf := range bs {
|
||||
for p, v := range buf.CellMap {
|
||||
b.Set(p.X, p.Y, v)
|
||||
}
|
||||
b.SetArea(b.Area.Union(buf.Area))
|
||||
}
|
||||
}
|
||||
|
||||
// NewBuffer returns a new Buffer
|
||||
func NewBuffer() Buffer {
|
||||
return Buffer{
|
||||
CellMap: make(map[image.Point]Cell),
|
||||
Area: image.Rectangle{}}
|
||||
}
|
||||
|
||||
// Fill fills the Buffer b with ch,fg and bg.
|
||||
func (b Buffer) Fill(ch rune, fg, bg Attribute) {
|
||||
for x := b.Area.Min.X; x < b.Area.Max.X; x++ {
|
||||
for y := b.Area.Min.Y; y < b.Area.Max.Y; y++ {
|
||||
b.Set(x, y, Cell{ch, fg, bg})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NewFilledBuffer returns a new Buffer filled with ch, fb and bg.
|
||||
func NewFilledBuffer(x0, y0, x1, y1 int, ch rune, fg, bg Attribute) Buffer {
|
||||
buf := NewBuffer()
|
||||
buf.Area.Min = image.Pt(x0, y0)
|
||||
buf.Area.Max = image.Pt(x1, y1)
|
||||
|
||||
for x := buf.Area.Min.X; x < buf.Area.Max.X; x++ {
|
||||
for y := buf.Area.Min.Y; y < buf.Area.Max.Y; y++ {
|
||||
buf.Set(x, y, Cell{ch, fg, bg})
|
||||
}
|
||||
}
|
||||
return buf
|
||||
}
|
26 Godeps/_workspace/src/github.com/gizak/termui/config (generated, vendored)
@@ -1,26 +0,0 @@
|
||||
#!/usr/bin/env perl6
|
||||
|
||||
use v6;
|
||||
|
||||
my $copyright = '// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
';
|
||||
|
||||
sub MAIN('update-docstr', Str $srcp) {
|
||||
if $srcp.IO.f {
|
||||
$_ = $srcp.IO.slurp;
|
||||
if m/^ \/\/\s Copyright .+? \n\n/ {
|
||||
unless ~$/ eq $copyright {
|
||||
s/^ \/\/\s Copyright .+? \n\n /$copyright/;
|
||||
spurt $srcp, $_;
|
||||
say "[updated] doc string for:"~$srcp;
|
||||
}
|
||||
} else {
|
||||
say "[added] doc string for "~$srcp~" (no match found)";
|
||||
$_ = $copyright ~ $_;
|
||||
spurt $srcp, $_;
|
||||
}
|
||||
}
|
||||
}
|
117 Godeps/_workspace/src/github.com/gizak/termui/debug/debuger.go (generated, vendored)
@@ -1,117 +0,0 @@
|
||||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
package debug
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Port string
|
||||
Addr string
|
||||
Path string
|
||||
Msg chan string
|
||||
chs []chan string
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
Port string
|
||||
Addr string
|
||||
Path string
|
||||
ws *websocket.Conn
|
||||
}
|
||||
|
||||
var defaultPort = ":8080"
|
||||
|
||||
func NewServer() *Server {
|
||||
return &Server{
|
||||
Port: defaultPort,
|
||||
Addr: "localhost",
|
||||
Path: "/echo",
|
||||
Msg: make(chan string),
|
||||
chs: make([]chan string, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func NewClient() Client {
|
||||
return Client{
|
||||
Port: defaultPort,
|
||||
Addr: "localhost",
|
||||
Path: "/echo",
|
||||
}
|
||||
}
|
||||
|
||||
func (c Client) ConnectAndListen() error {
|
||||
ws, err := websocket.Dial("ws://"+c.Addr+c.Port+c.Path, "", "http://"+c.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ws.Close()
|
||||
|
||||
var m string
|
||||
for {
|
||||
err := websocket.Message.Receive(ws, &m)
|
||||
if err != nil {
|
||||
fmt.Print(err)
|
||||
return err
|
||||
}
|
||||
fmt.Print(m)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) ListenAndServe() error {
|
||||
http.Handle(s.Path, websocket.Handler(func(ws *websocket.Conn) {
|
||||
defer ws.Close()
|
||||
|
||||
mc := make(chan string)
|
||||
s.chs = append(s.chs, mc)
|
||||
|
||||
for m := range mc {
|
||||
websocket.Message.Send(ws, m)
|
||||
}
|
||||
}))
|
||||
|
||||
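// Fan out: forward every message posted on s.Msg to each connected client's channel.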
go func() {
|
||||
for msg := range s.Msg {
|
||||
for _, c := range s.chs {
|
||||
go func(a chan string) {
|
||||
a <- msg
|
||||
}(c)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return http.ListenAndServe(s.Port, nil)
|
||||
}
|
||||
|
||||
func (s *Server) Log(msg string) {
|
||||
go func() { s.Msg <- msg }()
|
||||
}
|
||||
|
||||
func (s *Server) Logf(format string, a ...interface{}) {
|
||||
s.Log(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
var DefaultServer = NewServer()
|
||||
var DefaultClient = NewClient()
|
||||
|
||||
func ListenAndServe() error {
|
||||
return DefaultServer.ListenAndServe()
|
||||
}
|
||||
|
||||
func ConnectAndListen() error {
|
||||
return DefaultClient.ConnectAndListen()
|
||||
}
|
||||
|
||||
func Log(msg string) {
|
||||
DefaultServer.Log(msg)
|
||||
}
|
||||
|
||||
func Logf(format string, a ...interface{}) {
|
||||
DefaultServer.Logf(format, a...)
|
||||
}
|
27 Godeps/_workspace/src/github.com/gizak/termui/doc.go (generated, vendored)
@@ -1,27 +0,0 @@
|
||||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package termui is a library designed for creating command line UI. For more info, goto http://github.com/gizak/termui
|
||||
|
||||
A simple example:
|
||||
package main
|
||||
|
||||
import ui "github.com/gizak/termui"
|
||||
|
||||
func main() {
|
||||
if err:=ui.Init(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer ui.Close()
|
||||
|
||||
g := ui.NewGauge()
|
||||
g.Percent = 50
|
||||
g.Width = 50
|
||||
g.Border.Label = "Gauge"
|
||||
|
||||
ui.Render(g)
|
||||
}
|
||||
*/
|
||||
package termui
|
109 Godeps/_workspace/src/github.com/gizak/termui/gauge.go (generated, vendored)
@@ -1,109 +0,0 @@
|
||||
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license that can
|
||||
// be found in the LICENSE file.
|
||||
|
||||
package termui
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Gauge is a progress bar like widget.
|
||||
// A simple example:
|
||||
/*
|
||||
g := termui.NewGauge()
|
||||
g.Percent = 40
|
||||
g.Width = 50
|
||||
g.Height = 3
|
||||
g.Border.Label = "Slim Gauge"
|
||||
g.BarColor = termui.ColorRed
|
||||
g.PercentColor = termui.ColorBlue
|
||||
*/
|
||||
|
||||
const ColorUndef Attribute = Attribute(^uint16(0))
|
||||
|
||||
type Gauge struct {
|
||||
Block
|
||||
Percent int
|
||||
BarColor Attribute
|
||||
PercentColor Attribute
|
||||
PercentColorHighlighted Attribute
|
||||
Label string
|
||||
LabelAlign Align
|
||||
}
|
||||
|
||||
// NewGauge return a new gauge with current theme.
|
||||
func NewGauge() *Gauge {
|
||||
g := &Gauge{
|
||||
Block: *NewBlock(),
|
||||
PercentColor: ThemeAttr("gauge.percent.fg"),
|
||||
BarColor: ThemeAttr("gauge.bar.bg"),
|
||||
Label: "{{percent}}%",
|
||||
LabelAlign: AlignCenter,
|
||||
PercentColorHighlighted: ColorUndef,
|
||||
}
|
||||
|
||||
g.Width = 12
|
||||
g.Height = 5
|
||||
return g
|
||||
}
|
||||
|
||||
// Buffer implements Bufferer interface.
|
||||
func (g *Gauge) Buffer() Buffer {
|
||||
buf := g.Block.Buffer()
|
||||
|
||||
// plot bar
|
||||
w := g.Percent * g.innerArea.Dx() / 100
|
||||
for i := 0; i < g.innerArea.Dy(); i++ {
|
||||
for j := 0; j < w; j++ {
|
||||
c := Cell{}
|
||||
c.Ch = ' '
|
||||
c.Bg = g.BarColor
|
||||
if c.Bg == ColorDefault {
|
||||
c.Bg |= AttrReverse
|
||||
}
|
||||
buf.Set(g.innerArea.Min.X+j, g.innerArea.Min.Y+i, c)
|
||||
}
|
||||
}
|
||||
|
||||
// plot percentage
|
||||
s := strings.Replace(g.Label, "{{percent}}", strconv.Itoa(g.Percent), -1)
|
||||
pry := g.innerArea.Min.Y + g.innerArea.Dy()/2
|
||||
rs := str2runes(s)
|
||||
var pos int
|
||||
switch g.LabelAlign {
|
||||
case AlignLeft:
|
||||
pos = 0
|
||||
|
||||
case AlignCenter:
|
||||
pos = (g.innerArea.Dx() - strWidth(s)) / 2
|
||||
|
||||
case AlignRight:
|
||||
pos = g.innerArea.Dx() - strWidth(s) - 1
|
||||
}
|
||||
pos += g.innerArea.Min.X
|
||||
|
||||
for i, v := range rs {
|
||||
c := Cell{
|
||||
Ch: v,
|
||||
Fg: g.PercentColor,
|
||||
}
|
||||
|
||||
if w+g.innerArea.Min.X > pos+i {
|
||||
c.Bg = g.BarColor
|
||||
if c.Bg == ColorDefault {
|
||||
c.Bg |= AttrReverse
|
||||
}
|
||||
|
||||
if g.PercentColorHighlighted != ColorUndef {
|
||||
c.Fg = g.PercentColorHighlighted
|
||||
}
|
||||
} else {
|
||||
c.Bg = g.Block.Bg
|
||||
}
|
||||
|
||||
buf.Set(1+pos+i, pry, c)
|
||||
}
|
||||
return buf
|
||||
}
|
11  Godeps/_workspace/src/github.com/gizak/termui/linechart_others.go  (generated, vendored)
@@ -1,11 +0,0 @@
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.

// +build !windows

package termui

const VDASH = '┊'
const HDASH = '┈'
const ORIGIN = '└'
11  Godeps/_workspace/src/github.com/gizak/termui/linechart_windows.go  (generated, vendored)
@@ -1,11 +0,0 @@
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.

// +build windows

package termui

const VDASH = '|'
const HDASH = '-'
const ORIGIN = '+'
89  Godeps/_workspace/src/github.com/gizak/termui/list.go  (generated, vendored)
@@ -1,89 +0,0 @@
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.

package termui

import "strings"

// List displays []string as its items,
// it has a Overflow option (default is "hidden"), when set to "hidden",
// the item exceeding List's width is truncated, but when set to "wrap",
// the overflowed text breaks into next line.
/*
  strs := []string{
      "[0] github.com/gizak/termui",
      "[1] editbox.go",
      "[2] iterrupt.go",
      "[3] keyboard.go",
      "[4] output.go",
      "[5] random_out.go",
      "[6] dashboard.go",
      "[7] nsf/termbox-go"}

  ls := termui.NewList()
  ls.Items = strs
  ls.ItemFgColor = termui.ColorYellow
  ls.Border.Label = "List"
  ls.Height = 7
  ls.Width = 25
  ls.Y = 0
*/
type List struct {
    Block
    Items       []string
    Overflow    string
    ItemFgColor Attribute
    ItemBgColor Attribute
}

// NewList returns a new *List with current theme.
func NewList() *List {
    l := &List{Block: *NewBlock()}
    l.Overflow = "hidden"
    l.ItemFgColor = ThemeAttr("list.item.fg")
    l.ItemBgColor = ThemeAttr("list.item.bg")
    return l
}

// Buffer implements Bufferer interface.
func (l *List) Buffer() Buffer {
    buf := l.Block.Buffer()

    switch l.Overflow {
    case "wrap":
        cs := DefaultTxBuilder.Build(strings.Join(l.Items, "\n"), l.ItemFgColor, l.ItemBgColor)
        i, j, k := 0, 0, 0
        for i < l.innerArea.Dy() && k < len(cs) {
            w := cs[k].Width()
            if cs[k].Ch == '\n' || j+w > l.innerArea.Dx() {
                i++
                j = 0
                if cs[k].Ch == '\n' {
                    k++
                }
                continue
            }
            buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, cs[k])

            k++
            j++
        }

    case "hidden":
        trimItems := l.Items
        if len(trimItems) > l.innerArea.Dy() {
            trimItems = trimItems[:l.innerArea.Dy()]
        }
        for i, v := range trimItems {
            cs := DTrimTxCls(DefaultTxBuilder.Build(v, l.ItemFgColor, l.ItemBgColor), l.innerArea.Dx())
            j := 0
            for _, vv := range cs {
                w := vv.Width()
                buf.Set(l.innerArea.Min.X+j, l.innerArea.Min.Y+i, vv)
                j += w
            }
        }
    }
    return buf
}
135  Godeps/_workspace/src/github.com/gizak/termui/render.go  (generated, vendored)
@@ -1,135 +0,0 @@
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.

package termui

import (
    "image"
    "sync"
    "time"

    tm "github.com/nsf/termbox-go"
)

// Bufferer should be implemented by all renderable components.
type Bufferer interface {
    Buffer() Buffer
}

// Init initializes termui library. This function should be called before any others.
// After initialization, the library must be finalized by 'Close' function.
func Init() error {
    if err := tm.Init(); err != nil {
        return err
    }

    sysEvtChs = make([]chan Event, 0)
    go hookTermboxEvt()

    renderJobs = make(chan []Bufferer)
    //renderLock = new(sync.RWMutex)

    Body = NewGrid()
    Body.X = 0
    Body.Y = 0
    Body.BgColor = ThemeAttr("bg")
    Body.Width = TermWidth()

    DefaultEvtStream.Init()
    DefaultEvtStream.Merge("termbox", NewSysEvtCh())
    DefaultEvtStream.Merge("timer", NewTimerCh(time.Second))
    DefaultEvtStream.Merge("custom", usrEvtCh)

    DefaultEvtStream.Handle("/", DefualtHandler)
    DefaultEvtStream.Handle("/sys/wnd/resize", func(e Event) {
        w := e.Data.(EvtWnd)
        Body.Width = w.Width
    })

    DefaultWgtMgr = NewWgtMgr()
    DefaultEvtStream.Hook(DefaultWgtMgr.WgtHandlersHook())

    go func() {
        for bs := range renderJobs {
            render(bs...)
        }
    }()

    return nil
}

// Close finalizes termui library,
// should be called after successful initialization when termui's functionality isn't required anymore.
func Close() {
    tm.Close()
}

var renderLock sync.Mutex

func termSync() {
    renderLock.Lock()
    tm.Sync()
    termWidth, termHeight = tm.Size()
    renderLock.Unlock()
}

// TermWidth returns the current terminal's width.
func TermWidth() int {
    termSync()
    return termWidth
}

// TermHeight returns the current terminal's height.
func TermHeight() int {
    termSync()
    return termHeight
}

// Render renders all Bufferer in the given order from left to right,
// right could overlap on left ones.
func render(bs ...Bufferer) {

    for _, b := range bs {

        buf := b.Buffer()
        // set cels in buf
        for p, c := range buf.CellMap {
            if p.In(buf.Area) {

                tm.SetCell(p.X, p.Y, c.Ch, toTmAttr(c.Fg), toTmAttr(c.Bg))

            }
        }

    }

    renderLock.Lock()
    // render
    tm.Flush()
    renderLock.Unlock()
}

func Clear() {
    tm.Clear(tm.ColorDefault, toTmAttr(ThemeAttr("bg")))
}

func clearArea(r image.Rectangle, bg Attribute) {
    for i := r.Min.X; i < r.Max.X; i++ {
        for j := r.Min.Y; j < r.Max.Y; j++ {
            tm.SetCell(i, j, ' ', tm.ColorDefault, toTmAttr(bg))
        }
    }
}

func ClearArea(r image.Rectangle, bg Attribute) {
    clearArea(r, bg)
    tm.Flush()
}

var renderJobs chan []Bufferer

func Render(bs ...Bufferer) {
    //go func() { renderJobs <- bs }()
    renderJobs <- bs
}
66  Godeps/_workspace/src/github.com/gizak/termui/test/runtest.go  (generated, vendored)
@@ -1,66 +0,0 @@
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.

package main

import (
    "fmt"
    "os"

    "github.com/gizak/termui"
    "github.com/gizak/termui/debug"
)

func main() {
    // run as client
    if len(os.Args) > 1 {
        fmt.Print(debug.ConnectAndListen())
        return
    }

    // run as server
    go func() { panic(debug.ListenAndServe()) }()

    if err := termui.Init(); err != nil {
        panic(err)
    }
    defer termui.Close()

    //termui.UseTheme("helloworld")
    b := termui.NewBlock()
    b.Width = 20
    b.Height = 20
    b.Float = termui.AlignCenter
    b.BorderLabel = "[HELLO](fg-red,bg-white) [WORLD](fg-blue,bg-green)"

    termui.Render(b)

    termui.Handle("/sys", func(e termui.Event) {
        k, ok := e.Data.(termui.EvtKbd)
        debug.Logf("->%v\n", e)
        if ok && k.KeyStr == "q" {
            termui.StopLoop()
        }
    })

    termui.Handle(("/usr"), func(e termui.Event) {
        debug.Logf("->%v\n", e)
    })

    termui.Handle("/timer/1s", func(e termui.Event) {
        t := e.Data.(termui.EvtTimer)
        termui.SendCustomEvt("/usr/t", t.Count)

        if t.Count%2 == 0 {
            b.BorderLabel = "[HELLO](fg-red,bg-green) [WORLD](fg-blue,bg-white)"
        } else {
            b.BorderLabel = "[HELLO](fg-blue,bg-white) [WORLD](fg-red,bg-green)"
        }

        termui.Render(b)

    })

    termui.Loop()
}
215  Godeps/_workspace/src/github.com/gizak/termui/textbuilder.go  (generated, vendored)
@@ -1,215 +0,0 @@
// Copyright 2016 Zack Guo <gizak@icloud.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.

package termui

import (
    "regexp"
    "strings"
)

// TextBuilder is a minial interface to produce text []Cell using sepcific syntax (markdown).
type TextBuilder interface {
    Build(s string, fg, bg Attribute) []Cell
}

// DefaultTxBuilder is set to be MarkdownTxBuilder.
var DefaultTxBuilder = NewMarkdownTxBuilder()

// MarkdownTxBuilder implements TextBuilder interface, using markdown syntax.
type MarkdownTxBuilder struct {
    baseFg  Attribute
    baseBg  Attribute
    plainTx []rune
    markers []marker
}

type marker struct {
    st int
    ed int
    fg Attribute
    bg Attribute
}

var colorMap = map[string]Attribute{
    "red":     ColorRed,
    "blue":    ColorBlue,
    "black":   ColorBlack,
    "cyan":    ColorCyan,
    "yellow":  ColorYellow,
    "white":   ColorWhite,
    "default": ColorDefault,
    "green":   ColorGreen,
    "magenta": ColorMagenta,
}

var attrMap = map[string]Attribute{
    "bold":      AttrBold,
    "underline": AttrUnderline,
    "reverse":   AttrReverse,
}

func rmSpc(s string) string {
    reg := regexp.MustCompile(`\s+`)
    return reg.ReplaceAllString(s, "")
}

// readAttr translates strings like `fg-red,fg-bold,bg-white` to fg and bg Attribute
func (mtb MarkdownTxBuilder) readAttr(s string) (Attribute, Attribute) {
    fg := mtb.baseFg
    bg := mtb.baseBg

    updateAttr := func(a Attribute, attrs []string) Attribute {
        for _, s := range attrs {
            // replace the color
            if c, ok := colorMap[s]; ok {
                a &= 0xFF00 // erase clr 0 ~ 8 bits
                a |= c      // set clr
            }
            // add attrs
            if c, ok := attrMap[s]; ok {
                a |= c
            }
        }
        return a
    }

    ss := strings.Split(s, ",")
    fgs := []string{}
    bgs := []string{}
    for _, v := range ss {
        subs := strings.Split(v, "-")
        if len(subs) > 1 {
            if subs[0] == "fg" {
                fgs = append(fgs, subs[1])
            }
            if subs[0] == "bg" {
                bgs = append(bgs, subs[1])
            }
        }
    }

    fg = updateAttr(fg, fgs)
    bg = updateAttr(bg, bgs)
    return fg, bg
}

func (mtb *MarkdownTxBuilder) reset() {
    mtb.plainTx = []rune{}
    mtb.markers = []marker{}
}

// parse streams and parses text into normalized text and render sequence.
func (mtb *MarkdownTxBuilder) parse(str string) {
    rs := str2runes(str)
    normTx := []rune{}
    square := []rune{}
    brackt := []rune{}
    accSquare := false
    accBrackt := false
    cntSquare := 0

    reset := func() {
        square = []rune{}
        brackt = []rune{}
        accSquare = false
        accBrackt = false
        cntSquare = 0
    }
    // pipe stacks into normTx and clear
    rollback := func() {
        normTx = append(normTx, square...)
        normTx = append(normTx, brackt...)
        reset()
    }
    // chop first and last
    chop := func(s []rune) []rune {
        return s[1 : len(s)-1]
    }

    for i, r := range rs {
        switch {
        // stacking brackt
        case accBrackt:
            brackt = append(brackt, r)
            if ')' == r {
                fg, bg := mtb.readAttr(string(chop(brackt)))
                st := len(normTx)
                ed := len(normTx) + len(square) - 2
                mtb.markers = append(mtb.markers, marker{st, ed, fg, bg})
                normTx = append(normTx, chop(square)...)
                reset()
            } else if i+1 == len(rs) {
                rollback()
            }
        // stacking square
        case accSquare:
            switch {
            // squares closed and followed by a '('
            case cntSquare == 0 && '(' == r:
                accBrackt = true
                brackt = append(brackt, '(')
            // squares closed but not followed by a '('
            case cntSquare == 0:
                rollback()
                if '[' == r {
                    accSquare = true
                    cntSquare = 1
                    brackt = append(brackt, '[')
                } else {
                    normTx = append(normTx, r)
                }
            // hit the end
            case i+1 == len(rs):
                square = append(square, r)
                rollback()
            case '[' == r:
                cntSquare++
                square = append(square, '[')
            case ']' == r:
                cntSquare--
                square = append(square, ']')
            // normal char
            default:
                square = append(square, r)
            }
        // stacking normTx
        default:
            if '[' == r {
                accSquare = true
                cntSquare = 1
                square = append(square, '[')
            } else {
                normTx = append(normTx, r)
            }
        }
    }

    mtb.plainTx = normTx
}

// Build implements TextBuilder interface.
func (mtb MarkdownTxBuilder) Build(s string, fg, bg Attribute) []Cell {
    mtb.baseFg = fg
    mtb.baseBg = bg
    mtb.reset()
    mtb.parse(s)
    cs := make([]Cell, len(mtb.plainTx))
    for i := range cs {
        cs[i] = Cell{Ch: mtb.plainTx[i], Fg: fg, Bg: bg}
    }
    for _, mrk := range mtb.markers {
        for i := mrk.st; i < mrk.ed; i++ {
            cs[i].Fg = mrk.fg
            cs[i].Bg = mrk.bg
        }
    }

    return cs
}

// NewMarkdownTxBuilder returns a TextBuilder employing markdown syntax.
func NewMarkdownTxBuilder() TextBuilder {
    return MarkdownTxBuilder{}
}
7  Godeps/_workspace/src/github.com/golang/snappy/README  (generated, vendored)
@@ -1,7 +0,0 @@
The Snappy compression format in the Go programming language.

To download and install from source:
$ go get github.com/golang/snappy

Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
294  Godeps/_workspace/src/github.com/golang/snappy/decode.go  (generated, vendored)
@@ -1,294 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
    "encoding/binary"
    "errors"
    "io"
)

var (
    // ErrCorrupt reports that the input is invalid.
    ErrCorrupt = errors.New("snappy: corrupt input")
    // ErrTooLarge reports that the uncompressed length is too large.
    ErrTooLarge = errors.New("snappy: decoded block is too large")
    // ErrUnsupported reports that the input isn't supported.
    ErrUnsupported = errors.New("snappy: unsupported input")
)

// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
    v, _, err := decodedLen(src)
    return v, err
}

// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
    v, n := binary.Uvarint(src)
    if n <= 0 || v > 0xffffffff {
        return 0, 0, ErrCorrupt
    }

    const wordSize = 32 << (^uint(0) >> 32 & 1)
    if wordSize == 32 && v > 0x7fffffff {
        return 0, 0, ErrTooLarge
    }
    return int(v), n, nil
}

// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
    dLen, s, err := decodedLen(src)
    if err != nil {
        return nil, err
    }
    if len(dst) < dLen {
        dst = make([]byte, dLen)
    }

    var d, offset, length int
    for s < len(src) {
        switch src[s] & 0x03 {
        case tagLiteral:
            x := uint(src[s] >> 2)
            switch {
            case x < 60:
                s++
            case x == 60:
                s += 2
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-1])
            case x == 61:
                s += 3
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-2]) | uint(src[s-1])<<8
            case x == 62:
                s += 4
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16
            case x == 63:
                s += 5
                if s > len(src) {
                    return nil, ErrCorrupt
                }
                x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24
            }
            length = int(x + 1)
            if length <= 0 {
                return nil, errors.New("snappy: unsupported literal length")
            }
            if length > len(dst)-d || length > len(src)-s {
                return nil, ErrCorrupt
            }
            copy(dst[d:], src[s:s+length])
            d += length
            s += length
            continue

        case tagCopy1:
            s += 2
            if s > len(src) {
                return nil, ErrCorrupt
            }
            length = 4 + int(src[s-2])>>2&0x7
            offset = int(src[s-2])&0xe0<<3 | int(src[s-1])

        case tagCopy2:
            s += 3
            if s > len(src) {
                return nil, ErrCorrupt
            }
            length = 1 + int(src[s-3])>>2
            offset = int(src[s-2]) | int(src[s-1])<<8

        case tagCopy4:
            return nil, errors.New("snappy: unsupported COPY_4 tag")
        }

        end := d + length
        if offset > d || end > len(dst) {
            return nil, ErrCorrupt
        }
        for ; d < end; d++ {
            dst[d] = dst[d-offset]
        }
    }
    if d != dLen {
        return nil, ErrCorrupt
    }
    return dst[:d], nil
}

// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
    return &Reader{
        r:       r,
        decoded: make([]byte, maxUncompressedChunkLen),
        buf:     make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize),
    }
}

// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
    r       io.Reader
    err     error
    decoded []byte
    buf     []byte
    // decoded[i:j] contains decoded bytes that have not yet been passed on.
    i, j       int
    readHeader bool
}

// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
    r.r = reader
    r.err = nil
    r.i = 0
    r.j = 0
    r.readHeader = false
}

func (r *Reader) readFull(p []byte) (ok bool) {
    if _, r.err = io.ReadFull(r.r, p); r.err != nil {
        if r.err == io.ErrUnexpectedEOF {
            r.err = ErrCorrupt
        }
        return false
    }
    return true
}

// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
    if r.err != nil {
        return 0, r.err
    }
    for {
        if r.i < r.j {
            n := copy(p, r.decoded[r.i:r.j])
            r.i += n
            return n, nil
        }
        if !r.readFull(r.buf[:4]) {
            return 0, r.err
        }
        chunkType := r.buf[0]
        if !r.readHeader {
            if chunkType != chunkTypeStreamIdentifier {
                r.err = ErrCorrupt
                return 0, r.err
            }
            r.readHeader = true
        }
        chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
        if chunkLen > len(r.buf) {
            r.err = ErrUnsupported
            return 0, r.err
        }

        // The chunk types are specified at
        // https://github.com/google/snappy/blob/master/framing_format.txt
        switch chunkType {
        case chunkTypeCompressedData:
            // Section 4.2. Compressed data (chunk type 0x00).
            if chunkLen < checksumSize {
                r.err = ErrCorrupt
                return 0, r.err
            }
            buf := r.buf[:chunkLen]
            if !r.readFull(buf) {
                return 0, r.err
            }
            checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
            buf = buf[checksumSize:]

            n, err := DecodedLen(buf)
            if err != nil {
                r.err = err
                return 0, r.err
            }
            if n > len(r.decoded) {
                r.err = ErrCorrupt
                return 0, r.err
            }
            if _, err := Decode(r.decoded, buf); err != nil {
                r.err = err
                return 0, r.err
            }
            if crc(r.decoded[:n]) != checksum {
                r.err = ErrCorrupt
                return 0, r.err
            }
            r.i, r.j = 0, n
            continue

        case chunkTypeUncompressedData:
            // Section 4.3. Uncompressed data (chunk type 0x01).
            if chunkLen < checksumSize {
                r.err = ErrCorrupt
                return 0, r.err
            }
            buf := r.buf[:checksumSize]
            if !r.readFull(buf) {
                return 0, r.err
            }
            checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
            // Read directly into r.decoded instead of via r.buf.
            n := chunkLen - checksumSize
            if !r.readFull(r.decoded[:n]) {
                return 0, r.err
            }
            if crc(r.decoded[:n]) != checksum {
                r.err = ErrCorrupt
                return 0, r.err
            }
            r.i, r.j = 0, n
            continue

        case chunkTypeStreamIdentifier:
            // Section 4.1. Stream identifier (chunk type 0xff).
            if chunkLen != len(magicBody) {
                r.err = ErrCorrupt
                return 0, r.err
            }
            if !r.readFull(r.buf[:len(magicBody)]) {
                return 0, r.err
            }
            for i := 0; i < len(magicBody); i++ {
                if r.buf[i] != magicBody[i] {
                    r.err = ErrCorrupt
                    return 0, r.err
                }
            }
            continue
        }

        if chunkType <= 0x7f {
            // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
            r.err = ErrUnsupported
            return 0, r.err
        }
        // Section 4.4 Padding (chunk type 0xfe).
        // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
        if !r.readFull(r.buf[:chunkLen]) {
            return 0, r.err
        }
    }
}
356  Godeps/_workspace/src/github.com/golang/snappy/encode.go  (generated, vendored)
@@ -1,356 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package snappy

import (
    "encoding/binary"
    "errors"
    "io"
)

// We limit how far copy back-references can go, the same as the C++ code.
const maxOffset = 1 << 15

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst, lit []byte) int {
    i, n := 0, uint(len(lit)-1)
    switch {
    case n < 60:
        dst[0] = uint8(n)<<2 | tagLiteral
        i = 1
    case n < 1<<8:
        dst[0] = 60<<2 | tagLiteral
        dst[1] = uint8(n)
        i = 2
    case n < 1<<16:
        dst[0] = 61<<2 | tagLiteral
        dst[1] = uint8(n)
        dst[2] = uint8(n >> 8)
        i = 3
    case n < 1<<24:
        dst[0] = 62<<2 | tagLiteral
        dst[1] = uint8(n)
        dst[2] = uint8(n >> 8)
        dst[3] = uint8(n >> 16)
        i = 4
    case int64(n) < 1<<32:
        dst[0] = 63<<2 | tagLiteral
        dst[1] = uint8(n)
        dst[2] = uint8(n >> 8)
        dst[3] = uint8(n >> 16)
        dst[4] = uint8(n >> 24)
        i = 5
    default:
        panic("snappy: source buffer is too long")
    }
    if copy(dst[i:], lit) != len(lit) {
        panic("snappy: destination buffer is too short")
    }
    return i + len(lit)
}

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst []byte, offset, length int) int {
    i := 0
    for length > 0 {
        x := length - 4
        if 0 <= x && x < 1<<3 && offset < 1<<11 {
            dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1
            dst[i+1] = uint8(offset)
            i += 2
            break
        }

        x = length
        if x > 1<<6 {
            x = 1 << 6
        }
        dst[i+0] = uint8(x-1)<<2 | tagCopy2
        dst[i+1] = uint8(offset)
        dst[i+2] = uint8(offset >> 8)
        i += 3
        length -= x
    }
    return i
}

// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
// It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
    if n := MaxEncodedLen(len(src)); len(dst) < n {
        dst = make([]byte, n)
    }

    // The block starts with the varint-encoded length of the decompressed bytes.
    d := binary.PutUvarint(dst, uint64(len(src)))

    // Return early if src is short.
    if len(src) <= 4 {
        if len(src) != 0 {
            d += emitLiteral(dst[d:], src)
        }
        return dst[:d]
    }

    // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
    const maxTableSize = 1 << 14
    shift, tableSize := uint(32-8), 1<<8
    for tableSize < maxTableSize && tableSize < len(src) {
        shift--
        tableSize *= 2
    }
    var table [maxTableSize]int

    // Iterate over the source bytes.
    var (
        s   int // The iterator position.
        t   int // The last position with the same hash as s.
        lit int // The start position of any pending literal bytes.
    )
    for s+3 < len(src) {
        // Update the hash table.
        b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
        h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
        p := &table[(h*0x1e35a7bd)>>shift]
        // We need to to store values in [-1, inf) in table. To save
        // some initialization time, (re)use the table's zero value
        // and shift the values against this zero: add 1 on writes,
        // subtract 1 on reads.
        t, *p = *p-1, s+1
        // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
        if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
            // Skip multiple bytes if the last match was >= 32 bytes prior.
            s += 1 + (s-lit)>>5
            continue
        }
        // Otherwise, we have a match. First, emit any pending literal bytes.
        if lit != s {
            d += emitLiteral(dst[d:], src[lit:s])
        }
        // Extend the match to be as long as possible.
        s0 := s
        s, t = s+4, t+4
        for s < len(src) && src[s] == src[t] {
            s++
            t++
        }
        // Emit the copied bytes.
        d += emitCopy(dst[d:], s-t, s-s0)
        lit = s
    }

    // Emit any final pending literal bytes and return.
    if lit != len(src) {
        d += emitLiteral(dst[d:], src[lit:])
    }
    return dst[:d]
}

// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
func MaxEncodedLen(srcLen int) int {
    // Compressed data can be defined as:
    //    compressed := item* literal*
    //    item       := literal* copy
    //
    // The trailing literal sequence has a space blowup of at most 62/60
    // since a literal of length 60 needs one tag byte + one extra byte
    // for length information.
    //
    // Item blowup is trickier to measure. Suppose the "copy" op copies
    // 4 bytes of data. Because of a special check in the encoding code,
    // we produce a 4-byte copy only if the offset is < 65536. Therefore
    // the copy op takes 3 bytes to encode, and this type of item leads
    // to at most the 62/60 blowup for representing literals.
    //
    // Suppose the "copy" op copies 5 bytes of data. If the offset is big
    // enough, it will take 5 bytes to encode the copy op. Therefore the
    // worst case here is a one-byte literal followed by a five-byte copy.
    // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
    //
    // This last factor dominates the blowup, so the final estimate is:
    return 32 + srcLen + srcLen/6
}

var errClosed = errors.New("snappy: Writer is closed")

// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
    return &Writer{
        w:    w,
        obuf: make([]byte, obufLen),
    }
}

// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
    return &Writer{
        w:    w,
        ibuf: make([]byte, 0, maxUncompressedChunkLen),
        obuf: make([]byte, obufLen),
    }
}

// Writer is an io.Writer than can write Snappy-compressed bytes.
type Writer struct {
    w   io.Writer
    err error

    // ibuf is a buffer for the incoming (uncompressed) bytes.
    //
    // Its use is optional. For backwards compatibility, Writers created by the
    // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
    // therefore do not need to be Flush'ed or Close'd.
    ibuf []byte

    // obuf is a buffer for the outgoing (compressed) bytes.
    obuf []byte

    // wroteStreamHeader is whether we have written the stream header.
    wroteStreamHeader bool
}

// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
    w.w = writer
    w.err = nil
    if w.ibuf != nil {
        w.ibuf = w.ibuf[:0]
    }
    w.wroteStreamHeader = false
}

// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
    if w.ibuf == nil {
        // Do not buffer incoming bytes. This does not perform or compress well
        // if the caller of Writer.Write writes many small slices. This
        // behavior is therefore deprecated, but still supported for backwards
        // compatibility with code that doesn't explicitly Flush or Close.
        return w.write(p)
    }

    // The remainder of this method is based on bufio.Writer.Write from the
    // standard library.

    for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
        var n int
        if len(w.ibuf) == 0 {
            // Large write, empty buffer.
            // Write directly from p to avoid copy.
            n, _ = w.write(p)
        } else {
            n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
            w.ibuf = w.ibuf[:len(w.ibuf)+n]
            w.Flush()
        }
        nRet += n
        p = p[n:]
    }
    if w.err != nil {
        return nRet, w.err
    }
    n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
    w.ibuf = w.ibuf[:len(w.ibuf)+n]
    nRet += n
    return nRet, nil
}

func (w *Writer) write(p []byte) (nRet int, errRet error) {
    if w.err != nil {
        return 0, w.err
    }
    for len(p) > 0 {
        obufStart := len(magicChunk)
        if !w.wroteStreamHeader {
            w.wroteStreamHeader = true
            copy(w.obuf, magicChunk)
            obufStart = 0
        }

        var uncompressed []byte
        if len(p) > maxUncompressedChunkLen {
            uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:]
        } else {
            uncompressed, p = p, nil
        }
        checksum := crc(uncompressed)

        // Compress the buffer, discarding the result if the improvement
        // isn't at least 12.5%.
        compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
        chunkType := uint8(chunkTypeCompressedData)
        chunkLen := 4 + len(compressed)
        obufEnd := obufHeaderLen + len(compressed)
        if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
            chunkType = chunkTypeUncompressedData
            chunkLen = 4 + len(uncompressed)
            obufEnd = obufHeaderLen
        }

        // Fill in the per-chunk header that comes before the body.
        w.obuf[len(magicChunk)+0] = chunkType
        w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
        w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
        w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
        w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
        w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
        w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
        w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)

        if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
            w.err = err
            return nRet, err
        }
        if chunkType == chunkTypeUncompressedData {
            if _, err := w.w.Write(uncompressed); err != nil {
                w.err = err
                return nRet, err
            }
        }
        nRet += len(uncompressed)
    }
    return nRet, nil
}

// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
    if w.err != nil {
        return w.err
    }
    if len(w.ibuf) == 0 {
        return nil
    }
    w.write(w.ibuf)
    w.ibuf = w.ibuf[:0]
    return w.err
}

// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
    w.Flush()
    ret := w.err
    if w.err == nil {
        w.err = errClosed
    }
    return ret
}
77  Godeps/_workspace/src/github.com/golang/snappy/snappy.go  (generated, vendored)
@@ -1,77 +0,0 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at https://github.com/google/snappy
package snappy

import (
    "hash/crc32"
)

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the next
    m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
    of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
    tagLiteral = 0x00
    tagCopy1   = 0x01
    tagCopy2   = 0x02
    tagCopy4   = 0x03
)

const (
    checksumSize    = 4
    chunkHeaderSize = 4
    magicChunk      = "\xff\x06\x00\x00" + magicBody
    magicBody       = "sNaPpY"
    // https://github.com/google/snappy/blob/master/framing_format.txt says
    // that "the uncompressed data in a chunk must be no longer than 65536 bytes".
    maxUncompressedChunkLen = 65536

    // maxEncodedLenOfMaxUncompressedChunkLen equals
    // MaxEncodedLen(maxUncompressedChunkLen), but is hard coded to be a const
    // instead of a variable, so that obufLen can also be a const. Their
    // equivalence is confirmed by TestMaxEncodedLenOfMaxUncompressedChunkLen.
    maxEncodedLenOfMaxUncompressedChunkLen = 76490

    obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
    obufLen       = obufHeaderLen + maxEncodedLenOfMaxUncompressedChunkLen
)

const (
    chunkTypeCompressedData   = 0x00
    chunkTypeUncompressedData = 0x01
    chunkTypePadding          = 0xfe
    chunkTypeStreamIdentifier = 0xff
)

var crcTable = crc32.MakeTable(crc32.Castagnoli)

// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
    c := crc32.Update(0, crcTable, b)
    return uint32(c>>15|c<<17) + 0xa282ead8
}
23  Godeps/_workspace/src/github.com/hashicorp/golang-lru/.gitignore  (generated, vendored)
@@ -1,23 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
1  Godeps/_workspace/src/github.com/huin/goupnp/.gitignore  (generated, vendored)
@@ -1 +0,0 @@
/gotasks/specs
44  Godeps/_workspace/src/github.com/huin/goupnp/README.md  (generated, vendored)
@@ -1,44 +0,0 @@
goupnp is a UPnP client library for Go

Installation
------------

Run `go get -u github.com/huin/goupnp`.

Documentation
-------------

All doc links below are for .

Supported DCPs (you probably want to start with one of these):
* [av1](https://godoc.org/github.com/huin/goupnp/dcps/av1) - Client for UPnP Device Control Protocol MediaServer v1 and MediaRenderer v1.
* [internetgateway1](https://godoc.org/github.com/huin/goupnp/dcps/internetgateway1) - Client for UPnP Device Control Protocol Internet Gateway Device v1.
* [internetgateway2](https://godoc.org/github.com/huin/goupnp/dcps/internetgateway2) - Client for UPnP Device Control Protocol Internet Gateway Device v2.

Core components:
* [(goupnp)](https://godoc.org/github.com/huin/goupnp) core library - contains datastructures and utilities typically used by the implemented DCPs.
* [httpu](https://godoc.org/github.com/huin/goupnp/httpu) HTTPU implementation, underlies SSDP.
* [ssdp](https://godoc.org/github.com/huin/goupnp/ssdp) SSDP client implementation (simple service discovery protocol) - used to discover UPnP services on a network.
* [soap](https://godoc.org/github.com/huin/goupnp/soap) SOAP client implementation (simple object access protocol) - used to communicate with discovered services.


Regenerating dcps generated source code:
----------------------------------------

1. Install gotasks: `go get -u github.com/jingweno/gotask`
2. Change to the gotasks directory: `cd gotasks`
3. Run specgen task: `gotask specgen`

Supporting additional UPnP devices and services:
------------------------------------------------

Supporting additional services is, in the trivial case, simply a matter of
adding the service to the `dcpMetadata` whitelist in `gotasks/specgen_task.go`,
regenerating the source code (see above), and committing that source code.

However, it would be helpful if anyone needing such a service could test the
service against the service they have, and then reporting any trouble
encountered as an [issue on this
project](https://github.com/huin/goupnp/issues/new). If it just works, then
please report at least minimal working functionality as an issue, and
optionally contribute the metadata upstream.
@@ -1,20 +0,0 @@
package main

import (
    "log"
    "net/http"

    "github.com/huin/goupnp/httpu"
)

func main() {
    srv := httpu.Server{
        Addr:      "239.255.255.250:1900",
        Multicast: true,
        Handler: httpu.HandlerFunc(func(r *http.Request) {
            log.Printf("Got %s %s message from %v: %v", r.Method, r.URL.Path, r.RemoteAddr, r.Header)
        }),
    }
    err := srv.ListenAndServe()
    log.Printf("Serving failed with error: %v", err)
}
@@ -1,67 +0,0 @@
package main

import (
    "fmt"
    "log"

    "github.com/huin/goupnp/dcps/internetgateway1"
)

func main() {
    clients, errors, err := internetgateway1.NewWANPPPConnection1Clients()
    if err != nil {
        log.Fatal(err)
    }

    fmt.Printf("Got %d errors finding servers and %d successfully discovered.\n",
        len(errors), len(clients))
    for i, e := range errors {
        fmt.Printf("Error finding server #%d: %v\n", i+1, e)
    }

    for _, c := range clients {
        dev := &c.ServiceClient.RootDevice.Device
        srv := c.ServiceClient.Service
        fmt.Println(dev.FriendlyName, " :: ", srv.String())
        scpd, err := srv.RequestSCDP()
        if err != nil {
            fmt.Printf("  Error requesting service SCPD: %v\n", err)
        } else {
            fmt.Println("  Available actions:")
            for _, action := range scpd.Actions {
                fmt.Printf("  * %s\n", action.Name)
                for _, arg := range action.Arguments {
                    var varDesc string
                    if stateVar := scpd.GetStateVariable(arg.RelatedStateVariable); stateVar != nil {
                        varDesc = fmt.Sprintf(" (%s)", stateVar.DataType.Name)
                    }
                    fmt.Printf("    * [%s] %s%s\n", arg.Direction, arg.Name, varDesc)
                }
            }
        }

        if scpd == nil || scpd.GetAction("GetExternalIPAddress") != nil {
            ip, err := c.GetExternalIPAddress()
            fmt.Println("GetExternalIPAddress: ", ip, err)
        }

        if scpd == nil || scpd.GetAction("GetStatusInfo") != nil {
            status, lastErr, uptime, err := c.GetStatusInfo()
            fmt.Println("GetStatusInfo: ", status, lastErr, uptime, err)
        }

        if scpd == nil || scpd.GetAction("GetIdleDisconnectTime") != nil {
            idleTime, err := c.GetIdleDisconnectTime()
            fmt.Println("GetIdleDisconnectTime: ", idleTime, err)
        }

        if scpd == nil || scpd.GetAction("AddPortMapping") != nil {
            err := c.AddPortMapping("", 5000, "TCP", 5001, "192.168.1.2", true, "Test port mapping", 0)
            fmt.Println("AddPortMapping: ", err)
        }
        if scpd == nil || scpd.GetAction("DeletePortMapping") != nil {
            err := c.DeletePortMapping("", 5000, "TCP")
            fmt.Println("DeletePortMapping: ", err)
        }
    }
}
@@ -1,27 +0,0 @@
package main

import (
    "log"

    "github.com/huin/goupnp/ssdp"
)

func main() {
    c := make(chan ssdp.Update)
    srv, reg := ssdp.NewServerAndRegistry()
    reg.AddListener(c)
    go listener(c)
    if err := srv.ListenAndServe(); err != nil {
        log.Print("ListenAndServe failed: ", err)
    }
}

func listener(c <-chan ssdp.Update) {
    for u := range c {
        if u.Entry != nil {
            log.Printf("Event: %v USN: %s Entry: %#v", u.EventType, u.USN, *u.Entry)
        } else {
            log.Printf("Event: %v USN: %s Entry: <nil>", u.EventType, u.USN)
        }
    }
}
8452  Godeps/_workspace/src/github.com/huin/goupnp/dcps/av1/av1.go  (generated, vendored)
File diff suppressed because it is too large.
6  Godeps/_workspace/src/github.com/huin/goupnp/example/example.go  (generated, vendored)
@@ -1,6 +0,0 @@
// Serves as examples of using the goupnp library.
//
// To run examples and see the output for your local network, run the following
// command (specifically including the -v flag):
//    go test -v github.com/huin/goupnp/example
package example
603
Godeps/_workspace/src/github.com/huin/goupnp/gotasks/specgen_task.go
generated
vendored
603
Godeps/_workspace/src/github.com/huin/goupnp/gotasks/specgen_task.go
generated
vendored
@ -1,603 +0,0 @@
|
||||
// +build gotask
|
||||
|
||||
package gotasks
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/huin/goupnp"
|
||||
"github.com/huin/goupnp/scpd"
|
||||
"github.com/huin/goutil/codegen"
|
||||
"github.com/jingweno/gotask/tasking"
|
||||
)
|
||||
|
||||
var (
|
||||
deviceURNPrefix = "urn:schemas-upnp-org:device:"
|
||||
serviceURNPrefix = "urn:schemas-upnp-org:service:"
|
||||
)
|
||||
|
||||
// DCP contains extra metadata to use when generating DCP source files.
|
||||
type DCPMetadata struct {
|
||||
Name string // What to name the Go DCP package.
|
||||
OfficialName string // Official name for the DCP.
|
||||
DocURL string // Optional - URL for futher documentation about the DCP.
|
||||
XMLSpecURL string // Where to download the XML spec from.
|
||||
// Any special-case functions to run against the DCP before writing it out.
|
||||
Hacks []DCPHackFn
|
||||
}
|
||||
|
||||
var dcpMetadata = []DCPMetadata{
|
||||
{
|
||||
Name: "internetgateway1",
|
||||
OfficialName: "Internet Gateway Device v1",
|
||||
DocURL: "http://upnp.org/specs/gw/UPnP-gw-InternetGatewayDevice-v1-Device.pdf",
|
||||
XMLSpecURL: "http://upnp.org/specs/gw/UPnP-gw-IGD-TestFiles-20010921.zip",
|
||||
},
|
||||
{
|
||||
Name: "internetgateway2",
|
||||
OfficialName: "Internet Gateway Device v2",
|
||||
DocURL: "http://upnp.org/specs/gw/UPnP-gw-InternetGatewayDevice-v2-Device.pdf",
|
||||
XMLSpecURL: "http://upnp.org/specs/gw/UPnP-gw-IGD-Testfiles-20110224.zip",
|
||||
Hacks: []DCPHackFn{
|
||||
func(dcp *DCP) error {
|
||||
missingURN := "urn:schemas-upnp-org:service:WANIPv6FirewallControl:1"
|
||||
if _, ok := dcp.ServiceTypes[missingURN]; ok {
|
||||
return nil
|
||||
}
|
||||
urnParts, err := extractURNParts(missingURN, serviceURNPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dcp.ServiceTypes[missingURN] = urnParts
|
||||
return nil
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "av1",
|
||||
OfficialName: "MediaServer v1 and MediaRenderer v1",
|
||||
DocURL: "http://upnp.org/specs/av/av1/",
|
||||
XMLSpecURL: "http://upnp.org/specs/av/UPnP-av-TestFiles-20070927.zip",
|
||||
},
|
||||
}
|
||||
|
||||
type DCPHackFn func(*DCP) error
|
||||
|
||||
// NAME
|
||||
// specgen - generates Go code from the UPnP specification files.
|
||||
//
|
||||
// DESCRIPTION
|
||||
// The specification is available for download from:
|
||||
//
|
||||
// OPTIONS
|
||||
// -s, --specs_dir=<spec directory>
|
||||
// Path to the specification storage directory. This is used to find (and download if not present) the specification ZIP files. Defaults to 'specs'
|
||||
// -o, --out_dir=<output directory>
|
||||
// Path to the output directory. This is is where the DCP source files will be placed. Should normally correspond to the directory for github.com/huin/goupnp/dcps. Defaults to '../dcps'
|
||||
// --nogofmt
|
||||
// Disable passing the output through gofmt. Do this if debugging code output problems and needing to see the generated code prior to being passed through gofmt.
|
||||
func TaskSpecgen(t *tasking.T) {
|
||||
specsDir := fallbackStrValue("specs", t.Flags.String("specs_dir"), t.Flags.String("s"))
|
||||
if err := os.MkdirAll(specsDir, os.ModePerm); err != nil {
|
||||
t.Fatalf("Could not create specs-dir %q: %v\n", specsDir, err)
|
||||
}
|
||||
outDir := fallbackStrValue("../dcps", t.Flags.String("out_dir"), t.Flags.String("o"))
|
||||
useGofmt := !t.Flags.Bool("nogofmt")
|
||||
|
||||
NEXT_DCP:
|
||||
for _, d := range dcpMetadata {
|
||||
specFilename := filepath.Join(specsDir, d.Name+".zip")
|
||||
err := acquireFile(specFilename, d.XMLSpecURL)
|
||||
if err != nil {
|
||||
t.Logf("Could not acquire spec for %s, skipping: %v\n", d.Name, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
dcp := newDCP(d)
|
||||
if err := dcp.processZipFile(specFilename); err != nil {
|
||||
log.Printf("Error processing spec for %s in file %q: %v", d.Name, specFilename, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
for i, hack := range d.Hacks {
|
||||
if err := hack(dcp); err != nil {
|
||||
log.Printf("Error with Hack[%d] for %s: %v", i, d.Name, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
}
|
||||
dcp.writePackage(outDir, useGofmt)
|
||||
if err := dcp.writePackage(outDir, useGofmt); err != nil {
|
||||
log.Printf("Error writing package %q: %v", dcp.Metadata.Name, err)
|
||||
continue NEXT_DCP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func fallbackStrValue(defaultValue string, values ...string) string {
|
||||
for _, v := range values {
|
||||
if v != "" {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
func acquireFile(specFilename string, xmlSpecURL string) error {
|
||||
if f, err := os.Open(specFilename); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
f.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
resp, err := http.Get(xmlSpecURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("could not download spec %q from %q: ",
|
||||
specFilename, xmlSpecURL, resp.Status)
|
||||
}
|
||||

	tmpFilename := specFilename + ".download"
	w, err := os.Create(tmpFilename)
	if err != nil {
		return err
	}
	defer w.Close()

	_, err = io.Copy(w, resp.Body)
	if err != nil {
		return err
	}

	return os.Rename(tmpFilename, specFilename)
}

// DCP collects together information about a UPnP Device Control Protocol.
type DCP struct {
	Metadata     DCPMetadata
	DeviceTypes  map[string]*URNParts
	ServiceTypes map[string]*URNParts
	Services     []SCPDWithURN
}

func newDCP(metadata DCPMetadata) *DCP {
	return &DCP{
		Metadata:     metadata,
		DeviceTypes:  make(map[string]*URNParts),
		ServiceTypes: make(map[string]*URNParts),
	}
}

func (dcp *DCP) processZipFile(filename string) error {
	archive, err := zip.OpenReader(filename)
	if err != nil {
		return fmt.Errorf("error reading zip file %q: %v", filename, err)
	}
	defer archive.Close()
	for _, deviceFile := range globFiles("*/device/*.xml", archive) {
		if err := dcp.processDeviceFile(deviceFile); err != nil {
			return err
		}
	}
	for _, scpdFile := range globFiles("*/service/*.xml", archive) {
		if err := dcp.processSCPDFile(scpdFile); err != nil {
			return err
		}
	}
	return nil
}

func (dcp *DCP) processDeviceFile(file *zip.File) error {
	var device goupnp.Device
	if err := unmarshalXmlFile(file, &device); err != nil {
		return fmt.Errorf("error decoding device XML from file %q: %v", file.Name, err)
	}
	var mainErr error
	device.VisitDevices(func(d *goupnp.Device) {
		t := strings.TrimSpace(d.DeviceType)
		if t != "" {
			u, err := extractURNParts(t, deviceURNPrefix)
			if err != nil {
				mainErr = err
			}
			dcp.DeviceTypes[t] = u
		}
	})
	device.VisitServices(func(s *goupnp.Service) {
		u, err := extractURNParts(s.ServiceType, serviceURNPrefix)
		if err != nil {
			mainErr = err
		}
		dcp.ServiceTypes[s.ServiceType] = u
	})
	return mainErr
}

func (dcp *DCP) writePackage(outDir string, useGofmt bool) error {
	packageDirname := filepath.Join(outDir, dcp.Metadata.Name)
	err := os.MkdirAll(packageDirname, os.ModePerm)
	if err != nil && !os.IsExist(err) {
		return err
	}
	packageFilename := filepath.Join(packageDirname, dcp.Metadata.Name+".go")
	packageFile, err := os.Create(packageFilename)
	if err != nil {
		return err
	}
	var output io.WriteCloser = packageFile
	if useGofmt {
		if output, err = codegen.NewGofmtWriteCloser(output); err != nil {
			packageFile.Close()
			return err
		}
	}
	if err = packageTmpl.Execute(output, dcp); err != nil {
		output.Close()
		return err
	}
	return output.Close()
}

func (dcp *DCP) processSCPDFile(file *zip.File) error {
	scpd := new(scpd.SCPD)
	if err := unmarshalXmlFile(file, scpd); err != nil {
		return fmt.Errorf("error decoding SCPD XML from file %q: %v", file.Name, err)
	}
	scpd.Clean()
	urnParts, err := urnPartsFromSCPDFilename(file.Name)
	if err != nil {
		return fmt.Errorf("could not recognize SCPD filename %q: %v", file.Name, err)
	}
	dcp.Services = append(dcp.Services, SCPDWithURN{
		URNParts: urnParts,
		SCPD:     scpd,
	})
	return nil
}

type SCPDWithURN struct {
	*URNParts
	SCPD *scpd.SCPD
}

func (s *SCPDWithURN) WrapArguments(args []*scpd.Argument) (argumentWrapperList, error) {
	wrappedArgs := make(argumentWrapperList, len(args))
	for i, arg := range args {
		wa, err := s.wrapArgument(arg)
		if err != nil {
			return nil, err
		}
		wrappedArgs[i] = wa
	}
	return wrappedArgs, nil
}

func (s *SCPDWithURN) wrapArgument(arg *scpd.Argument) (*argumentWrapper, error) {
	relVar := s.SCPD.GetStateVariable(arg.RelatedStateVariable)
	if relVar == nil {
		return nil, fmt.Errorf("no such state variable: %q, for argument %q", arg.RelatedStateVariable, arg.Name)
	}
	cnv, ok := typeConvs[relVar.DataType.Name]
	if !ok {
		return nil, fmt.Errorf("unknown data type: %q, for state variable %q, for argument %q", relVar.DataType.Type, arg.RelatedStateVariable, arg.Name)
	}
	return &argumentWrapper{
		Argument: *arg,
		relVar:   relVar,
		conv:     cnv,
	}, nil
}

type argumentWrapper struct {
	scpd.Argument
	relVar *scpd.StateVariable
	conv   conv
}

func (arg *argumentWrapper) AsParameter() string {
	return fmt.Sprintf("%s %s", arg.Name, arg.conv.ExtType)
}

func (arg *argumentWrapper) HasDoc() bool {
	rng := arg.relVar.AllowedValueRange
	return ((rng != nil && (rng.Minimum != "" || rng.Maximum != "" || rng.Step != "")) ||
		len(arg.relVar.AllowedValues) > 0)
}

func (arg *argumentWrapper) Document() string {
	relVar := arg.relVar
	if rng := relVar.AllowedValueRange; rng != nil {
		var parts []string
		if rng.Minimum != "" {
			parts = append(parts, fmt.Sprintf("minimum=%s", rng.Minimum))
		}
		if rng.Maximum != "" {
			parts = append(parts, fmt.Sprintf("maximum=%s", rng.Maximum))
		}
		if rng.Step != "" {
			parts = append(parts, fmt.Sprintf("step=%s", rng.Step))
		}
		return "allowed value range: " + strings.Join(parts, ", ")
	}
	if len(relVar.AllowedValues) != 0 {
		return "allowed values: " + strings.Join(relVar.AllowedValues, ", ")
	}
	return ""
}

func (arg *argumentWrapper) Marshal() string {
	return fmt.Sprintf("soap.Marshal%s(%s)", arg.conv.FuncSuffix, arg.Name)
}

func (arg *argumentWrapper) Unmarshal(objVar string) string {
	return fmt.Sprintf("soap.Unmarshal%s(%s.%s)", arg.conv.FuncSuffix, objVar, arg.Name)
}

type argumentWrapperList []*argumentWrapper

func (args argumentWrapperList) HasDoc() bool {
	for _, arg := range args {
		if arg.HasDoc() {
			return true
		}
	}
	return false
}

type conv struct {
	FuncSuffix string
	ExtType    string
}

// typeConvs maps from a SOAP type (e.g. "fixed.14.4") to the function name
// suffix inside the soap module (e.g. "Fixed14_4") and the Go type.
var typeConvs = map[string]conv{
	"ui1":         conv{"Ui1", "uint8"},
	"ui2":         conv{"Ui2", "uint16"},
	"ui4":         conv{"Ui4", "uint32"},
	"i1":          conv{"I1", "int8"},
	"i2":          conv{"I2", "int16"},
	"i4":          conv{"I4", "int32"},
	"int":         conv{"Int", "int64"},
	"r4":          conv{"R4", "float32"},
	"r8":          conv{"R8", "float64"},
	"number":      conv{"R8", "float64"}, // Alias for r8.
	"fixed.14.4":  conv{"Fixed14_4", "float64"},
	"float":       conv{"R8", "float64"},
	"char":        conv{"Char", "rune"},
	"string":      conv{"String", "string"},
	"date":        conv{"Date", "time.Time"},
	"dateTime":    conv{"DateTime", "time.Time"},
	"dateTime.tz": conv{"DateTimeTz", "time.Time"},
	"time":        conv{"TimeOfDay", "soap.TimeOfDay"},
	"time.tz":     conv{"TimeOfDayTz", "soap.TimeOfDay"},
	"boolean":     conv{"Boolean", "bool"},
	"bin.base64":  conv{"BinBase64", "[]byte"},
	"bin.hex":     conv{"BinHex", "[]byte"},
	"uri":         conv{"URI", "*url.URL"},
}

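The table above drives the generated marshalling: a state variable's SOAP type selects both the Go parameter type (ExtType) and a soap helper named Marshal<FuncSuffix>/Unmarshal<FuncSuffix>. A minimal sketch of that convention for "ui4", assuming the soap package exposes Ui4 helpers with the signatures the table implies:

package main

import (
	"fmt"

	"github.com/huin/goupnp/soap"
)

func main() {
	// "ui4" maps to FuncSuffix "Ui4" and ExtType uint32, so generated code
	// would marshal such an argument with soap.MarshalUi4 and unmarshal the
	// matching response field with soap.UnmarshalUi4 (assumed helper names).
	s, err := soap.MarshalUi4(8080)
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "8080"

	v, err := soap.UnmarshalUi4(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 8080
}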
func globFiles(pattern string, archive *zip.ReadCloser) []*zip.File {
	var files []*zip.File
	for _, f := range archive.File {
		if matched, err := path.Match(pattern, f.Name); err != nil {
			// This shouldn't happen - all patterns are hard-coded, errors in them
			// are a programming error.
			panic(err)
		} else if matched {
			files = append(files, f)
		}
	}
	return files
}

func unmarshalXmlFile(file *zip.File, data interface{}) error {
	r, err := file.Open()
	if err != nil {
		return err
	}
	// The decoder reads from r lazily, so keep r open until Decode returns.
	defer r.Close()
	decoder := xml.NewDecoder(r)
	return decoder.Decode(data)
}

type URNParts struct {
	URN     string
	Name    string
	Version string
}

func (u *URNParts) Const() string {
	return fmt.Sprintf("URN_%s_%s", u.Name, u.Version)
}

// extractURNParts extracts the name and version from a URN string.
func extractURNParts(urn, expectedPrefix string) (*URNParts, error) {
	if !strings.HasPrefix(urn, expectedPrefix) {
		return nil, fmt.Errorf("%q does not have expected prefix %q", urn, expectedPrefix)
	}
	parts := strings.SplitN(strings.TrimPrefix(urn, expectedPrefix), ":", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("%q does not have a name and version", urn)
	}
	name, version := parts[0], parts[1]
	return &URNParts{urn, name, version}, nil
}

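For illustration, this is the prefix-strip-and-split that extractURNParts performs on a service URN; the function is unexported, so this standalone sketch inlines the same two steps, with the service prefix assumed to be "urn:schemas-upnp-org:service:":

package main

import (
	"fmt"
	"strings"
)

func main() {
	const serviceURNPrefix = "urn:schemas-upnp-org:service:" // assumed prefix value
	urn := "urn:schemas-upnp-org:service:WANIPConnection:1"

	// Strip the expected prefix, then split the remainder into name and version.
	parts := strings.SplitN(strings.TrimPrefix(urn, serviceURNPrefix), ":", 2)
	name, version := parts[0], parts[1]
	fmt.Println(name, version) // WANIPConnection 1
}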
var scpdFilenameRe = regexp.MustCompile(
	`.*/([a-zA-Z0-9]+)([0-9]+)\.xml`)

func urnPartsFromSCPDFilename(filename string) (*URNParts, error) {
	parts := scpdFilenameRe.FindStringSubmatch(filename)
	if len(parts) != 3 {
		return nil, fmt.Errorf("SCPD filename %q does not have expected number of parts", filename)
	}
	name, version := parts[1], parts[2]
	return &URNParts{
		URN:     serviceURNPrefix + name + ":" + version,
		Name:    name,
		Version: version,
	}, nil
}

var packageTmpl = template.Must(template.New("package").Parse(`{{$name := .Metadata.Name}}
// Client for UPnP Device Control Protocol {{.Metadata.OfficialName}}.
// {{if .Metadata.DocURL}}
// This DCP is documented in detail at: {{.Metadata.DocURL}}{{end}}
//
// Typically, use one of the New* functions to create clients for services.
package {{$name}}

// Generated file - do not edit by hand. See README.md


import (
	"net/url"
	"time"

	"github.com/huin/goupnp"
	"github.com/huin/goupnp/soap"
)

// Hack to avoid Go complaining if time isn't used.
var _ time.Time

// Device URNs:
const ({{range .DeviceTypes}}
	{{.Const}} = "{{.URN}}"{{end}}
)

// Service URNs:
const ({{range .ServiceTypes}}
	{{.Const}} = "{{.URN}}"{{end}}
)

{{range .Services}}
{{$srv := .}}
{{$srvIdent := printf "%s%s" .Name .Version}}

// {{$srvIdent}} is a client for UPnP SOAP service with URN "{{.URN}}". See
// goupnp.ServiceClient, which contains RootDevice and Service attributes which
// are provided for informational value.
type {{$srvIdent}} struct {
	goupnp.ServiceClient
}

// New{{$srvIdent}}Clients discovers instances of the service on the network,
// and returns clients to any that are found. errors will contain an error for
// any devices that replied but which could not be queried, and err will be set
// if the discovery process failed outright.
//
// This is a typical entry calling point into this package.
func New{{$srvIdent}}Clients() (clients []*{{$srvIdent}}, errors []error, err error) {
	var genericClients []goupnp.ServiceClient
	if genericClients, errors, err = goupnp.NewServiceClients({{$srv.Const}}); err != nil {
		return
	}
	clients = new{{$srvIdent}}ClientsFromGenericClients(genericClients)
	return
}

// New{{$srvIdent}}ClientsByURL discovers instances of the service at the given
// URL, and returns clients to any that are found. An error is returned if
// there was an error probing the service.
//
// This is a typical entry calling point into this package when reusing a
// previously discovered service URL.
func New{{$srvIdent}}ClientsByURL(loc *url.URL) ([]*{{$srvIdent}}, error) {
	genericClients, err := goupnp.NewServiceClientsByURL(loc, {{$srv.Const}})
	if err != nil {
		return nil, err
	}
	return new{{$srvIdent}}ClientsFromGenericClients(genericClients), nil
}

// New{{$srvIdent}}ClientsFromRootDevice discovers instances of the service in
// a given root device, and returns clients to any that are found. An error is
// returned if there was not at least one instance of the service within the
// device. The location parameter is simply assigned to the Location attribute
// of the wrapped ServiceClient(s).
//
// This is a typical entry calling point into this package when reusing a
// previously discovered root device.
func New{{$srvIdent}}ClientsFromRootDevice(rootDevice *goupnp.RootDevice, loc *url.URL) ([]*{{$srvIdent}}, error) {
	genericClients, err := goupnp.NewServiceClientsFromRootDevice(rootDevice, loc, {{$srv.Const}})
	if err != nil {
		return nil, err
	}
	return new{{$srvIdent}}ClientsFromGenericClients(genericClients), nil
}

func new{{$srvIdent}}ClientsFromGenericClients(genericClients []goupnp.ServiceClient) []*{{$srvIdent}} {
	clients := make([]*{{$srvIdent}}, len(genericClients))
	for i := range genericClients {
		clients[i] = &{{$srvIdent}}{genericClients[i]}
	}
	return clients
}

{{range .SCPD.Actions}}{{/* loops over *SCPDWithURN values */}}

{{$winargs := $srv.WrapArguments .InputArguments}}
{{$woutargs := $srv.WrapArguments .OutputArguments}}
{{if $winargs.HasDoc}}
//
// Arguments:{{range $winargs}}{{if .HasDoc}}
//
// * {{.Name}}: {{.Document}}{{end}}{{end}}{{end}}
{{if $woutargs.HasDoc}}
//
// Return values:{{range $woutargs}}{{if .HasDoc}}
//
// * {{.Name}}: {{.Document}}{{end}}{{end}}{{end}}
func (client *{{$srvIdent}}) {{.Name}}({{range $winargs}}{{/*
*/}}{{.AsParameter}}, {{end}}{{/*
*/}}) ({{range $woutargs}}{{/*
*/}}{{.AsParameter}}, {{end}} err error) {
	// Request structure.
	request := {{if $winargs}}&{{template "argstruct" $winargs}}{{"{}"}}{{else}}{{"interface{}(nil)"}}{{end}}
	// BEGIN Marshal arguments into request.
{{range $winargs}}
	if request.{{.Name}}, err = {{.Marshal}}; err != nil {
		return
	}{{end}}
	// END Marshal arguments into request.

	// Response structure.
	response := {{if $woutargs}}&{{template "argstruct" $woutargs}}{{"{}"}}{{else}}{{"interface{}(nil)"}}{{end}}

	// Perform the SOAP call.
	if err = client.SOAPClient.PerformAction({{$srv.URNParts.Const}}, "{{.Name}}", request, response); err != nil {
		return
	}

	// BEGIN Unmarshal arguments from response.
{{range $woutargs}}
	if {{.Name}}, err = {{.Unmarshal "response"}}; err != nil {
		return
	}{{end}}
	// END Unmarshal arguments from response.
	return
}
{{end}}{{/* range .SCPD.Actions */}}
{{end}}{{/* range .Services */}}

{{define "argstruct"}}struct {{"{"}}{{range .}}
	{{.Name}} string
{{end}}{{"}"}}{{end}}
`))
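The template above is what emits the per-DCP client packages. As a usage sketch of the code it generates, assuming the internetgateway1 package shipped with goupnp and its WANIPConnection1 service (names taken to exist as generated, not guaranteed by this file):

package main

import (
	"fmt"
	"log"

	"github.com/huin/goupnp/dcps/internetgateway1"
)

func main() {
	// Discover WANIPConnection:1 services on the local network.
	clients, errs, err := internetgateway1.NewWANIPConnection1Clients()
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range errs {
		log.Println("device error:", e)
	}
	for _, c := range clients {
		extIP, err := c.GetExternalIPAddress()
		if err != nil {
			log.Println(err)
			continue
		}
		fmt.Println("external IP:", extIP)
	}
}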
312 Godeps/_workspace/src/github.com/huin/goupnp/ssdp/registry.go (generated, vendored)
@ -1,312 +0,0 @@
package ssdp

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"regexp"
	"strconv"
	"sync"
	"time"

	"github.com/huin/goupnp/httpu"
)

const (
	maxExpiryTimeSeconds = 24 * 60 * 60
)

var (
	maxAgeRx = regexp.MustCompile("max-age=([0-9]+)")
)

const (
	EventAlive = EventType(iota)
	EventUpdate
	EventByeBye
)

type EventType int8

func (et EventType) String() string {
	switch et {
	case EventAlive:
		return "EventAlive"
	case EventUpdate:
		return "EventUpdate"
	case EventByeBye:
		return "EventByeBye"
	default:
		return fmt.Sprintf("EventUnknown(%d)", int8(et))
	}
}

type Update struct {
	// The USN of the service.
	USN string
	// What happened.
	EventType EventType
	// The entry, which is nil if the service was not known and
	// EventType==EventByeBye. The contents of this must not be modified as it is
	// shared with the registry and other listeners. Once created, the Registry
	// does not modify the Entry value - any updates are replaced with a new
	// Entry value.
	Entry *Entry
}

type Entry struct {
	// The address that the entry data was actually received from.
	RemoteAddr string
	// Unique Service Name. Identifies a unique instance of a device or service.
	USN string
	// Notification Type. The type of device or service being announced.
	NT string
	// Server's self-identifying string.
	Server string
	Host   string
	// Location of the UPnP root device description.
	Location url.URL

	// Despite BOOTID,CONFIGID being required fields, apparently they are not
	// always set by devices. Set to -1 if not present.

	BootID   int32
	ConfigID int32

	SearchPort uint16

	// When the last update was received for this entry identified by this USN.
	LastUpdate time.Time
	// When the last update's cached values are advised to expire.
	CacheExpiry time.Time
}

func newEntryFromRequest(r *http.Request) (*Entry, error) {
	now := time.Now()
	expiryDuration, err := parseCacheControlMaxAge(r.Header.Get("CACHE-CONTROL"))
	if err != nil {
		return nil, fmt.Errorf("ssdp: error parsing CACHE-CONTROL max age: %v", err)
	}

	loc, err := url.Parse(r.Header.Get("LOCATION"))
	if err != nil {
		return nil, fmt.Errorf("ssdp: error parsing entry Location URL: %v", err)
	}

	bootID, err := parseUpnpIntHeader(r.Header, "BOOTID.UPNP.ORG", -1)
	if err != nil {
		return nil, err
	}
	configID, err := parseUpnpIntHeader(r.Header, "CONFIGID.UPNP.ORG", -1)
	if err != nil {
		return nil, err
	}
	searchPort, err := parseUpnpIntHeader(r.Header, "SEARCHPORT.UPNP.ORG", ssdpSearchPort)
	if err != nil {
		return nil, err
	}

	if searchPort < 1 || searchPort > 65535 {
		return nil, fmt.Errorf("ssdp: search port %d is out of range", searchPort)
	}

	return &Entry{
		RemoteAddr:  r.RemoteAddr,
		USN:         r.Header.Get("USN"),
		NT:          r.Header.Get("NT"),
		Server:      r.Header.Get("SERVER"),
		Host:        r.Header.Get("HOST"),
		Location:    *loc,
		BootID:      bootID,
		ConfigID:    configID,
		SearchPort:  uint16(searchPort),
		LastUpdate:  now,
		CacheExpiry: now.Add(expiryDuration),
	}, nil
}

func parseCacheControlMaxAge(cc string) (time.Duration, error) {
	matches := maxAgeRx.FindStringSubmatch(cc)
	if len(matches) != 2 {
		return 0, fmt.Errorf("did not find exactly one max-age in cache control header: %q", cc)
	}
	// Parse as 32-bit so that values up to maxExpiryTimeSeconds are representable.
	expirySeconds, err := strconv.ParseInt(matches[1], 10, 32)
	if err != nil {
		return 0, err
	}
	if expirySeconds < 1 || expirySeconds > maxExpiryTimeSeconds {
		return 0, fmt.Errorf("rejecting bad expiry time of %d seconds", expirySeconds)
	}
	return time.Duration(expirySeconds) * time.Second, nil
}

// parseUpnpIntHeader is intended to parse the
// {BOOT,CONFIGID,SEARCHPORT}.UPNP.ORG header fields. It returns the def if
// the header is empty or missing.
func parseUpnpIntHeader(headers http.Header, headerName string, def int32) (int32, error) {
	s := headers.Get(headerName)
	if s == "" {
		return def, nil
	}
	v, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("ssdp: could not parse header %s: %v", headerName, err)
	}
	return int32(v), nil
}

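A small test-style sketch of that default-and-parse behaviour (hypothetical test, written as if it lived alongside this file in package ssdp):

package ssdp

import (
	"net/http"
	"testing"
)

func TestParseUpnpIntHeaderSketch(t *testing.T) {
	h := http.Header{}
	// Missing header: the supplied default is returned.
	if v, err := parseUpnpIntHeader(h, "BOOTID.UPNP.ORG", -1); err != nil || v != -1 {
		t.Fatalf("got %d, %v; want -1, <nil>", v, err)
	}
	// Present header: parsed as a base-10 int32.
	h.Set("BOOTID.UPNP.ORG", "7")
	if v, err := parseUpnpIntHeader(h, "BOOTID.UPNP.ORG", -1); err != nil || v != 7 {
		t.Fatalf("got %d, %v; want 7, <nil>", v, err)
	}
}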
var _ httpu.Handler = new(Registry)

// Registry maintains knowledge of discovered devices and services.
//
// NOTE: the interface for this is experimental and may change, or go away
// entirely.
type Registry struct {
	lock  sync.Mutex
	byUSN map[string]*Entry

	listenersLock sync.RWMutex
	listeners     map[chan<- Update]struct{}
}

func NewRegistry() *Registry {
	return &Registry{
		byUSN:     make(map[string]*Entry),
		listeners: make(map[chan<- Update]struct{}),
	}
}

// NewServerAndRegistry is a convenience function to create a registry, and an
// httpu server to pass it messages. Call ListenAndServe on the server for
// messages to be processed.
func NewServerAndRegistry() (*httpu.Server, *Registry) {
	reg := NewRegistry()
	srv := &httpu.Server{
		Addr:      ssdpUDP4Addr,
		Multicast: true,
		Handler:   reg,
	}
	return srv, reg
}

func (reg *Registry) AddListener(c chan<- Update) {
	reg.listenersLock.Lock()
	defer reg.listenersLock.Unlock()
	reg.listeners[c] = struct{}{}
}

func (reg *Registry) RemoveListener(c chan<- Update) {
	reg.listenersLock.Lock()
	defer reg.listenersLock.Unlock()
	delete(reg.listeners, c)
}

func (reg *Registry) sendUpdate(u Update) {
	reg.listenersLock.RLock()
	defer reg.listenersLock.RUnlock()
	for c := range reg.listeners {
		c <- u
	}
}

// GetService returns known service (or device) entries for the given service
// URN.
func (reg *Registry) GetService(serviceURN string) []*Entry {
	// Currently assumes that the map is small, so we do a linear search rather
	// than indexed to avoid maintaining two maps.
	var results []*Entry
	reg.lock.Lock()
	defer reg.lock.Unlock()
	for _, entry := range reg.byUSN {
		if entry.NT == serviceURN {
			results = append(results, entry)
		}
	}
	return results
}

// ServeMessage implements httpu.Handler, and uses SSDP NOTIFY requests to
// maintain the registry of devices and services.
func (reg *Registry) ServeMessage(r *http.Request) {
	if r.Method != methodNotify {
		return
	}

	nts := r.Header.Get("nts")

	var err error
	switch nts {
	case ntsAlive:
		err = reg.handleNTSAlive(r)
	case ntsUpdate:
		err = reg.handleNTSUpdate(r)
	case ntsByebye:
		err = reg.handleNTSByebye(r)
	default:
		err = fmt.Errorf("unknown NTS value: %q", nts)
	}
	if err != nil {
		log.Printf("goupnp/ssdp: failed to handle %s message from %s: %v", nts, r.RemoteAddr, err)
	}
}

func (reg *Registry) handleNTSAlive(r *http.Request) error {
	entry, err := newEntryFromRequest(r)
	if err != nil {
		return err
	}

	reg.lock.Lock()
	reg.byUSN[entry.USN] = entry
	reg.lock.Unlock()

	reg.sendUpdate(Update{
		USN:       entry.USN,
		EventType: EventAlive,
		Entry:     entry,
	})

	return nil
}

func (reg *Registry) handleNTSUpdate(r *http.Request) error {
	entry, err := newEntryFromRequest(r)
	if err != nil {
		return err
	}
	nextBootID, err := parseUpnpIntHeader(r.Header, "NEXTBOOTID.UPNP.ORG", -1)
	if err != nil {
		return err
	}
	entry.BootID = nextBootID

	reg.lock.Lock()
	reg.byUSN[entry.USN] = entry
	reg.lock.Unlock()

	reg.sendUpdate(Update{
		USN:       entry.USN,
		EventType: EventUpdate,
		Entry:     entry,
	})

	return nil
}

func (reg *Registry) handleNTSByebye(r *http.Request) error {
	usn := r.Header.Get("USN")

	reg.lock.Lock()
	entry := reg.byUSN[usn]
	delete(reg.byUSN, usn)
	reg.lock.Unlock()

	reg.sendUpdate(Update{
		USN:       usn,
		EventType: EventByeBye,
		Entry:     entry,
	})

	return nil
}
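How the registry is wired up is not shown in this file; a minimal sketch, assuming httpu.Server.ListenAndServe blocks while feeding SSDP NOTIFY messages to its Handler:

package main

import (
	"log"

	"github.com/huin/goupnp/ssdp"
)

func main() {
	srv, reg := ssdp.NewServerAndRegistry()

	// Receive alive/update/byebye events as they are observed.
	updates := make(chan ssdp.Update, 16)
	reg.AddListener(updates)
	defer reg.RemoveListener(updates)

	go func() {
		for u := range updates {
			log.Printf("%v %s (have entry: %t)", u.EventType, u.USN, u.Entry != nil)
		}
	}()

	// Blocks, passing multicast NOTIFY messages to the registry.
	if err := srv.ListenAndServe(); err != nil {
		log.Fatal(err)
	}
}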
27 Godeps/_workspace/src/github.com/jackpal/gateway/LICENSE (generated, vendored)
@ -1,27 +0,0 @@
// Copyright (c) 2010 Jack Palevich. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
7 Godeps/_workspace/src/github.com/jackpal/gateway/README.md (generated, vendored)
@ -1,7 +0,0 @@
# gateway

A very simple library for discovering the IP address of the local LAN gateway.

Provides implementations for Linux, OS X (Darwin) and Windows.

Pull requests for other OSs happily considered!
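A minimal sketch of using the library the README describes, based on the DiscoverGateway function defined in the platform files below:

package main

import (
	"fmt"
	"log"

	"github.com/jackpal/gateway"
)

func main() {
	// Returns the LAN gateway's IP, using the platform-specific implementation.
	ip, err := gateway.DiscoverGateway()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default gateway:", ip)
}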
40 Godeps/_workspace/src/github.com/jackpal/gateway/gateway_darwin.go (generated, vendored)
@ -1,40 +0,0 @@
package gateway

import (
	"bytes"
	"io/ioutil"
	"net"
	"os/exec"
)

func DiscoverGateway() (ip net.IP, err error) {
	routeCmd := exec.Command("route", "-n", "get", "0.0.0.0")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Darwin route out format is always like this:
	// route to: default
	// destination: default
	// mask: default
	// gateway: 192.168.1.1
	outputLines := bytes.Split(output, []byte("\n"))
	for _, line := range outputLines {
		if bytes.Contains(line, []byte("gateway:")) {
			gatewayFields := bytes.Fields(line)
			ip = net.ParseIP(string(gatewayFields[1]))
			break
		}
	}

	err = routeCmd.Wait()
	return
}
75 Godeps/_workspace/src/github.com/jackpal/gateway/gateway_linux.go (generated, vendored)
@ -1,75 +0,0 @@
package gateway

import (
	"bytes"
	"io/ioutil"
	"net"
	"os/exec"
)

func discoverGatewayUsingIp() (ip net.IP, err error) {
	routeCmd := exec.Command("ip", "route", "show")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Linux 'ip route show' format looks like this:
	// default via 192.168.178.1 dev wlp3s0 metric 303
	// 192.168.178.0/24 dev wlp3s0 proto kernel scope link src 192.168.178.76 metric 303
	outputLines := bytes.Split(output, []byte("\n"))
	for _, line := range outputLines {
		if bytes.Contains(line, []byte("default")) {
			ipFields := bytes.Fields(line)
			ip = net.ParseIP(string(ipFields[2]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}

func discoverGatewayUsingRoute() (ip net.IP, err error) {
	routeCmd := exec.Command("route", "-n")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Linux route out format is always like this:
	// Kernel IP routing table
	// Destination Gateway Genmask Flags Metric Ref Use Iface
	// 0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 eth0
	outputLines := bytes.Split(output, []byte("\n"))
	for _, line := range outputLines {
		if bytes.Contains(line, []byte("0.0.0.0")) {
			ipFields := bytes.Fields(line)
			ip = net.ParseIP(string(ipFields[1]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}

func DiscoverGateway() (ip net.IP, err error) {
	ip, err = discoverGatewayUsingRoute()
	if err != nil {
		ip, err = discoverGatewayUsingIp()
	}
	return
}
14 Godeps/_workspace/src/github.com/jackpal/gateway/gateway_unimplemented.go (generated, vendored)
@ -1,14 +0,0 @@
// +build !darwin,!linux,!windows

package gateway

import (
	"fmt"
	"net"
	"runtime"
)

func DiscoverGateway() (ip net.IP, err error) {
	err = fmt.Errorf("DiscoverGateway not implemented for OS %s", runtime.GOOS)
	return
}
43 Godeps/_workspace/src/github.com/jackpal/gateway/gateway_windows.go (generated, vendored)
@ -1,43 +0,0 @@
package gateway

import (
	"bytes"
	"io/ioutil"
	"net"
	"os/exec"
)

func DiscoverGateway() (ip net.IP, err error) {
	routeCmd := exec.Command("route", "print", "0.0.0.0")
	stdOut, err := routeCmd.StdoutPipe()
	if err != nil {
		return
	}
	if err = routeCmd.Start(); err != nil {
		return
	}
	output, err := ioutil.ReadAll(stdOut)
	if err != nil {
		return
	}

	// Windows route output format is always like this:
	// ===========================================================================
	// Active Routes:
	// Network Destination Netmask Gateway Interface Metric
	// 0.0.0.0 0.0.0.0 192.168.1.1 192.168.1.100 20
	// ===========================================================================
	// I'm trying to pick the active route,
	// then jump 2 lines and pick the third IP
	// Not using regex because output is quite standard from Windows XP to 8 (NEEDS TESTING)
	outputLines := bytes.Split(output, []byte("\n"))
	for idx, line := range outputLines {
		if bytes.Contains(line, []byte("Active Routes:")) {
			ipFields := bytes.Fields(outputLines[idx+2])
			ip = net.ParseIP(string(ipFields[2]))
			break
		}
	}
	err = routeCmd.Wait()
	return
}
Some files were not shown because too many files have changed in this diff.