Compare commits

...

98 Commits

SHA1 Message Date
9c2882b2e5 params: Geth 1.6.4 stable (hotfix) 2017-06-01 17:33:17 +03:00
1a0eb903f1 internal/ethapi: initialize account mutex in lock properly 2017-06-01 17:16:12 +03:00
0036e2a747 swarm/dev: add development environment (#14332)
This PR adds a Swarm development environment which can be run in a
Docker container and provides scripts for building binaries and running
Swarm clusters.
2017-06-01 12:52:18 +02:00
727eadacca VERSION, params: begin Geth 1.6.4 release cycle 2017-06-01 11:43:57 +03:00
99cba96f26 params: release Geth 1.6.3 - Covfefe 2017-06-01 11:41:48 +03:00
f272879e5a Merge pull request #14565 from karalabe/relax-privkey-checks
accounts/keystore, crypto: don't enforce key checks on existing keyfiles
2017-06-01 11:14:11 +03:00
72dd51e25a accounts/keystore, crypto: don't enforce key checks on existing keyfiles 2017-06-01 11:11:06 +03:00
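For illustration, the distinction relaxed by this commit is roughly the one sketched below: strict hex parsing (used for fresh imports) rejects malformed or out-of-range keys, while the unsafe converter simply accepts the raw bytes, which is what re-reading an already-stored keyfile falls back to. This is a minimal sketch using the go-ethereum crypto package; the sample key value is illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A sample 32-byte private key in hex (example value only).
	hexKey := "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291"

	// Strict path: validates length and range, returns an error on bad input.
	key, err := crypto.HexToECDSA(hexKey)
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("imported:", crypto.PubkeyToAddress(key.PublicKey).Hex())

	// Lenient path: blindly converts raw bytes, as used when loading keys
	// that were already accepted into the keystore.
	raw := crypto.FromECDSA(key)
	_ = crypto.ToECDSAUnsafe(raw)
}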
799a469000 Merge pull request #14561 from karalabe/txpool-perf-fix
core: reduce transaction reorganization overhead
2017-06-01 10:33:47 +03:00
f4d81178d8 Merge pull request #14563 from karalabe/ethstats-reduce-traffic-2
ethstats: reduce ethstats traffic by throttling reports
2017-06-01 10:33:21 +03:00
310d2e7ef4 Merge pull request #14564 from karalabe/fix-1.6-docker
containers/docker: fix the legacy alpine image before dropping
2017-06-01 10:33:03 +03:00
3ecde4e2aa containers/docker: fix the legacy alpine image before dropping 2017-06-01 00:21:47 +03:00
a355b401db ethstats: reduce ethstats traffic by throttling reports 2017-06-01 00:16:19 +03:00
cba33029a8 core: only reorg changed account, not all 2017-05-31 23:26:24 +03:00
9702badd83 core: don't uselessly recheck transactions on dump 2017-05-31 21:29:50 +03:00
067dc2cbf5 params, VERSION: 1.6.3 unstable 2017-05-31 05:47:35 +02:00
65979770e6 params: 1.6.2 stable 2017-05-31 05:45:13 +02:00
41bdf49eed Merge pull request #14516 from holiman/noncefixes
internal/ethapi: add mutex around signing + nonce assignment
2017-05-30 18:15:57 +03:00
ea11f7dd7a internal/ethapi: add mutex around signing + nonce assignment
This prevents concurrent assignment of identical nonces when automatic
assignment is used.
2017-05-30 16:43:38 +02:00
8df24760d7 Merge pull request #14553 from karalabe/puppeth-key-check
cmd/puppeth: fix improper key validation for remotes
2017-05-30 14:50:48 +03:00
71814bf6c4 Merge pull request #14547 from karalabe/txpool-gas-decrease
core: check for gas limit exceeding txs too on new block
2017-05-30 14:50:27 +03:00
ec1700600a cmd/puppeth: fix improper key validation for remotes 2017-05-30 14:24:01 +03:00
b0f30b0b37 Merge pull request #14545 from karalabe/clique-cache-signatures
consensus/clique: cache block signatures for fast checks
2017-05-30 12:56:57 +03:00
e96f2981e2 Merge pull request #14548 from karalabe/ethstats-no-txs
ethstats: don't report transaction content, only hash
2017-05-30 12:54:00 +03:00
09d59da3a1 ethstats: don't report transaction content, only hash 2017-05-30 02:15:40 +03:00
280609c99b core: check for gas limit exceeding txs too on new block 2017-05-30 00:31:37 +03:00
309da541de consensus/clique: cache block signatures for fast checks 2017-05-29 22:07:02 +03:00
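The caching idea behind this commit is that recovering a signer from a header signature is an expensive ecrecover, so the result can be memoised per block hash. Below is a hypothetical sketch of such a cache; the names and the crude eviction policy are illustrative, not the consensus/clique package's actual implementation.

package main

import (
	"fmt"
	"sync"

	"github.com/ethereum/go-ethereum/common"
)

// sigCache memoises recovered signer addresses keyed by block hash. This is
// an illustrative sketch, not the clique package's actual cache type.
type sigCache struct {
	mu      sync.Mutex
	entries map[common.Hash]common.Address
	limit   int
}

func newSigCache(limit int) *sigCache {
	return &sigCache{entries: make(map[common.Hash]common.Address), limit: limit}
}

// signer returns the cached signer for hash, or computes it via recoverFn
// (standing in for the expensive signature recovery) and memoises the result.
func (c *sigCache) signer(hash common.Hash, recoverFn func() (common.Address, error)) (common.Address, error) {
	c.mu.Lock()
	if addr, ok := c.entries[hash]; ok {
		c.mu.Unlock()
		return addr, nil
	}
	c.mu.Unlock()

	addr, err := recoverFn()
	if err != nil {
		return common.Address{}, err
	}
	c.mu.Lock()
	if len(c.entries) >= c.limit { // crude eviction: drop everything when full
		c.entries = make(map[common.Hash]common.Address)
	}
	c.entries[hash] = addr
	c.mu.Unlock()
	return addr, nil
}

func main() {
	cache := newSigCache(1024)
	addr, _ := cache.signer(common.HexToHash("0x01"), func() (common.Address, error) {
		return common.HexToAddress("0x0000000000000000000000000000000000000001"), nil
	})
	fmt.Println("signer:", addr.Hex())
}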
dd06c85843 Merge pull request #14523 from karalabe/txpool-cli-flags
cmd, core, eth: configurable txpool parameters
2017-05-29 11:42:48 +03:00
ae40d51410 Merge pull request #14539 from karalabe/txpool-inspec-nonces
internal/ethapi: fix tx nonces in pool inspect/content
2017-05-29 11:37:34 +03:00
b865fad888 Merge pull request #14537 from karalabe/setgasprice-durning-nomine
eth: update default gas price when not mining too
2017-05-29 11:37:26 +03:00
afb17cf071 Merge pull request #14524 from karalabe/noimport-during-fastsync
eth: don't import propagated blocks during fastsync
2017-05-29 11:37:08 +03:00
08959bbc70 cmd, core, eth: configurable txpool parameters 2017-05-29 11:29:46 +03:00
673c92db6b internal/ethapi: fix tx nonces in pool inspect/content 2017-05-29 11:17:31 +03:00
c2a494c743 eth: update default gas price when not mining too 2017-05-29 10:21:34 +03:00
afdd23b5ca eth: don't import propagated blocks during fastsync 2017-05-26 16:04:12 +03:00
cb809c03da Merge pull request #14517 from Ali92hm/master
Improved Dockerfile?
2017-05-26 10:54:41 +01:00
45421d3130 dockerfile: expose 30303/udp 2017-05-25 12:34:29 -07:00
115e7d71cc dockerfile: cp geth to /usr/local/bin 2017-05-25 12:34:28 -07:00
dd5ed01f3b Merge pull request #14514 from karalabe/go1.8.3
travis, appveyor: bump to Go 1.8.3, Android NDK 14b
2017-05-25 17:42:40 +03:00
b7ff0d42e3 Merge pull request #14515 from karalabe/golint-tooooolong
core: fix various golint warnings and errors
2017-05-25 17:40:51 +03:00
c98bce709c core: fix minor accidental typos and comment errors 2017-05-25 17:22:45 +03:00
17f0b11942 core: typos and comments improve
1. fix typos
2. make method receivers of a struct consistent
3. improve comments

(cherry picked from commit 1ba9795395)
2017-05-25 17:14:33 +03:00
6231edcbab travis, appveyor: bump to Go 1.8.3, Android NDK 14b 2017-05-25 17:05:33 +03:00
07aae19e5d Merge pull request #14446 from bas-vk/cli-help
Rewrite templates for (sub)commands help section
2017-05-25 13:58:55 +03:00
b596b4ba5b Merge pull request #14513 from obscuren/allocate-stack
core/vm: allocate stack to 1024
2017-05-25 13:22:56 +03:00
8b1e4c4c5e README: corrected attach example (#14512) 2017-05-25 13:22:26 +03:00
846d091bd2 core/vm: allocate stack to 1024
Pre-allocate the stack to 1024 entries, optimising stack pushes and reducing
calls to runtime.makeslice and runtime.mallocgc.
2017-05-25 11:37:04 +02:00
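The optimisation is simply to reserve capacity up front so pushes within the 1024-item protocol limit never trigger slice growth. A hedged sketch of the idea follows; the names are illustrative and not the core/vm types.

package main

import (
	"fmt"
	"math/big"
)

// stack is an illustrative EVM-style stack with capacity reserved up front,
// so pushes within the 1024-item limit never reallocate.
type stack struct {
	data []*big.Int
}

func newStack() *stack {
	return &stack{data: make([]*big.Int, 0, 1024)}
}

func (s *stack) push(v *big.Int) { s.data = append(s.data, v) }

func (s *stack) pop() *big.Int {
	v := s.data[len(s.data)-1]
	s.data = s.data[:len(s.data)-1]
	return v
}

func main() {
	s := newStack()
	for i := 0; i < 1024; i++ {
		s.push(big.NewInt(int64(i))) // no reallocation occurs here
	}
	fmt.Println("top:", s.pop())
}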
a346aedb90 cmd/geth: reorganise help section for new cli flag handling 2017-05-25 09:15:51 +02:00
ef25b826e6 Merge pull request #14502 from karalabe/mobile-import-ecdsa
Enforce 256 bit keys on raw import, support raw mobile imports
2017-05-24 22:30:47 +02:00
261b3e2351 Merge pull request #14336 from obscuren/metropolis-preparation
consensus, core/*, params: metropolis preparation refactor
2017-05-24 22:28:22 +02:00
344f25fb3e Merge pull request #14507 from karalabe/faucet-misspell
cmd/faucet: fix a few typos
2017-05-24 17:14:21 +03:00
1afaea4bfe cmd/faucet: fix a few typos 2017-05-24 17:12:07 +03:00
11cf5b7ead consensus/ethash: fix TestCalcDifficulty 2017-05-24 15:40:54 +02:00
069cb661c3 crypto/bn256: fix go vet false positive
Also add the package to the license tool ignore list.
2017-05-24 15:40:26 +02:00
3b8915e387 Merge pull request #14504 from bas-vk/wallet-import
cmd/geth: reintroduce wallet import subcommand
2017-05-24 13:00:22 +03:00
437ceaa9be cmd/geth: reintroduce wallet import subcommand 2017-05-23 17:45:26 +02:00
136f78ff0a mobile: support importing flat ecdsa keys too 2017-05-23 14:58:28 +03:00
aa73420207 accounts/keystore, crypto: enforce 256 bit keys on import 2017-05-23 14:58:03 +03:00
3556962053 Merge pull request #14501 from sqli-nantes/master
mobile: manage FilterQuery enabling contract events subscription
2017-05-23 13:47:22 +03:00
e1e87d8b1a common: fixed byte padding functions
The byte padding functions should return the given slice when the requested
length is smaller than or equal to the slice length, rather than only when it
is strictly smaller.

This fix improves almost all EVM push operations.
2017-05-23 11:24:07 +02:00
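The boundary fix amounts to an off-by-one in the early-return check: when the requested length equals the slice length, the slice can be returned as-is instead of being copied. Below is a minimal sketch of a left-padding helper with the corrected comparison; it mirrors the behaviour described above but is not literally the common package code.

package main

import "fmt"

// leftPad returns b left-padded with zeroes to length l. If b is already at
// least l bytes long (note the <=, the fix discussed above), it is returned
// unchanged, avoiding a needless copy in the equal-length case.
func leftPad(b []byte, l int) []byte {
	if l <= len(b) {
		return b
	}
	padded := make([]byte, l)
	copy(padded[l-len(b):], b)
	return padded
}

func main() {
	fmt.Println(leftPad([]byte{0x01, 0x02}, 4)) // [0 0 1 2]
	fmt.Println(leftPad([]byte{0x01, 0x02}, 2)) // [1 2], returned as-is
}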
30cc1c3bf0 mobile: Add management methods to {Addresses,Topics,Hashes} structures 2017-05-23 11:16:25 +02:00
10582a97ca core/vm: expose intpool to stack dup method
Improve the duplication method of the stack to reuse big ints by passing
in an existing integer pool.
2017-05-23 10:52:11 +02:00
e16a7ef60f core/vm: capped int pool 2017-05-23 10:40:09 +02:00
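These two commits share one idea: instead of allocating a fresh big.Int for every stack operation, reuse integers from a small, capped pool. The sketch below is hypothetical and simplified; the real core/vm intpool differs in detail.

package main

import (
	"fmt"
	"math/big"
)

const poolLimit = 256 // cap so the pool cannot grow without bound

// intPool recycles big.Ints to reduce allocations in hot paths.
type intPool struct {
	pool []*big.Int
}

func (p *intPool) get() *big.Int {
	if n := len(p.pool); n > 0 {
		v := p.pool[n-1]
		p.pool = p.pool[:n-1]
		return v
	}
	return new(big.Int)
}

func (p *intPool) put(v *big.Int) {
	if len(p.pool) < poolLimit {
		p.pool = append(p.pool, v)
	}
}

// dup copies the n-th element from the top of the stack using a pooled int,
// mirroring how the stack's dup method can reuse integers.
func dup(stack []*big.Int, n int, pool *intPool) []*big.Int {
	v := pool.get().Set(stack[len(stack)-n])
	return append(stack, v)
}

func main() {
	pool := &intPool{}
	stack := []*big.Int{big.NewInt(7)}
	stack = dup(stack, 1, pool)
	fmt.Println(stack[0], stack[1]) // 7 7
}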
a816e75662 core/vm: improved push instructions
Improve the push instructions by removing unnecessary big.Int allocations and
by working with plain ints instead of big.Int where possible.
2017-05-23 10:39:53 +02:00
3ee75bec9f cmd/evm: added mem/cpu profiling 2017-05-23 10:17:55 +02:00
04b668b232 core/vm: improve error message for invalid opcodes 2017-05-22 17:48:07 +02:00
da636c53d6 mobile: Allows mobile clients to create custom FilterQueries 2017-05-22 17:12:36 +02:00
2a41e76b39 swarm/api: Fix adding paths which exist as manifests (#14482)
Signed-off-by: Lewis Marshall <lewis@lmars.net>
2017-05-22 08:57:03 +02:00
4a2c17b1ab cmd/swarm: Add --httpaddr flag (#14475)
Fixes #14474.

Signed-off-by: Lewis Marshall <lewis@lmars.net>
2017-05-22 08:56:40 +02:00
bc75351edf README: fixing typo in documentation (#14493) 2017-05-22 08:47:27 +02:00
33b158e0ed discover: Changed Logging from Debug to Info (#14485) 2017-05-20 13:10:59 +02:00
83721a95ce internal/ethapi: lock when auto-filling transaction nonce (#14483)
More context is in the referenced bug. This solves the problem of transactions
being submitted simultaneously and getting the same nonce, due to the gap
(caused by signing) between nonce issuance and nonce update. With this PR, a
lock is acquired whenever a nonce is used, and released when the transaction
is submitted or errors out.
2017-05-19 15:03:56 +02:00
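The mechanism described above can be pictured as a per-account mutex that stays held from the nonce lookup until the signed transaction is either submitted or fails, closing the window in which two callers could read the same pending nonce. The following is a simplified, hypothetical illustration, not the internal/ethapi code.

package main

import (
	"fmt"
	"sync"
)

// addrLocker serialises nonce use per account: the lock is taken before the
// pending nonce is read and only released once the transaction has been
// submitted (or has errored out), so concurrent senders cannot reuse a nonce.
type addrLocker struct {
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func (l *addrLocker) lockFor(addr string) *sync.Mutex {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.locks == nil {
		l.locks = make(map[string]*sync.Mutex)
	}
	if _, ok := l.locks[addr]; !ok {
		l.locks[addr] = new(sync.Mutex)
	}
	return l.locks[addr]
}

func main() {
	var (
		locker  addrLocker
		nonce   uint64
		wg      sync.WaitGroup
		account = "0xexample" // illustrative address string
	)
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu := locker.lockFor(account)
			mu.Lock()
			defer mu.Unlock() // released after "submission"
			n := nonce        // read the pending nonce
			nonce = n + 1     // signing and submission would happen here
		}()
	}
	wg.Wait()
	fmt.Println("next nonce:", nonce) // 4, no duplicates assigned
}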
e7119ce12d core/state: fixed (self)destructed objects
Add the object to the list of destructed objects during a selfdestruct /
suicide operation and also remove it from the list once the journal
reverts.
2017-05-18 09:05:58 +02:00
a5f6a1cb7c consensus, core, core/vm, parems: review fixes 2017-05-18 09:05:58 +02:00
e6aff513db core/types: corrected abstract signing address 2017-05-18 09:05:58 +02:00
8a4c1fb799 consensus/ethash: set time to current instead of parent time 2017-05-18 09:05:58 +02:00
10a57fc3d4 consensus, core/*, params: metropolis preparation refactor
This commit is a preparation for the upcoming metropolis hardfork. It
prepares the state, core and vm packages such that integration with
metropolis becomes less of a hassle.

* Difficulty calculation requires header instead of individual
  parameters
* statedb.StartRecord renamed to statedb.Prepare and added Finalise
  method required by metropolis, which removes unwanted accounts from
  the state (i.e. selfdestruct)
* State keeps record of destructed objects (in addition to dirty
  objects)
* core/vm pre-compiles may now return errors
* core/vm pre-compiles' gas check now takes the full byte slice as argument
  instead of just the size
* core/vm now keeps several hard-fork instruction tables instead of a
  single instruction table, removing the need for hard-fork checks inside
  the instructions (a simplified dispatch sketch follows this commit entry)
* core/vm contains an empty restriction function which is added in
  preparation for metropolis write-only mode operations
* Adds the bn256 curve
* Adds and sets the metropolis chain config block parameters (2^64-1)
2017-05-18 09:05:58 +02:00
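Keeping one instruction table per hard fork means the fork decision is made once, when execution starts, rather than inside every opcode. Here is a hedged sketch of that dispatch pattern with illustrative names; it is not the core/vm implementation.

package main

import (
	"fmt"
	"math/big"
)

// operation is a single opcode implementation; an instruction table maps
// opcodes to operations for one specific rule set.
type operation func()

type instructionTable [256]operation

var (
	frontierTable  = newBaseTable()
	homesteadTable = newHomesteadTable()
)

func newBaseTable() instructionTable {
	var t instructionTable
	t[0x01] = func() { fmt.Println("ADD (base rules)") }
	return t
}

func newHomesteadTable() instructionTable {
	t := newBaseTable() // start from the previous fork's table and override
	t[0xf4] = func() { fmt.Println("DELEGATECALL (introduced in Homestead)") }
	return t
}

// tableFor picks the whole table up front, so individual instructions no
// longer need per-opcode hard-fork checks.
func tableFor(blockNum, homesteadBlock *big.Int) instructionTable {
	if homesteadBlock != nil && blockNum.Cmp(homesteadBlock) >= 0 {
		return homesteadTable
	}
	return frontierTable
}

func main() {
	table := tableFor(big.NewInt(1200000), big.NewInt(1150000))
	table[0x01]()
	table[0xf4]()
}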
a2f23ca9b1 cmd, core, eth, miner: remove txpool gas price limits (#14442) 2017-05-16 21:07:27 +02:00
e20158176d les: fix goroutine leak in execQueue (#14480)
execQueue used an atomic counter to track whether the queue had been closed,
but checking the counter never happened because the queue was blocked on its
channel.

Fix it by using a condition variable instead of sync/atomic. I tried an
implementation based on channels first, but it was hard to make it
reliable.

quit now waits for the queue loop to exit.
2017-05-16 20:56:02 +02:00
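The shape of the fix is a classic condition-variable loop: the worker sleeps on the cond while the queue is empty, a close flag is checked under the same mutex, and quit waits until the loop has actually returned. The following is a simplified, hypothetical sketch of that structure, not the les package code.

package main

import (
	"fmt"
	"sync"
)

// execQueue runs queued functions on a single background goroutine.
type execQueue struct {
	mu     sync.Mutex
	cond   *sync.Cond
	funcs  []func()
	closed bool
	done   chan struct{}
}

func newExecQueue() *execQueue {
	q := &execQueue{done: make(chan struct{})}
	q.cond = sync.NewCond(&q.mu)
	go q.loop()
	return q
}

func (q *execQueue) loop() {
	defer close(q.done)
	q.mu.Lock()
	defer q.mu.Unlock()
	for {
		for !q.closed && len(q.funcs) == 0 {
			q.cond.Wait() // sleep until work arrives or quit is called
		}
		if q.closed && len(q.funcs) == 0 {
			return
		}
		f := q.funcs[0]
		q.funcs = q.funcs[1:]
		q.mu.Unlock()
		f() // run the task outside the lock
		q.mu.Lock()
	}
}

func (q *execQueue) queue(f func()) {
	q.mu.Lock()
	q.funcs = append(q.funcs, f)
	q.mu.Unlock()
	q.cond.Signal()
}

// quit marks the queue closed and waits for the loop goroutine to exit,
// which is what plugs the original goroutine leak.
func (q *execQueue) quit() {
	q.mu.Lock()
	q.closed = true
	q.mu.Unlock()
	q.cond.Signal()
	<-q.done
}

func main() {
	q := newExecQueue()
	q.queue(func() { fmt.Println("task ran") })
	q.quit()
}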
ef7b9fb7d0 cmd/puppeth: v4/v5 boot separation, signer gas configs (#14453) 2017-05-13 02:03:56 +02:00
b0d0fafd68 swarm/api: fix error reporting in api.Resolve (#14464)
Previously, resolve errors were being swallowed and the returned error
was a generic "not a content hash" which isn't helpful.

This updates the Resolve function to fail fast rather than only
returning an error at the end, and also adds test coverage.
2017-05-13 02:02:25 +02:00
90c7155ef4 mobile: accept nil for chainid as homestead signing (#14463) 2017-05-13 02:00:39 +02:00
df4e7eccf5 containers/vagrant: add support for CentOS (#14380)
CentOS has been added as a multi-machine option to the Vagrant script.
Ubuntu is still the default option. For starting the CentOS machine, use:

   vagrant up centos
2017-05-13 01:59:03 +02:00
7c707d14d1 Merge pull request #14454 from karalabe/mobile-surface-txrlp
mobile: add toString & rlp/json encoding for protocol types
2017-05-11 17:53:15 +03:00
953a995116 mobile: add toString & rlp/json encoding for protocol types 2017-05-11 17:25:40 +03:00
c5840ce12f Merge pull request #14452 from karalabe/dual-bootnodes
cmd, node: support different bootnodes, fix default light port
2017-05-10 21:00:22 +03:00
3b3989de6a cmd, node: support different bootnodes, fix default light port 2017-05-10 17:51:52 +03:00
40976ea1a0 README: update attach instructions for testnet users (#14448) 2017-05-09 16:06:07 +02:00
d18b509e40 Merge pull request #14441 from karalabe/receipt-data-regression
core: fix processing regression during receipt import
2017-05-08 12:37:01 +03:00
2e4d23a793 Merge pull request #14427 from zsfelfoldi/compress
common/bitutil: added data compression algorithm
2017-05-08 12:30:35 +03:00
60293820b7 core: fix processing regression during receipt import 2017-05-08 12:09:35 +03:00
82defe5c56 common/compress: internalize encoders, add length wrappers 2017-05-08 11:38:25 +03:00
dd483d7d0d Merge pull request #14440 from karalabe/cocoapods-confirm-fix
travis: adapt build script to new travis VM settings
2017-05-08 11:26:35 +03:00
dddebe469b travis: adapt build script to new travis VM settings 2017-05-08 11:22:08 +03:00
cf19586cfb common/bitutil: fix decompression corner cases; fuzz, test & bench 2017-05-06 19:06:17 +03:00
fd5d51c9ae common/bitutil: added data compression algorithm 2017-05-05 20:24:48 +02:00
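This commit's compression scheme (specified in the compress.go excerpt further down) stores a bitset of non-zero byte positions followed by the non-zero bytes themselves, recursing on the bitset. As a small worked example under that spec: compressing {0x00, 0x00, 0x07, 0x00, 0x05} yields a one-byte bitset 0b00101000 (positions 2 and 4 set, MSB first) followed by 0x07 and 0x05.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	data := []byte{0x00, 0x00, 0x07, 0x00, 0x05}

	// Expected per the algorithm description: bitset 0x28 (0b00101000),
	// then the non-zero bytes 0x07 and 0x05.
	fmt.Printf("%x\n", bitutil.CompressBytes(data)) // 280705
}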
2ec5cf1673 Merge pull request #14423 from karalabe/bitutil
common/bitutil, consensus/ethash: reusable bitutil package
2017-05-05 18:23:08 +03:00
36a800a1d2 common/bitutil, consensus/ethash: reusable bitutil package 2017-05-05 16:00:11 +03:00
93832b633e VERSION, params: begin 1.6.2 release cycle 2017-05-04 14:34:59 +03:00
129 changed files with 6945 additions and 1273 deletions


@ -6,7 +6,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.7.5
go: 1.7.6
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@ -19,7 +19,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.8.3
script:
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
- sudo modprobe fuse
@ -29,7 +29,7 @@ matrix:
- go run build/ci.go test -coverage -misspell
- os: osx
go: 1.8.1
go: 1.8.3
sudo: required
script:
- brew update
@ -42,7 +42,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.8.3
env:
- ubuntu-ppa
- azure-linux
@ -80,7 +80,7 @@ matrix:
sudo: required
services:
- docker
go: 1.8.1
go: 1.8.3
env:
- azure-linux-mips
script:
@ -120,16 +120,16 @@ matrix:
- azure-android
- maven-android
before_install:
- curl https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | tar -xz
- curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
script:
# Build the Android archive and upload it to Maven Central and Azure
- curl https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip -o android-ndk-r13b.zip
- unzip -q android-ndk-r13b.zip && rm android-ndk-r13b.zip
- mv android-ndk-r13b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r13b
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
- mv android-ndk-r14b $HOME
- export ANDROID_NDK=$HOME/android-ndk-r14b
- mkdir -p $GOPATH/src/github.com/ethereum
- ln -s `pwd` $GOPATH/src/github.com/ethereum
@ -137,7 +137,7 @@ matrix:
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
- os: osx
go: 1.8.1
go: 1.8.3
env:
- azure-osx
- azure-ios
@ -147,7 +147,7 @@ matrix:
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
# Build the iOS framework and upload it to CocoaPods and Azure
- gem uninstall cocoapods -a
- gem uninstall cocoapods -a -x
- gem install cocoapods
- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
@ -163,7 +163,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.8.1
go: 1.8.3
env:
- azure-purge
script:


@ -4,11 +4,12 @@ ADD . /go-ethereum
RUN \
apk add --update git go make gcc musl-dev linux-headers && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /geth && \
cp go-ethereum/build/bin/geth /usr/local/bin/ && \
apk del git go make gcc musl-dev linux-headers && \
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
EXPOSE 8545
EXPOSE 30303
EXPOSE 30303/udp
ENTRYPOINT ["/geth"]
ENTRYPOINT ["geth"]


@ -70,7 +70,7 @@ This command will:
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
This too is optional and if you leave it out you can always attach to an already running Geth instance
with `geth --attach`.
with `geth attach`.
### Full node on the Ethereum test network
@ -84,21 +84,23 @@ $ geth --testnet --fast --cache=512 console
```
The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
are equially useful on the testnet too. Please see above for their explanations if you've skipped to
are equally useful on the testnet too. Please see above for their explanations if you've skipped to
here.
Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:
* Instead of using the default data directory (`~/.ethereum` on Linux for example), Geth will nest
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux).
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux). Note, on OSX
and Linux this also means that attaching to a running testnet node requires the use of a custom
endpoint since `geth attach` will try to attach to a production node endpoint by default. E.g.
`geth attach <datadir>/testnet/geth.ipc`. Windows users are not affected by this.
* Instead of connecting the main Ethereum network, the client will connect to the test network,
which uses different P2P bootnodes, different network IDs and genesis states.
*Note: Although there are some internal protective measures to prevent transactions from crossing
over between the main network and test network (different starting nonces), you should make sure to
always use separate accounts for play-money and real-money. Unless you manually move accounts, Geth
will by default correctly separate the two networks and will not make any accounts available between
them.*
over between the main network and test network, you should make sure to always use separate accounts
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
separate the two networks and will not make any accounts available between them.*
#### Docker quick start


@ -1 +1 @@
1.6.1
1.6.4


@ -124,14 +124,13 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) {
if err != nil {
return err
}
privkey, err := hex.DecodeString(keyJSON.PrivateKey)
privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey)
if err != nil {
return err
}
k.Address = common.BytesToAddress(addr)
k.PrivateKey = crypto.ToECDSA(privkey)
k.PrivateKey = privkey
return nil
}


@ -450,7 +450,6 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco
if ks.cache.hasAddress(key.Address) {
return accounts.Account{}, fmt.Errorf("account already exists")
}
return ks.importKey(key, passphrase)
}


@ -182,7 +182,8 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
if err != nil {
return nil, err
}
key := crypto.ToECDSA(keyBytes)
key := crypto.ToECDSAUnsafe(keyBytes)
return &Key{
Id: uuid.UUID(keyId),
Address: crypto.PubkeyToAddress(key.PublicKey),


@ -46,7 +46,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
// Decrypt with the correct password
key, err := DecryptKey(keyjson, password)
if err != nil {
t.Errorf("test %d: json key failed to decrypt: %v", i, err)
t.Fatalf("test %d: json key failed to decrypt: %v", i, err)
}
if key.Address != address {
t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)


@ -74,7 +74,8 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
return nil, err
}
ethPriv := crypto.Keccak256(plainText)
ecKey := crypto.ToECDSA(ethPriv)
ecKey := crypto.ToECDSAUnsafe(ethPriv)
key = &Key{
Id: nil,
Address: crypto.PubkeyToAddress(ecKey.PublicKey),


@ -22,8 +22,8 @@ environment:
install:
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.1.windows-%GETH_ARCH%.zip
- 7z x go1.8.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version


@ -47,11 +47,14 @@ var (
// boring stuff
"vendor/", "tests/files/", "build/",
// don't relicense vendored sources
"crypto/sha3/", "crypto/ecies/", "log/",
"crypto/secp256k1/curve.go",
"consensus/ethash/xor.go",
"internal/jsre/deps",
"cmd/internal/browser",
"consensus/ethash/xor.go",
"crypto/bn256/",
"crypto/ecies/",
"crypto/secp256k1/curve.go",
"crypto/sha3/",
"internal/jsre/deps",
"log/",
// don't license generated files
"contracts/chequebook/contract/",
"contracts/ens/contract/",


@ -35,6 +35,18 @@ var (
Name: "debug",
Usage: "output full trace logs",
}
MemProfileFlag = cli.StringFlag{
Name: "memprofile",
Usage: "creates a memory profile at the given path",
}
CPUProfileFlag = cli.StringFlag{
Name: "cpuprofile",
Usage: "creates a CPU profile at the given path",
}
StatDumpFlag = cli.BoolFlag{
Name: "statdump",
Usage: "displays stack and heap memory information",
}
CodeFlag = cli.StringFlag{
Name: "code",
Usage: "EVM code",
@ -93,6 +105,9 @@ func init() {
DumpFlag,
InputFlag,
DisableGasMeteringFlag,
MemProfileFlag,
CPUProfileFlag,
StatDumpFlag,
}
app.Commands = []cli.Command{
compileCommand,


@ -21,6 +21,7 @@ import (
"fmt"
"io/ioutil"
"os"
"runtime/pprof"
"time"
goruntime "runtime"
@ -108,6 +109,19 @@ func runCmd(ctx *cli.Context) error {
},
}
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
f, err := os.Create(cpuProfilePath)
if err != nil {
fmt.Println("could not create CPU profile: ", err)
os.Exit(1)
}
if err := pprof.StartCPUProfile(f); err != nil {
fmt.Println("could not start CPU profile: ", err)
os.Exit(1)
}
defer pprof.StopCPUProfile()
}
tstart := time.Now()
if ctx.GlobalBool(CreateFlag.Name) {
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
@ -125,12 +139,27 @@ func runCmd(ctx *cli.Context) error {
fmt.Println(string(statedb.Dump()))
}
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
f, err := os.Create(memProfilePath)
if err != nil {
fmt.Println("could not create memory profile: ", err)
os.Exit(1)
}
if err := pprof.WriteHeapProfile(f); err != nil {
fmt.Println("could not write memory profile: ", err)
os.Exit(1)
}
f.Close()
}
if ctx.GlobalBool(DebugFlag.Name) {
fmt.Fprintln(os.Stderr, "#### TRACE ####")
vm.WriteTrace(os.Stderr, logger.StructLogs())
fmt.Fprintln(os.Stderr, "#### LOGS ####")
vm.WriteLogs(os.Stderr, statedb.Logs())
}
if ctx.GlobalBool(StatDumpFlag.Name) {
var mem goruntime.MemStats
goruntime.ReadMemStats(&mem)
fmt.Fprintf(os.Stderr, `evm execution time: %v


@ -102,7 +102,7 @@ func main() {
if amount == 1 {
amounts[i] = strings.TrimSuffix(amounts[i], "s")
}
// Calcualte the period for th enext tier and format it
// Calculate the period for the next tier and format it
period := *minutesFlag * int(math.Pow(3, float64(i)))
periods[i] = fmt.Sprintf("%d mins", period)
if period%60 == 0 {


@ -31,25 +31,40 @@ import (
var (
walletCommand = cli.Command{
Name: "wallet",
Usage: "Import Ethereum presale wallets",
Action: utils.MigrateFlags(importWallet),
Category: "ACCOUNT COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.KeyStoreDirFlag,
utils.PasswordFileFlag,
utils.LightKDFFlag,
},
Name: "wallet",
Usage: "Manage Ethereum presale wallets",
ArgsUsage: "",
Category: "ACCOUNT COMMANDS",
Description: `
geth wallet [options] /path/to/my/presale.wallet
geth wallet import /path/to/my/presale.wallet
will prompt for your password and imports your ether presale account.
It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.
will prompt for your password and imports your ether presale account.
It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.`,
Subcommands: []cli.Command{
{
`,
Name: "import",
Usage: "Import Ethereum presale wallet",
ArgsUsage: "<keyFile>",
Action: utils.MigrateFlags(importWallet),
Category: "ACCOUNT COMMANDS",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.KeyStoreDirFlag,
utils.PasswordFileFlag,
utils.LightKDFFlag,
},
Description: `
geth wallet [options] /path/to/my/presale.wallet
will prompt for your password and imports your ether presale account.
It can be used non-interactively with the --password option taking a
passwordfile as argument containing the wallet password in plaintext.`,
},
},
}
accountCommand = cli.Command{
Name: "account",
Usage: "Manage accounts",


@ -30,9 +30,7 @@ import (
var (
consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag}
)
var (
consoleCommand = cli.Command{
Action: utils.MigrateFlags(localConsole),
Name: "console",


@ -55,6 +55,8 @@ var (
utils.UnlockedAccountFlag,
utils.PasswordFileFlag,
utils.BootnodesFlag,
utils.BootnodesV4Flag,
utils.BootnodesV5Flag,
utils.DataDirFlag,
utils.KeyStoreDirFlag,
utils.NoUSBFlag,
@ -64,6 +66,13 @@ var (
utils.EthashDatasetDirFlag,
utils.EthashDatasetsInMemoryFlag,
utils.EthashDatasetsOnDiskFlag,
utils.TxPoolPriceLimitFlag,
utils.TxPoolPriceBumpFlag,
utils.TxPoolAccountSlotsFlag,
utils.TxPoolGlobalSlotsFlag,
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
utils.FastSyncFlag,
utils.LightModeFlag,
utils.SyncModeFlag,


@ -20,6 +20,7 @@ package main
import (
"io"
"sort"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/internal/debug"
@ -91,6 +92,18 @@ var AppHelpFlagGroups = []flagGroup{
utils.EthashDatasetsOnDiskFlag,
},
},
{
Name: "TRANSACTION POOL",
Flags: []cli.Flag{
utils.TxPoolPriceLimitFlag,
utils.TxPoolPriceBumpFlag,
utils.TxPoolAccountSlotsFlag,
utils.TxPoolGlobalSlotsFlag,
utils.TxPoolAccountQueueFlag,
utils.TxPoolGlobalQueueFlag,
utils.TxPoolLifetimeFlag,
},
},
{
Name: "PERFORMANCE TUNING",
Flags: []cli.Flag{
@ -129,6 +142,8 @@ var AppHelpFlagGroups = []flagGroup{
Name: "NETWORKING",
Flags: []cli.Flag{
utils.BootnodesFlag,
utils.BootnodesV4Flag,
utils.BootnodesV5Flag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
@ -187,6 +202,39 @@ var AppHelpFlagGroups = []flagGroup{
},
}
// byCategory sorts an array of flagGroup by Name in the order
// defined in AppHelpFlagGroups.
type byCategory []flagGroup
func (a byCategory) Len() int { return len(a) }
func (a byCategory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byCategory) Less(i, j int) bool {
iCat, jCat := a[i].Name, a[j].Name
iIdx, jIdx := len(AppHelpFlagGroups), len(AppHelpFlagGroups) // ensure non categorized flags come last
for i, group := range AppHelpFlagGroups {
if iCat == group.Name {
iIdx = i
}
if jCat == group.Name {
jIdx = i
}
}
return iIdx < jIdx
}
func flagCategory(flag cli.Flag) string {
for _, category := range AppHelpFlagGroups {
for _, flg := range category.Flags {
if flg.GetName() == flag.GetName() {
return category.Name
}
}
}
return "MISC"
}
func init() {
// Override the default app help template
cli.AppHelpTemplate = AppHelpTemplate
@ -196,6 +244,7 @@ func init() {
App interface{}
FlagGroups []flagGroup
}
// Override the default app help printer, but only for the global app help
originalHelpPrinter := cli.HelpPrinter
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
@ -225,6 +274,27 @@ func init() {
}
// Render out custom usage screen
originalHelpPrinter(w, tmpl, helpData{data, AppHelpFlagGroups})
} else if tmpl == utils.CommandHelpTemplate {
// Iterate over all command specific flags and categorize them
categorized := make(map[string][]cli.Flag)
for _, flag := range data.(cli.Command).Flags {
if _, ok := categorized[flag.String()]; !ok {
categorized[flagCategory(flag)] = append(categorized[flagCategory(flag)], flag)
}
}
// sort to get a stable ordering
sorted := make([]flagGroup, 0, len(categorized))
for cat, flgs := range categorized {
sorted = append(sorted, flagGroup{cat, flgs})
}
sort.Sort(byCategory(sorted))
// add sorted array to data and render with default printer
originalHelpPrinter(w, tmpl, map[string]interface{}{
"cmd": data,
"categorizedFlags": sorted,
})
} else {
originalHelpPrinter(w, tmpl, data)
}


@ -40,7 +40,7 @@ ADD genesis.json /genesis.json
RUN \
echo '/geth init /genesis.json' > geth.sh && \{{if .Unlock}}
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}}' >> geth.sh
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
ENTRYPOINT ["/bin/sh", "geth.sh"]
`
@ -66,17 +66,20 @@ services:
- LIGHT_PEERS={{.LightPeers}}
- STATS_NAME={{.Ethstats}}
- MINER_NAME={{.Etherbase}}
- GAS_TARGET={{.GasTarget}}
- GAS_PRICE={{.GasPrice}}
restart: always
`
// deployNode deploys a new Ethereum node container to a remote machine via SSH,
// docker and docker-compose. If an instance with the specified network name
// already exists there, it will be overwritten!
func deployNode(client *sshClient, network string, bootnodes []string, config *nodeInfos) ([]byte, error) {
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos) ([]byte, error) {
kind := "sealnode"
if config.keyJSON == "" && config.etherbase == "" {
kind = "bootnode"
bootnodes = make([]string, 0)
bootv4 = make([]string, 0)
bootv5 = make([]string, 0)
}
// Generate the content to upload to the server
workdir := fmt.Sprintf("%d", rand.Int63())
@ -92,9 +95,12 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"Port": config.portFull,
"Peers": config.peersTotal,
"LightFlag": lightFlag,
"Bootnodes": strings.Join(bootnodes, ","),
"BootV4": strings.Join(bootv4, ","),
"BootV5": strings.Join(bootv5, ","),
"Ethstats": config.ethstats,
"Etherbase": config.etherbase,
"GasTarget": uint64(1000000 * config.gasTarget),
"GasPrice": uint64(1000000000 * config.gasPrice),
"Unlock": config.keyJSON != "",
})
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
@ -111,6 +117,8 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
"LightPeers": config.peersLight,
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
"Etherbase": config.etherbase,
"GasTarget": config.gasTarget,
"GasPrice": config.gasPrice,
})
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
@ -127,7 +135,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
}
defer client.Run("rm -rf " + workdir)
// Build and deploy the bootnode service
// Build and deploy the boot or seal node service
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
}
@ -147,6 +155,8 @@ type nodeInfos struct {
etherbase string
keyJSON string
keyPass string
gasTarget float64
gasPrice float64
}
// String implements the stringer interface.
@ -155,7 +165,8 @@ func (info *nodeInfos) String() string {
if info.peersLight > 0 {
discv5 = fmt.Sprintf(", portv5=%d", info.portLight)
}
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s", info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats)
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s, gastarget=%0.3f MGas, gasprice=%0.3f GWei",
info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats, info.gasTarget, info.gasPrice)
}
// checkNode does a health-check against an boot or seal node server to verify
@ -176,6 +187,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
// Resolve a few types from the environmental variables
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
// Container available, retrieve its node ID and its genesis json
var out []byte
@ -213,6 +226,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
etherbase: infos.envvars["MINER_NAME"],
keyJSON: keyJSON,
keyPass: keyPass,
gasTarget: gasTarget,
gasPrice: gasPrice,
}
stats.enodeFull = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.portFull)
if stats.portLight != 0 {


@ -162,6 +162,48 @@ func (w *wizard) readDefaultInt(def int) int {
}
}
// readFloat reads a single line from stdin, trimming if from spaces, enforcing it
// to parse into a float.
func (w *wizard) readFloat() float64 {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text == "" {
continue
}
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
if err != nil {
log.Error("Invalid input, expected float", "err", err)
continue
}
return val
}
}
// readDefaultFloat reads a single line from stdin, trimming if from spaces, enforcing
// it to parse into a float. If an empty line is entered, the default value is returned.
func (w *wizard) readDefaultFloat(def float64) float64 {
for {
fmt.Printf("> ")
text, err := w.in.ReadString('\n')
if err != nil {
log.Crit("Failed to read user input", "err", err)
}
if text = strings.TrimSpace(text); text == "" {
return def
}
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
if err != nil {
log.Error("Invalid input, expected float", "err", err)
continue
}
return val
}
}
// readPassword reads a single line from stdin, trimming it from the trailing new
// line and returns it. The input will not be echoed.
func (w *wizard) readPassword() string {


@ -39,7 +39,7 @@ func (w *wizard) networkStats(tips bool) {
// Iterate over all the specified hosts and check their status
stats := tablewriter.NewWriter(os.Stdout)
stats.SetHeader([]string{"Server", "IP", "Status", "Service", "Details"})
stats.SetColWidth(128)
stats.SetColWidth(100)
for server, pubkey := range w.conf.Servers {
client := w.servers[server]


@ -50,7 +50,7 @@ func (w *wizard) deployNode(boot bool) {
if boot {
infos = &nodeInfos{portFull: 30303, peersTotal: 512, peersLight: 256}
} else {
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0}
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
}
}
infos.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
@ -109,8 +109,7 @@ func (w *wizard) deployNode(boot bool) {
} else if w.conf.genesis.Config.Clique != nil {
// If a previous signer was already set, offer to reuse it
if infos.keyJSON != "" {
var key keystore.Key
if err := json.Unmarshal([]byte(infos.keyJSON), &key); err != nil {
if key, err := keystore.DecryptKey([]byte(infos.keyJSON), infos.keyPass); err != nil {
infos.keyJSON, infos.keyPass = "", ""
} else {
fmt.Println()
@ -136,9 +135,17 @@ func (w *wizard) deployNode(boot bool) {
}
}
}
// Establish the gas dynamics to be enforced by the signer
fmt.Println()
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)
fmt.Println()
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
}
// Try to deploy the full node on the host
if out, err := deployNode(client, w.network, w.conf.bootFull, infos); err != nil {
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos); err != nil {
log.Error("Failed to deploy Ethereum node container", "err", err)
if len(out) > 0 {
fmt.Printf("%s\n", out)


@ -67,6 +67,10 @@ var (
Name: "bzzaccount",
Usage: "Swarm account key file",
}
SwarmListenAddrFlag = cli.StringFlag{
Name: "httpaddr",
Usage: "Swarm HTTP API listening interface",
}
SwarmPortFlag = cli.StringFlag{
Name: "bzzport",
Usage: "Swarm local http api port",
@ -249,6 +253,7 @@ Cleans database of corrupted entries.
SwarmConfigPathFlag,
SwarmSwapEnabledFlag,
SwarmSyncEnabledFlag,
SwarmListenAddrFlag,
SwarmPortFlag,
SwarmAccountFlag,
SwarmNetworkIdFlag,
@ -345,6 +350,9 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
if len(bzzport) > 0 {
bzzconfig.Port = bzzport
}
if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
bzzconfig.ListenAddr = bzzaddr
}
swapEnabled := ctx.GlobalBool(SwarmSwapEnabledFlag.Name)
syncEnabled := ctx.GlobalBoolT(SwarmSyncEnabledFlag.Name)


@ -56,6 +56,19 @@ import (
"gopkg.in/urfave/cli.v1"
)
var (
CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...]
{{if .cmd.Description}}{{.cmd.Description}}
{{end}}{{if .cmd.Subcommands}}
SUBCOMMANDS:
{{range .cmd.Subcommands}}{{.cmd.Name}}{{with .cmd.ShortName}}, {{.cmd}}{{end}}{{ "\t" }}{{.cmd.Usage}}
{{end}}{{end}}{{if .categorizedFlags}}
{{range $idx, $categorized := .categorizedFlags}}{{$categorized.Name}} OPTIONS:
{{range $categorized.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}{{end}}`
)
func init() {
cli.AppHelpTemplate = `{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
@ -70,16 +83,7 @@ GLOBAL OPTIONS:
{{end}}{{end}}
`
cli.CommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range .Flags}}{{.}}
{{end}}{{end}}
`
cli.CommandHelpTemplate = CommandHelpTemplate
}
// NewApp creates an app with sane defaults.
@ -119,35 +123,6 @@ var (
Name: "nousb",
Usage: "Disables monitoring for and managine USB hardware wallets",
}
EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir",
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
}
EthashCachesInMemoryFlag = cli.IntFlag{
Name: "ethash.cachesinmem",
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
Value: eth.DefaultConfig.EthashCachesInMem,
}
EthashCachesOnDiskFlag = cli.IntFlag{
Name: "ethash.cachesondisk",
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
Value: eth.DefaultConfig.EthashCachesOnDisk,
}
EthashDatasetDirFlag = DirectoryFlag{
Name: "ethash.dagdir",
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
}
EthashDatasetsInMemoryFlag = cli.IntFlag{
Name: "ethash.dagsinmem",
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
Value: eth.DefaultConfig.EthashDatasetsInMem,
}
EthashDatasetsOnDiskFlag = cli.IntFlag{
Name: "ethash.dagsondisk",
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
}
NetworkIdFlag = cli.Uint64Flag{
Name: "networkid",
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)",
@ -203,6 +178,72 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
// Ethash settings
EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir",
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
}
EthashCachesInMemoryFlag = cli.IntFlag{
Name: "ethash.cachesinmem",
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
Value: eth.DefaultConfig.EthashCachesInMem,
}
EthashCachesOnDiskFlag = cli.IntFlag{
Name: "ethash.cachesondisk",
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
Value: eth.DefaultConfig.EthashCachesOnDisk,
}
EthashDatasetDirFlag = DirectoryFlag{
Name: "ethash.dagdir",
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
}
EthashDatasetsInMemoryFlag = cli.IntFlag{
Name: "ethash.dagsinmem",
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
Value: eth.DefaultConfig.EthashDatasetsInMem,
}
EthashDatasetsOnDiskFlag = cli.IntFlag{
Name: "ethash.dagsondisk",
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
}
// Transaction pool settings
TxPoolPriceLimitFlag = cli.Uint64Flag{
Name: "txpool.pricelimit",
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
Value: eth.DefaultConfig.TxPool.PriceLimit,
}
TxPoolPriceBumpFlag = cli.Uint64Flag{
Name: "txpool.pricebump",
Usage: "Price bump percentage to replace an already existing transaction",
Value: eth.DefaultConfig.TxPool.PriceBump,
}
TxPoolAccountSlotsFlag = cli.Uint64Flag{
Name: "txpool.accountslots",
Usage: "Minimum number of executable transaction slots guaranteed per account",
Value: eth.DefaultConfig.TxPool.AccountSlots,
}
TxPoolGlobalSlotsFlag = cli.Uint64Flag{
Name: "txpool.globalslots",
Usage: "Maximum number of executable transaction slots for all accounts",
Value: eth.DefaultConfig.TxPool.GlobalSlots,
}
TxPoolAccountQueueFlag = cli.Uint64Flag{
Name: "txpool.accountqueue",
Usage: "Maximum number of non-executable transaction slots permitted per account",
Value: eth.DefaultConfig.TxPool.AccountQueue,
}
TxPoolGlobalQueueFlag = cli.Uint64Flag{
Name: "txpool.globalqueue",
Usage: "Maximum number of non-executable transaction slots for all accounts",
Value: eth.DefaultConfig.TxPool.GlobalQueue,
}
TxPoolLifetimeFlag = cli.DurationFlag{
Name: "txpool.lifetime",
Usage: "Maximum amount of time non-executable transaction are queued",
Value: eth.DefaultConfig.TxPool.Lifetime,
}
// Performance tuning settings
CacheFlag = cli.IntFlag{
Name: "cache",
@ -237,7 +278,7 @@ var (
GasPriceFlag = BigFlag{
Name: "gasprice",
Usage: "Minimal gas price to accept for mining a transactions",
Value: big.NewInt(20 * params.Shannon),
Value: eth.DefaultConfig.GasPrice,
}
ExtraDataFlag = cli.StringFlag{
Name: "extradata",
@ -360,7 +401,17 @@ var (
}
BootnodesFlag = cli.StringFlag{
Name: "bootnodes",
Usage: "Comma separated enode URLs for P2P discovery bootstrap",
Usage: "Comma separated enode URLs for P2P discovery bootstrap (set v4+v5 instead for light servers)",
Value: "",
}
BootnodesV4Flag = cli.StringFlag{
Name: "bootnodesv4",
Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes)",
Value: "",
}
BootnodesV5Flag = cli.StringFlag{
Name: "bootnodesv5",
Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes)",
Value: "",
}
NodeKeyFileFlag = cli.StringFlag{
@ -469,8 +520,12 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls := params.MainnetBootnodes
switch {
case ctx.GlobalIsSet(BootnodesFlag.Name):
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
} else {
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
}
case ctx.GlobalBool(TestnetFlag.Name):
urls = params.TestnetBootnodes
case ctx.GlobalBool(RinkebyFlag.Name):
@ -493,8 +548,12 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
urls := params.DiscoveryV5Bootnodes
switch {
case ctx.GlobalIsSet(BootnodesFlag.Name):
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
} else {
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
}
case ctx.GlobalBool(RinkebyFlag.Name):
urls = params.RinkebyV5Bootnodes
case cfg.BootstrapNodesV5 != nil:
@ -717,6 +776,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
// --dev mode can't use p2p networking.
cfg.MaxPeers = 0
cfg.ListenAddr = ":0"
cfg.DiscoveryV5Addr = ":0"
cfg.NoDiscovery = true
cfg.DiscoveryV5 = false
}
@ -761,6 +821,30 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
}
}
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
}
if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) {
cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name)
}
if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) {
cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name)
}
if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) {
cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name)
}
if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) {
cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name)
}
if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) {
cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name)
}
if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) {
cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name)
}
}
func setEthash(ctx *cli.Context, cfg *eth.Config) {
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
@ -803,6 +887,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
setEtherbase(ctx, ks, cfg)
setGPO(ctx, &cfg.GPO)
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
switch {

common/bitutil/bitutil.go (new file, 188 lines)

@ -0,0 +1,188 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted from: https://golang.org/src/crypto/cipher/xor.go
// Package bitutil implements fast bitwise operations.
package bitutil
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// XORBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func XORBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
}
return safeXORBytes(dst, a, b)
}
// fastXORBytes xors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] ^ bw[i]
}
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
// safeXORBytes xors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
// ANDBytes ands the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes and'd.
func ANDBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastANDBytes(dst, a, b)
}
return safeANDBytes(dst, a, b)
}
// fastANDBytes ands in bulk. It only works on architectures that support
// unaligned read/writes.
func fastANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] & bw[i]
}
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] & b[i]
}
return n
}
// safeANDBytes ands one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] & b[i]
}
return n
}
// ORBytes ors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes or'd.
func ORBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastORBytes(dst, a, b)
}
return safeORBytes(dst, a, b)
}
// fastORBytes ors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] | bw[i]
}
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] | b[i]
}
return n
}
// safeORBytes ors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] | b[i]
}
return n
}
// TestBytes tests whether any bit is set in the input byte slice.
func TestBytes(p []byte) bool {
if supportsUnaligned {
return fastTestBytes(p)
}
return safeTestBytes(p)
}
// fastTestBytes tests for set bits in bulk. It only works on architectures that
// support unaligned read/writes.
func fastTestBytes(p []byte) bool {
n := len(p)
w := n / wordSize
if w > 0 {
pw := *(*[]uintptr)(unsafe.Pointer(&p))
for i := 0; i < w; i++ {
if pw[i] != 0 {
return true
}
}
}
for i := (n - n%wordSize); i < n; i++ {
if p[i] != 0 {
return true
}
}
return false
}
// safeTestBytes tests for set bits one byte at a time. It works on all
// architectures, independent if it supports unaligned read/writes or not.
func safeTestBytes(p []byte) bool {
for i := 0; i < len(p); i++ {
if p[i] != 0 {
return true
}
}
return false
}


@ -0,0 +1,215 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted from: https://golang.org/src/crypto/cipher/xor_test.go
package bitutil
import (
"bytes"
"testing"
)
// Tests that bitwise XOR works for various alignments.
func TestXOR(t *testing.T) {
for alignP := 0; alignP < 2; alignP++ {
for alignQ := 0; alignQ < 2; alignQ++ {
for alignD := 0; alignD < 2; alignD++ {
p := make([]byte, 1023)[alignP:]
q := make([]byte, 1023)[alignQ:]
for i := 0; i < len(p); i++ {
p[i] = byte(i)
}
for i := 0; i < len(q); i++ {
q[i] = byte(len(q) - i)
}
d1 := make([]byte, 1023+alignD)[alignD:]
d2 := make([]byte, 1023+alignD)[alignD:]
XORBytes(d1, p, q)
safeXORBytes(d2, p, q)
if !bytes.Equal(d1, d2) {
t.Error("not equal", d1, d2)
}
}
}
}
}
// Tests that bitwise AND works for various alignments.
func TestAND(t *testing.T) {
for alignP := 0; alignP < 2; alignP++ {
for alignQ := 0; alignQ < 2; alignQ++ {
for alignD := 0; alignD < 2; alignD++ {
p := make([]byte, 1023)[alignP:]
q := make([]byte, 1023)[alignQ:]
for i := 0; i < len(p); i++ {
p[i] = byte(i)
}
for i := 0; i < len(q); i++ {
q[i] = byte(len(q) - i)
}
d1 := make([]byte, 1023+alignD)[alignD:]
d2 := make([]byte, 1023+alignD)[alignD:]
ANDBytes(d1, p, q)
safeANDBytes(d2, p, q)
if !bytes.Equal(d1, d2) {
t.Error("not equal")
}
}
}
}
}
// Tests that bitwise OR works for various alignments.
func TestOR(t *testing.T) {
for alignP := 0; alignP < 2; alignP++ {
for alignQ := 0; alignQ < 2; alignQ++ {
for alignD := 0; alignD < 2; alignD++ {
p := make([]byte, 1023)[alignP:]
q := make([]byte, 1023)[alignQ:]
for i := 0; i < len(p); i++ {
p[i] = byte(i)
}
for i := 0; i < len(q); i++ {
q[i] = byte(len(q) - i)
}
d1 := make([]byte, 1023+alignD)[alignD:]
d2 := make([]byte, 1023+alignD)[alignD:]
ORBytes(d1, p, q)
safeORBytes(d2, p, q)
if !bytes.Equal(d1, d2) {
t.Error("not equal")
}
}
}
}
}
// Tests that bit testing works for various alignments.
func TestTest(t *testing.T) {
for align := 0; align < 2; align++ {
// Test for bits set in the bulk part
p := make([]byte, 1023)[align:]
p[100] = 1
if TestBytes(p) != safeTestBytes(p) {
t.Error("not equal")
}
// Test for bits set in the tail part
q := make([]byte, 1023)[align:]
q[len(q)-1] = 1
if TestBytes(q) != safeTestBytes(q) {
t.Error("not equal")
}
}
}
// Benchmarks the potentially optimized XOR performance.
func BenchmarkFastXOR1KB(b *testing.B) { benchmarkFastXOR(b, 1024) }
func BenchmarkFastXOR2KB(b *testing.B) { benchmarkFastXOR(b, 2048) }
func BenchmarkFastXOR4KB(b *testing.B) { benchmarkFastXOR(b, 4096) }
func benchmarkFastXOR(b *testing.B, size int) {
p, q := make([]byte, size), make([]byte, size)
for i := 0; i < b.N; i++ {
XORBytes(p, p, q)
}
}
// Benchmarks the baseline XOR performance.
func BenchmarkBaseXOR1KB(b *testing.B) { benchmarkBaseXOR(b, 1024) }
func BenchmarkBaseXOR2KB(b *testing.B) { benchmarkBaseXOR(b, 2048) }
func BenchmarkBaseXOR4KB(b *testing.B) { benchmarkBaseXOR(b, 4096) }
func benchmarkBaseXOR(b *testing.B, size int) {
p, q := make([]byte, size), make([]byte, size)
for i := 0; i < b.N; i++ {
safeXORBytes(p, p, q)
}
}
// Benchmarks the potentially optimized AND performance.
func BenchmarkFastAND1KB(b *testing.B) { benchmarkFastAND(b, 1024) }
func BenchmarkFastAND2KB(b *testing.B) { benchmarkFastAND(b, 2048) }
func BenchmarkFastAND4KB(b *testing.B) { benchmarkFastAND(b, 4096) }
func benchmarkFastAND(b *testing.B, size int) {
p, q := make([]byte, size), make([]byte, size)
for i := 0; i < b.N; i++ {
ANDBytes(p, p, q)
}
}
// Benchmarks the baseline AND performance.
func BenchmarkBaseAND1KB(b *testing.B) { benchmarkBaseAND(b, 1024) }
func BenchmarkBaseAND2KB(b *testing.B) { benchmarkBaseAND(b, 2048) }
func BenchmarkBaseAND4KB(b *testing.B) { benchmarkBaseAND(b, 4096) }
func benchmarkBaseAND(b *testing.B, size int) {
p, q := make([]byte, size), make([]byte, size)
for i := 0; i < b.N; i++ {
safeANDBytes(p, p, q)
}
}
// Benchmarks the potentially optimized OR performance.
func BenchmarkFastOR1KB(b *testing.B) { benchmarkFastOR(b, 1024) }
func BenchmarkFastOR2KB(b *testing.B) { benchmarkFastOR(b, 2048) }
func BenchmarkFastOR4KB(b *testing.B) { benchmarkFastOR(b, 4096) }
func benchmarkFastOR(b *testing.B, size int) {
p, q := make([]byte, size), make([]byte, size)
for i := 0; i < b.N; i++ {
ORBytes(p, p, q)
}
}
// Benchmarks the baseline OR performance.
func BenchmarkBaseOR1KB(b *testing.B) { benchmarkBaseOR(b, 1024) }
func BenchmarkBaseOR2KB(b *testing.B) { benchmarkBaseOR(b, 2048) }
func BenchmarkBaseOR4KB(b *testing.B) { benchmarkBaseOR(b, 4096) }
func benchmarkBaseOR(b *testing.B, size int) {
p, q := make([]byte, size), make([]byte, size)
for i := 0; i < b.N; i++ {
safeORBytes(p, p, q)
}
}
// Benchmarks the potentially optimized bit testing performance.
func BenchmarkFastTest1KB(b *testing.B) { benchmarkFastTest(b, 1024) }
func BenchmarkFastTest2KB(b *testing.B) { benchmarkFastTest(b, 2048) }
func BenchmarkFastTest4KB(b *testing.B) { benchmarkFastTest(b, 4096) }
func benchmarkFastTest(b *testing.B, size int) {
p := make([]byte, size)
for i := 0; i < b.N; i++ {
TestBytes(p)
}
}
// Benchmarks the baseline bit testing performance.
func BenchmarkBaseTest1KB(b *testing.B) { benchmarkBaseTest(b, 1024) }
func BenchmarkBaseTest2KB(b *testing.B) { benchmarkBaseTest(b, 2048) }
func BenchmarkBaseTest4KB(b *testing.B) { benchmarkBaseTest(b, 4096) }
func benchmarkBaseTest(b *testing.B, size int) {
p := make([]byte, size)
for i := 0; i < b.N; i++ {
safeTestBytes(p)
}
}
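For orientation before the new compression code below, here is a minimal usage sketch of the bitutil helpers exercised by the tests and benchmarks above (an editor's illustration, not part of the changeset; it only uses the XORBytes/ANDBytes/ORBytes/TestBytes functions shown in this diff):
package main
import (
	"fmt"
	"github.com/ethereum/go-ethereum/common/bitutil"
)
func main() {
	a := []byte{0x0f, 0xf0}
	b := []byte{0xff, 0x00}
	dst := make([]byte, 2)
	bitutil.XORBytes(dst, a, b)  // dst[i] = a[i] ^ b[i]
	fmt.Printf("xor: %x\n", dst) // f0f0
	bitutil.ANDBytes(dst, a, b)  // dst[i] = a[i] & b[i]
	fmt.Printf("and: %x\n", dst) // 0f00
	bitutil.ORBytes(dst, a, b)   // dst[i] = a[i] | b[i]
	fmt.Printf("or:  %x\n", dst) // fff0
	fmt.Println("any bit set:", bitutil.TestBytes(dst)) // true
}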

170
common/bitutil/compress.go Normal file
View File

@ -0,0 +1,170 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bitutil
import "errors"
var (
// errMissingData is returned from decompression if the byte referenced by
// the bitset header overflows the input data.
errMissingData = errors.New("missing bytes on input")
// errUnreferencedData is returned from decompression if not all bytes were used
// up from the input data after decompressing it.
errUnreferencedData = errors.New("extra bytes on input")
// errExceededTarget is returned from decompression if the bitset header has
// more bits defined than there is target buffer space available.
errExceededTarget = errors.New("target data size exceeded")
// errZeroContent is returned from decompression if a data byte referenced in
// the bitset header is actually a zero byte.
errZeroContent = errors.New("zero byte in input content")
)
// The compression algorithm implemented by CompressBytes and DecompressBytes is
// optimized for sparse input data which contains a lot of zero bytes. Decompression
// requires knowledge of the decompressed data length.
//
// Compression works as follows:
//
// if data only contains zeroes,
// CompressBytes(data) == nil
// otherwise if len(data) <= 1,
// CompressBytes(data) == data
// otherwise:
// CompressBytes(data) == append(CompressBytes(nonZeroBitset(data)), nonZeroBytes(data)...)
// where
// nonZeroBitset(data) is a bit vector with len(data) bits (MSB first):
// nonZeroBitset(data)[i/8] && (1 << (7-i%8)) != 0 if data[i] != 0
// len(nonZeroBitset(data)) == (len(data)+7)/8
// nonZeroBytes(data) contains the non-zero bytes of data in the same order
// CompressBytes compresses the input byte slice according to the sparse bitset
// representation algorithm. If the result is bigger than the original input, no
// compression is done.
func CompressBytes(data []byte) []byte {
if out := bitsetEncodeBytes(data); len(out) < len(data) {
return out
}
cpy := make([]byte, len(data))
copy(cpy, data)
return cpy
}
// bitsetEncodeBytes compresses the input byte slice according to the sparse
// bitset representation algorithm.
func bitsetEncodeBytes(data []byte) []byte {
// Empty slices get compressed to nil
if len(data) == 0 {
return nil
}
// One byte slices compress to nil or retain the single byte
if len(data) == 1 {
if data[0] == 0 {
return nil
}
return data
}
// Calculate the bitset of set bytes, and gather the non-zero bytes
nonZeroBitset := make([]byte, (len(data)+7)/8)
nonZeroBytes := make([]byte, 0, len(data))
for i, b := range data {
if b != 0 {
nonZeroBytes = append(nonZeroBytes, b)
nonZeroBitset[i/8] |= 1 << byte(7-i%8)
}
}
if len(nonZeroBytes) == 0 {
return nil
}
return append(bitsetEncodeBytes(nonZeroBitset), nonZeroBytes...)
}
// DecompressBytes decompresses data with a known target size. If the input data
// matches the size of the target, it means no compression was done in the first
// place.
func DecompressBytes(data []byte, target int) ([]byte, error) {
if len(data) > target {
return nil, errExceededTarget
}
if len(data) == target {
cpy := make([]byte, len(data))
copy(cpy, data)
return cpy, nil
}
return bitsetDecodeBytes(data, target)
}
// bitsetDecodeBytes decompresses data with a known target size.
func bitsetDecodeBytes(data []byte, target int) ([]byte, error) {
out, size, err := bitsetDecodePartialBytes(data, target)
if err != nil {
return nil, err
}
if size != len(data) {
return nil, errUnreferencedData
}
return out, nil
}
// bitsetDecodePartialBytes decompresses data with a known target size, but does
// not enforce consuming all the input bytes. In addition to the decompressed
// output, the function returns the length of compressed input data corresponding
// to the output as the input slice may be longer.
func bitsetDecodePartialBytes(data []byte, target int) ([]byte, int, error) {
// Sanity check 0 targets to avoid infinite recursion
if target == 0 {
return nil, 0, nil
}
// Handle the zero and single byte corner cases
decomp := make([]byte, target)
if len(data) == 0 {
return decomp, 0, nil
}
if target == 1 {
decomp[0] = data[0] // copy to avoid referencing the input slice
if data[0] != 0 {
return decomp, 1, nil
}
return decomp, 0, nil
}
// Decompress the bitset of set bytes and distribute the non zero bytes
nonZeroBitset, ptr, err := bitsetDecodePartialBytes(data, (target+7)/8)
if err != nil {
return nil, ptr, err
}
for i := 0; i < 8*len(nonZeroBitset); i++ {
if nonZeroBitset[i/8]&(1<<byte(7-i%8)) != 0 {
// Make sure we have enough data to push into the correct slot
if ptr >= len(data) {
return nil, 0, errMissingData
}
if i >= len(decomp) {
return nil, 0, errExceededTarget
}
// Make sure the data is valid and push into the slot
if data[ptr] == 0 {
return nil, 0, errZeroContent
}
decomp[i] = data[ptr]
ptr++
}
}
return decomp, ptr, nil
}
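A brief round-trip sketch of the exported API above (editor's illustration, not part of the diff; it assumes the package is imported as common/bitutil). Note that decompression needs the original length, as the file's documentation states:
package main
import (
	"bytes"
	"fmt"
	"github.com/ethereum/go-ethereum/common/bitutil"
)
func main() {
	// Sparse input: 64 bytes with only two non-zero entries.
	// For intuition, [0x00 0x07 0x00 0x00] would encode to [0x40 0x07]:
	// a one-byte bitset (bit for index 1, MSB first) plus the non-zero byte.
	data := make([]byte, 64)
	data[3], data[40] = 0xaa, 0x17
	comp := bitutil.CompressBytes(data)
	fmt.Println("original:", len(data), "compressed:", len(comp))
	// Decompression requires the known target size; mismatches error out.
	orig, err := bitutil.DecompressBytes(comp, len(data))
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(orig, data))
}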

View File

@ -0,0 +1,56 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build gofuzz
package bitutil
import "bytes"
// Fuzz implements a go-fuzz fuzzer method to test various encoding method
// invocations.
func Fuzz(data []byte) int {
if len(data) == 0 {
return -1
}
if data[0]%2 == 0 {
return fuzzEncode(data[1:])
}
return fuzzDecode(data[1:])
}
// fuzzEncode implements a go-fuzz fuzzer method to test the bitset encoding and
// decoding algorithm.
func fuzzEncode(data []byte) int {
proc, _ := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
if !bytes.Equal(data, proc) {
panic("content mismatch")
}
return 0
}
// fuzzDecode implements a go-fuzz fuzzer method to test the bit decoding and
// reencoding algorithm.
func fuzzDecode(data []byte) int {
blob, err := bitsetDecodeBytes(data, 1024)
if err != nil {
return 0
}
if comp := bitsetEncodeBytes(blob); !bytes.Equal(comp, data) {
panic("content mismatch")
}
return 0
}

View File

@ -0,0 +1,181 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bitutil
import (
"bytes"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// Tests that data bitset encoding and decoding works and is bijective.
func TestEncodingCycle(t *testing.T) {
tests := []string{
// Tests generated by go-fuzz to maximize code coverage
"0x000000000000000000",
"0xef0400",
"0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb",
"0x7b64000000",
"0x000034000000000000",
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f0000000000000000000",
"0x4912385c0e7b64000000",
"0x000034000000000000000000000000000000",
"0x00",
"0x000003e834ff7f0000",
"0x0000",
"0x0000000000000000000000000000000000000000000000000000000000ff00",
"0x895f0c6a020f850c6a020f85f88df88d",
"0xdf7070533534333636313639343638373432313536346c1bc3315aac2f65fefb",
"0x0000000000",
"0xdf70706336346c65fefb",
"0x00006d643634000000",
"0xdf7070533534333636313639343638373532313536346c1bc333393438373130707063363430353639343638373532313536346c1bc333393438336336346c65fe",
}
for i, tt := range tests {
data := hexutil.MustDecode(tt)
proc, err := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
if err != nil {
t.Errorf("test %d: failed to decompress compressed data: %v", i, err)
continue
}
if !bytes.Equal(data, proc) {
t.Errorf("test %d: compress/decompress mismatch: have %x, want %x", i, proc, data)
}
}
}
// Tests that data bitset decoding and re-encoding works and is bijective.
func TestDecodingCycle(t *testing.T) {
tests := []struct {
size int
input string
fail error
}{
{size: 0, input: "0x"},
// Crashers generated by go-fuzz
{size: 0, input: "0x0020", fail: errUnreferencedData},
{size: 0, input: "0x30", fail: errUnreferencedData},
{size: 1, input: "0x00", fail: errUnreferencedData},
{size: 2, input: "0x07", fail: errMissingData},
{size: 1024, input: "0x8000", fail: errZeroContent},
// Tests generated by go-fuzz to maximize code coverage
{size: 29490, input: "0x343137343733323134333839373334323073333930783e3078333930783e70706336346c65303e", fail: errMissingData},
{size: 59395, input: "0x00", fail: errUnreferencedData},
{size: 52574, input: "0x70706336346c65c0de", fail: errExceededTarget},
{size: 42264, input: "0x07", fail: errMissingData},
{size: 52, input: "0xa5045bad48f4", fail: errExceededTarget},
{size: 52574, input: "0xc0de", fail: errMissingData},
{size: 52574, input: "0x"},
{size: 29490, input: "0x34313734373332313433383937333432307333393078073034333839373334323073333930783e3078333937333432307333393078073061333930783e70706336346c65303e", fail: errMissingData},
{size: 29491, input: "0x3973333930783e30783e", fail: errMissingData},
{size: 1024, input: "0x808080608080"},
{size: 1024, input: "0x808470705e3632383337363033313434303137393130306c6580ef46806380635a80"},
{size: 1024, input: "0x8080808070"},
{size: 1024, input: "0x808070705e36346c6580ef46806380635a80"},
{size: 1024, input: "0x80808046802680"},
{size: 1024, input: "0x4040404035"},
{size: 1024, input: "0x4040bf3ba2b3f684402d353234373438373934409fe5b1e7ada94ebfd7d0505e27be4035"},
{size: 1024, input: "0x404040bf3ba2b3f6844035"},
{size: 1024, input: "0x40402d35323437343837393440bfd7d0505e27be4035"},
}
for i, tt := range tests {
data := hexutil.MustDecode(tt.input)
orig, err := bitsetDecodeBytes(data, tt.size)
if err != tt.fail {
t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.fail)
}
if err != nil {
continue
}
if comp := bitsetEncodeBytes(orig); !bytes.Equal(comp, data) {
t.Errorf("test %d: decompress/compress mismatch: have %x, want %x", i, comp, data)
}
}
}
// TestCompression tests that compression works by returning either the bitset
// encoded input, or the actual input if the bitset version is longer.
func TestCompression(t *testing.T) {
// Check that the compression returns the bitset encoding if it is shorter
in := hexutil.MustDecode("0x4912385c0e7b64000000")
out := hexutil.MustDecode("0x80fe4912385c0e7b64")
if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
t.Errorf("encoding mismatch for sparse data: have %x, want %x", data, out)
}
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
t.Errorf("decoding mismatch for sparse data: have %x, want %x, error %v", data, in, err)
}
// Check that the compression returns the input if the bitset encoding is longer
in = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
out = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
t.Errorf("encoding mismatch for dense data: have %x, want %x", data, out)
}
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
t.Errorf("decoding mismatch for dense data: have %x, want %x, error %v", data, in, err)
}
// Check that decompressing a longer input than the target fails
if _, err := DecompressBytes([]byte{0xc0, 0x01, 0x01}, 2); err != errExceededTarget {
t.Errorf("decoding error mismatch for long data: have %v, want %v", err, errExceededTarget)
}
}
// Crude benchmark for compressing random slices of bytes.
func BenchmarkEncoding1KBVerySparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.0001) }
func BenchmarkEncoding2KBVerySparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.0001) }
func BenchmarkEncoding4KBVerySparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.0001) }
func BenchmarkEncoding1KBSparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.001) }
func BenchmarkEncoding2KBSparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.001) }
func BenchmarkEncoding4KBSparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.001) }
func BenchmarkEncoding1KBDense(b *testing.B) { benchmarkEncoding(b, 1024, 0.1) }
func BenchmarkEncoding2KBDense(b *testing.B) { benchmarkEncoding(b, 2048, 0.1) }
func BenchmarkEncoding4KBDense(b *testing.B) { benchmarkEncoding(b, 4096, 0.1) }
func BenchmarkEncoding1KBSaturated(b *testing.B) { benchmarkEncoding(b, 1024, 0.5) }
func BenchmarkEncoding2KBSaturated(b *testing.B) { benchmarkEncoding(b, 2048, 0.5) }
func BenchmarkEncoding4KBSaturated(b *testing.B) { benchmarkEncoding(b, 4096, 0.5) }
func benchmarkEncoding(b *testing.B, bytes int, fill float64) {
// Generate a random slice of bytes to compress
random := rand.NewSource(0) // reproducible and comparable
data := make([]byte, bytes)
bits := int(float64(bytes) * 8 * fill)
for i := 0; i < bits; i++ {
idx := random.Int63() % int64(len(data))
bit := uint(random.Int63() % 8)
data[idx] |= 1 << bit
}
// Reset the benchmark and measure encoding/decoding
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
}
}

View File

@ -89,18 +89,18 @@ func Hex2BytesFixed(str string, flen int) []byte {
}
func RightPadBytes(slice []byte, l int) []byte {
if l < len(slice) {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[0:len(slice)], slice)
copy(padded, slice)
return padded
}
func LeftPadBytes(slice []byte, l int) []byte {
if l < len(slice) {
if l <= len(slice) {
return slice
}

View File

@ -44,7 +44,7 @@ import (
const (
checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
inmemorySignatures = 1024 // Number of recent blocks to keep in memory
inmemorySignatures = 4096 // Number of recent block signatures to keep in memory
wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers
)
@ -162,7 +162,12 @@ func sigHash(header *types.Header) (hash common.Hash) {
}
// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header) (common.Address, error) {
func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
// If the signature's already cached, return that
hash := header.Hash()
if address, known := sigcache.Get(hash); known {
return address.(common.Address), nil
}
// Retrieve the signature from the header extra-data
if len(header.Extra) < extraSeal {
return common.Address{}, errMissingSignature
@ -177,6 +182,7 @@ func ecrecover(header *types.Header) (common.Address, error) {
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
sigcache.Add(hash, signer)
return signer, nil
}
@ -223,7 +229,7 @@ func New(config *params.CliqueConfig, db ethdb.Database) *Clique {
// Author implements consensus.Engine, returning the Ethereum address recovered
// from the signature in the header's extra-data section.
func (c *Clique) Author(header *types.Header) (common.Address, error) {
return ecrecover(header)
return ecrecover(header, c.signatures)
}
// VerifyHeader checks whether a header conforms to the consensus rules.
@ -369,7 +375,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
}
// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
if s, err := loadSnapshot(c.config, c.db, hash); err == nil {
if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash)
snap = s
break
@ -385,7 +391,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
for i := 0; i < len(signers); i++ {
copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
}
snap = newSnapshot(c.config, 0, genesis.Hash(), signers)
snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
if err := snap.store(c.db); err != nil {
return nil, err
}
@ -464,7 +470,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
c.recents.Add(snap.Hash, snap)
// Resolve the authorization key and check against signers
signer, err := ecrecover(header)
signer, err := ecrecover(header, c.signatures)
if err != nil {
return err
}
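The hunk above threads an ARC cache through ecrecover so that repeated header verifications skip the expensive signature recovery. A standalone sketch of that caching pattern (editor's illustration with stand-in string types; only the hashicorp/golang-lru import mirrors the real change):
package main
import (
	"fmt"
	lru "github.com/hashicorp/golang-lru"
)
func main() {
	sigcache, _ := lru.NewARC(4096) // same size as the new inmemorySignatures
	recoverSigner := func(hash, sig string) string {
		// If the signature's already cached, return that (as in ecrecover above).
		if addr, known := sigcache.Get(hash); known {
			return addr.(string)
		}
		addr := "0x" + sig[:8] // stand-in for the real secp256k1 recovery
		sigcache.Add(hash, addr)
		return addr
	}
	fmt.Println(recoverSigner("h1", "deadbeefcafe")) // computed and cached
	fmt.Println(recoverSigner("h1", "deadbeefcafe")) // served from the cache
}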

View File

@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
lru "github.com/hashicorp/golang-lru"
)
// Vote represents a single vote that an authorized signer made to modify the
@ -44,7 +45,8 @@ type Tally struct {
// Snapshot is the state of the authorization voting at a given point in time.
type Snapshot struct {
config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
Number uint64 `json:"number"` // Block number where the snapshot was created
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
@ -57,14 +59,15 @@ type Snapshot struct {
// newSnapshot create a new snapshot with the specified startup parameters. This
// method does not initialize the set of recent signers, so only ever use it for
// the genesis block.
func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
snap := &Snapshot{
config: config,
Number: number,
Hash: hash,
Signers: make(map[common.Address]struct{}),
Recents: make(map[uint64]common.Address),
Tally: make(map[common.Address]Tally),
config: config,
sigcache: sigcache,
Number: number,
Hash: hash,
Signers: make(map[common.Address]struct{}),
Recents: make(map[uint64]common.Address),
Tally: make(map[common.Address]Tally),
}
for _, signer := range signers {
snap.Signers[signer] = struct{}{}
@ -73,7 +76,7 @@ func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, s
}
// loadSnapshot loads an existing snapshot from the database.
func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
blob, err := db.Get(append([]byte("clique-"), hash[:]...))
if err != nil {
return nil, err
@ -83,6 +86,7 @@ func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Ha
return nil, err
}
snap.config = config
snap.sigcache = sigcache
return snap, nil
}
@ -99,13 +103,14 @@ func (s *Snapshot) store(db ethdb.Database) error {
// copy creates a deep copy of the snapshot, though not the individual votes.
func (s *Snapshot) copy() *Snapshot {
cpy := &Snapshot{
config: s.config,
Number: s.Number,
Hash: s.Hash,
Signers: make(map[common.Address]struct{}),
Recents: make(map[uint64]common.Address),
Votes: make([]*Vote, len(s.Votes)),
Tally: make(map[common.Address]Tally),
config: s.config,
sigcache: s.sigcache,
Number: s.Number,
Hash: s.Hash,
Signers: make(map[common.Address]struct{}),
Recents: make(map[uint64]common.Address),
Votes: make([]*Vote, len(s.Votes)),
Tally: make(map[common.Address]Tally),
}
for signer := range s.Signers {
cpy.Signers[signer] = struct{}{}
@ -190,7 +195,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
delete(snap.Recents, number-limit)
}
// Resolve the authorization key and check against signers
signer, err := ecrecover(header)
signer, err := ecrecover(header, s.sigcache)
if err != nil {
return nil, err
}

View File

@ -27,6 +27,7 @@ import (
"unsafe"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
@ -142,7 +143,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
dstOff = j * hashBytes
xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes
)
xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
keccak512(cache[dstOff:], temp)
atomic.AddUint32(&progress, 1)

View File

@ -239,7 +239,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
return errZeroBlockTime
}
// Verify the block's difficulty based on its timestamp and parent's difficulty
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
if expected.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
}
@ -283,16 +283,19 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
return nil
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have when created at time given the parent block's time
// and difficulty.
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
//
// TODO (karalabe): Move the chain maker into this package and make this private!
func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, common.Big1)
switch {
case config.IsHomestead(next):
return calcDifficultyHomestead(time, parent)
default:
return calcDifficultyFrontier(time, parent)
}
return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
}
// Some weird constants to avoid constant memory allocs for them.
@ -305,7 +308,7 @@ var (
// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Homestead rules.
func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
// algorithm:
// diff = (parent_diff +
@ -313,7 +316,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
// ) + 2^(periodCount - 2)
bigTime := new(big.Int).SetUint64(time)
bigParentTime := new(big.Int).SetUint64(parentTime)
bigParentTime := new(big.Int).Set(parent.Time)
// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
@ -329,16 +332,16 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
x.Set(bigMinus99)
}
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
y.Div(parentDiff, params.DifficultyBoundDivisor)
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
x.Mul(y, x)
x.Add(parentDiff, x)
x.Add(parent.Difficulty, x)
// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x.Set(params.MinimumDifficulty)
}
// for the exponential factor
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount := new(big.Int).Add(parent.Number, common.Big1)
periodCount.Div(periodCount, expDiffPeriod)
// the exponential factor, commonly referred to as "the bomb"
@ -354,25 +357,25 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
// difficulty that a new block should have when created at time given the parent
// block's time and difficulty. The calculation uses the Frontier rules.
func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
diff := new(big.Int)
adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
adjust := new(big.Int).Div(parent.Difficulty, params.DifficultyBoundDivisor)
bigTime := new(big.Int)
bigParentTime := new(big.Int)
bigTime.SetUint64(time)
bigParentTime.SetUint64(parentTime)
bigParentTime.Set(parent.Time)
if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
diff.Add(parentDiff, adjust)
diff.Add(parent.Difficulty, adjust)
} else {
diff.Sub(parentDiff, adjust)
diff.Sub(parent.Difficulty, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff.Set(params.MinimumDifficulty)
}
periodCount := new(big.Int).Add(parentNumber, common.Big1)
periodCount := new(big.Int).Add(parent.Number, common.Big1)
periodCount.Div(periodCount, expDiffPeriod)
if periodCount.Cmp(common.Big1) > 0 {
// diff = diff + 2^(periodCount - 2)
@ -434,8 +437,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(),
parent.Time.Uint64(), parent.Number, parent.Difficulty)
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
return nil
}
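To make the signature change concrete: CalcDifficulty now takes the parent header rather than three separate fields. A hedged usage sketch with made-up numbers (editor's illustration; the config is built by hand the same way the test below does):
package main
import (
	"fmt"
	"math/big"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)
func main() {
	config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
	parent := &types.Header{
		Number:     big.NewInt(3000000),
		Time:       big.NewInt(1499999990),
		Difficulty: big.NewInt(1000000000),
	}
	// Homestead rule per the comment above:
	// diff = parent_diff + parent_diff/2048 * max(1 - delta/10, -99) + 2^(periodCount-2)
	diff := ethash.CalcDifficulty(config, 1500000000, parent)
	fmt.Println("next difficulty:", diff)
}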

View File

@ -23,6 +23,7 @@ import (
"testing"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
@ -71,7 +72,11 @@ func TestCalcDifficulty(t *testing.T) {
config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
for name, test := range tests {
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
Number: number,
Time: new(big.Int).SetUint64(test.ParentTimestamp),
Difficulty: test.ParentDifficulty,
})
if diff.Cmp(test.CurrentDifficulty) != 0 {
t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
}

View File

@ -1,85 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source: https://golang.org/src/crypto/cipher/xor.go
package ethash
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] ^ bw[i]
}
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func xorBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
}
// TODO(hanwen): if (dst, a, b) have common alignment
// we could still try fastXORBytes. It is not clear
// how often this happens, and it's only worth it if
// the block encryption itself is hardware
// accelerated.
return safeXORBytes(dst, a, b)
}
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
// The arguments are assumed to be of equal length.
func fastXORWords(dst, a, b []byte) {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
n := len(b) / wordSize
for i := 0; i < n; i++ {
dw[i] = aw[i] ^ bw[i]
}
}
func xorWords(dst, a, b []byte) {
if supportsUnaligned {
fastXORWords(dst, a, b)
} else {
safeXORBytes(dst, a, b)
}
}

View File

@ -2,7 +2,7 @@ FROM alpine:3.5
RUN \
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
git clone --depth 1 --branch release/1.5 https://github.com/ethereum/go-ethereum && \
git clone --depth 1 --branch release/1.6 https://github.com/ethereum/go-ethereum && \
(cd go-ethereum && make geth) && \
cp go-ethereum/build/bin/geth /geth && \
apk del go git make gcc musl-dev linux-headers && \

1
containers/vagrant/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
.vagrant

View File

@ -1,29 +1,38 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure(2) do |config|
config.vm.box = "ubuntu/trusty64"
require 'yaml'
config.vm.provider "virtualbox" do |vb|
vb.memory = "2048"
VAGRANTFILE_API_VERSION = 2
VM_RAM = 2048
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.define "ubuntu", :primary => true do |ubuntu|
ubuntu.vm.box = "ubuntu/trusty64"
ubuntu.vm.provision "shell", :path => "provisioners/shell/ubuntu.sh"
end
config.vm.define "debian", :primary => true do |debian|
debian.vm.box = "debian/jessie64"
debian.vm.provision "shell", :path => "provisioners/shell/debian.sh"
end
config.vm.define "centos", :autostart => false do |centos|
centos.vm.box = "centos/7"
centos.vm.provision "shell", :path => "provisioners/shell/centos.sh"
end
config.vm.provider "virtualbox" do |vb|
vb.memory = VM_RAM
end
config.vm.provider "libvirt" do |lv|
lv.memory = VM_RAM
config.vm.synced_folder ".", "/home/vagrant/sync", :disabled => true
end
config.vm.synced_folder ".", "/vagrant", :disabled => true
config.vm.synced_folder "../../", "/home/vagrant/go/src/github.com/ethereum/go-ethereum"
config.vm.synced_folder ".", "/vagrant", disabled: true
config.vm.provision "shell", inline: <<-SHELL
sudo apt-get install software-properties-common
sudo add-apt-repository -y ppa:ethereum/ethereum
sudo add-apt-repository -y ppa:ethereum/ethereum-dev
sudo apt-get update
sudo apt-get install -y build-essential golang git-all
GOPATH=/home/vagrant/go go get github.com/tools/godep
sudo chown -R vagrant:vagrant ~vagrant/go
echo "export GOPATH=/home/vagrant/go" >> ~vagrant/.bashrc
echo "export PATH=\\\$PATH:\\\$GOPATH/bin:/usr/local/go/bin" >> ~vagrant/.bashrc
SHELL
end

View File

@ -0,0 +1,11 @@
#!/bin/bash
sudo yum install -y git wget
sudo yum update -y
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc

View File

@ -0,0 +1,11 @@
#!/bin/bash
sudo apt-get install -y build-essential git-all wget
sudo apt-get update
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc

View File

@ -0,0 +1,11 @@
#!/bin/bash
sudo apt-get install -y build-essential git-all wget
sudo apt-get update
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc

File diff suppressed because it is too large

View File

@ -18,7 +18,7 @@ package core
import "github.com/ethereum/go-ethereum/common"
// Set of manually tracked bad hashes (usually hard forks)
// BadHashes represent a set of manually tracked bad hashes (usually hard forks)
var BadHashes = map[common.Hash]bool{
common.HexToHash("05bef30ef572270f654746da22639a7a0c97dd97a7050b9e252391996aaeb689"): true,
common.HexToHash("7d05d08cbc596a2e5e4f13b80a743e53e09221b5323c3a61946b20873e58583f"): true,

View File

@ -84,7 +84,7 @@ func (b *BlockGen) AddTx(tx *types.Transaction) {
if b.gasPool == nil {
b.SetCoinbase(common.Address{})
}
b.statedb.StartRecord(tx.Hash(), common.Hash{}, len(b.txs))
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
receipt, _, err := ApplyTransaction(b.config, nil, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, b.header.GasUsed, vm.Config{})
if err != nil {
panic(err)
@ -98,10 +98,10 @@ func (b *BlockGen) Number() *big.Int {
return new(big.Int).Set(b.header.Number)
}
// AddUncheckedReceipts forcefully adds a receipts to the block without a
// AddUncheckedReceipt forcefully adds a receipt to the block without a
// backing transaction.
//
// AddUncheckedReceipts will cause consensus failures when used during real
// AddUncheckedReceipt will cause consensus failures when used during real
// chain processing. This is best used in conjunction with raw block insertion.
func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {
b.receipts = append(b.receipts, receipt)
@ -142,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
panic("block time out of range")
}
b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Header())
}
// GenerateChain creates a chain of n blocks. The first block's
@ -209,15 +209,20 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
} else {
time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
}
return &types.Header{
Root: state.IntermediateRoot(config.IsEIP158(parent.Number())),
ParentHash: parent.Hash(),
Coinbase: parent.Coinbase(),
Difficulty: ethash.CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: time,
Difficulty: ethash.CalcDifficulty(config, time.Uint64(), &types.Header{
Number: parent.Number(),
Time: new(big.Int).Sub(time, big.NewInt(10)),
Difficulty: parent.Difficulty(),
}),
GasLimit: CalcGasLimit(parent),
GasUsed: new(big.Int),
Number: new(big.Int).Add(parent.Number(), common.Big1),
Time: time,
}
}

View File

@ -64,7 +64,7 @@ var (
oldBlockReceiptsPrefix = []byte("receipts-block-")
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
@ -546,7 +546,7 @@ func mipmapKey(num, level uint64) []byte {
return append(mipmapPre, append(lkey, key.Bytes()...)...)
}
// WriteMapmapBloom writes each address included in the receipts' logs to the
// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
mipmapBloomMu.Lock()
@ -638,7 +638,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) {
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
if len(jsonChainConfig) == 0 {
return nil, ChainConfigNotFoundErr
return nil, ErrChainConfigNotFound
}
var config params.ChainConfig

View File

@ -17,8 +17,6 @@
package core
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
@ -43,7 +41,7 @@ type NewMinedBlockEvent struct{ Block *types.Block }
// RemovedTransactionEvent is posted when a reorg happens
type RemovedTransactionEvent struct{ Txs types.Transactions }
// RemovedLogEvent is posted when a reorg happens
// RemovedLogsEvent is posted when a reorg happens
type RemovedLogsEvent struct{ Logs []*types.Log }
type ChainEvent struct {
@ -67,8 +65,6 @@ type ChainUncleEvent struct {
type ChainHeadEvent struct{ Block *types.Block }
type GasPriceChanged struct{ Price *big.Int }
// Mining operation events
type StartMining struct{}
type TopMining struct{}

View File

@ -20,4 +20,4 @@ import (
"math/big"
)
var BlockReward *big.Int = big.NewInt(5e+18)
var BlockReward = big.NewInt(5e+18)

View File

@ -133,7 +133,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
newcfg := genesis.configOrDefault(stored)
storedcfg, err := GetChainConfig(db, stored)
if err != nil {
if err == ChainConfigNotFoundErr {
if err == ErrChainConfigNotFound {
// This case happens if a genesis write was interrupted.
log.Warn("Found genesis block without chain config")
err = WriteChainConfig(db, stored, newcfg)

View File

@ -201,15 +201,6 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
// header writes should be protected by the parent chain mutex individually.
type WhCallback func(*types.Header) error
// InsertHeaderChain attempts to insert the given header chain in to the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is because some
// of the header retrieval mechanisms already need to verfy nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
// Do a sanity check that the provided chain is actually ordered and linked
for i := 1; i < len(chain); i++ {
@ -257,6 +248,14 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
return 0, nil
}
// InsertHeaderChain attempts to insert the given header chain into the local
// chain, possibly creating a reorg. If an error is returned, it will return the
// index number of the failing header as well as an error describing what went wrong.
//
// The verify parameter can be used to fine tune whether nonce verification
// should be done or not. The reason behind the optional check is that some
// of the header retrieval mechanisms already need to verify nonces, and
// nonces can be verified sparsely, without needing to check each one.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCallback, start time.Time) (int, error) {
// Collect some import statistics to report on
stats := struct{ processed, ignored int }{}

View File

@ -21,8 +21,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/core/types"
// "github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
)
@ -38,24 +36,24 @@ type TestManager struct {
Blocks []*types.Block
}
func (s *TestManager) IsListening() bool {
func (tm *TestManager) IsListening() bool {
return false
}
func (s *TestManager) IsMining() bool {
func (tm *TestManager) IsMining() bool {
return false
}
func (s *TestManager) PeerCount() int {
func (tm *TestManager) PeerCount() int {
return 0
}
func (s *TestManager) Peers() *list.List {
func (tm *TestManager) Peers() *list.List {
return list.New()
}
func (s *TestManager) BlockChain() *BlockChain {
return s.blockChain
func (tm *TestManager) BlockChain() *BlockChain {
return tm.blockChain
}
func (tm *TestManager) TxPool() *TxPool {

View File

@ -71,8 +71,8 @@ type (
hash common.Hash
}
touchChange struct {
account *common.Address
prev bool
account *common.Address
prev bool
prevDirty bool
}
)
@ -91,6 +91,11 @@ func (ch suicideChange) undo(s *StateDB) {
if obj != nil {
obj.suicided = ch.prev
obj.setBalance(ch.prevbalance)
// if the object wasn't suicided before, remove
// it from the list of destructed objects as well.
if !obj.suicided {
delete(s.stateObjectsDestructed, *ch.account)
}
}
}
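The suicideChange tweak above follows the state journal's general undo pattern: every mutation records how to reverse itself, and a revert replays those records backwards. A simplified, self-contained sketch of that pattern (editor's illustration, not the StateDB types):
package main
import "fmt"
type change func() // an undo closure recorded for every state mutation
type journal []change
func (j *journal) record(undo change) { *j = append(*j, undo) }
// revert undoes every change recorded after the given snapshot, newest first.
func (j *journal) revert(snapshot int) {
	for i := len(*j) - 1; i >= snapshot; i-- {
		(*j)[i]()
	}
	*j = (*j)[:snapshot]
}
func main() {
	destructed := map[string]bool{}
	var j journal
	snap := len(j)
	// "Suicide" an account: mutate state and record the inverse operation,
	// mirroring how suicideChange.undo removes it from the destructed set.
	destructed["0xabc"] = true
	j.record(func() { delete(destructed, "0xabc") })
	j.revert(snap)
	fmt.Println(destructed) // map[] -- the destruction was rolled back
}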

View File

@ -62,8 +62,9 @@ type StateDB struct {
codeSizeCache *lru.Cache
// This map holds 'live' objects, which will get modified while processing a state transition.
stateObjects map[common.Address]*stateObject
stateObjectsDirty map[common.Address]struct{}
stateObjects map[common.Address]*stateObject
stateObjectsDirty map[common.Address]struct{}
stateObjectsDestructed map[common.Address]struct{}
// The refund counter, also used by state transitioning.
refund *big.Int
@ -92,14 +93,15 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
}
csc, _ := lru.New(codeSizeCacheSize)
return &StateDB{
db: db,
trie: tr,
codeSizeCache: csc,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
refund: new(big.Int),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
db: db,
trie: tr,
codeSizeCache: csc,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
stateObjectsDestructed: make(map[common.Address]struct{}),
refund: new(big.Int),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
}, nil
}
@ -114,14 +116,15 @@ func (self *StateDB) New(root common.Hash) (*StateDB, error) {
return nil, err
}
return &StateDB{
db: self.db,
trie: tr,
codeSizeCache: self.codeSizeCache,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
refund: new(big.Int),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
db: self.db,
trie: tr,
codeSizeCache: self.codeSizeCache,
stateObjects: make(map[common.Address]*stateObject),
stateObjectsDirty: make(map[common.Address]struct{}),
stateObjectsDestructed: make(map[common.Address]struct{}),
refund: new(big.Int),
logs: make(map[common.Hash][]*types.Log),
preimages: make(map[common.Hash][]byte),
}, nil
}
@ -138,6 +141,7 @@ func (self *StateDB) Reset(root common.Hash) error {
self.trie = tr
self.stateObjects = make(map[common.Address]*stateObject)
self.stateObjectsDirty = make(map[common.Address]struct{})
self.stateObjectsDestructed = make(map[common.Address]struct{})
self.thash = common.Hash{}
self.bhash = common.Hash{}
self.txIndex = 0
@ -173,12 +177,6 @@ func (self *StateDB) pushTrie(t *trie.SecureTrie) {
}
}
func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
self.thash = thash
self.bhash = bhash
self.txIndex = ti
}
func (self *StateDB) AddLog(log *types.Log) {
self.journal = append(self.journal, addLogChange{txhash: self.thash})
@ -380,6 +378,8 @@ func (self *StateDB) Suicide(addr common.Address) bool {
})
stateObject.markSuicided()
stateObject.data.Balance = new(big.Int)
self.stateObjectsDestructed[addr] = struct{}{}
return true
}
@ -510,21 +510,25 @@ func (self *StateDB) Copy() *StateDB {
// Copy all the basic fields, initialize the memory ones
state := &StateDB{
db: self.db,
trie: self.trie,
pastTries: self.pastTries,
codeSizeCache: self.codeSizeCache,
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
refund: new(big.Int).Set(self.refund),
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
logSize: self.logSize,
preimages: make(map[common.Hash][]byte),
db: self.db,
trie: self.trie,
pastTries: self.pastTries,
codeSizeCache: self.codeSizeCache,
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
stateObjectsDestructed: make(map[common.Address]struct{}, len(self.stateObjectsDestructed)),
refund: new(big.Int).Set(self.refund),
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
logSize: self.logSize,
preimages: make(map[common.Hash][]byte),
}
// Copy the dirty states, logs, and preimages
for addr := range self.stateObjectsDirty {
state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state, state.MarkStateObjectDirty)
state.stateObjectsDirty[addr] = struct{}{}
if self.stateObjects[addr].suicided {
state.stateObjectsDestructed[addr] = struct{}{}
}
}
for hash, logs := range self.logs {
state.logs[hash] = make([]*types.Log, len(logs))
@ -590,6 +594,27 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
return s.trie.Hash()
}
// Prepare sets the current transaction hash and index and block hash which is
// used when the EVM emits new state logs.
func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
self.thash = thash
self.bhash = bhash
self.txIndex = ti
}
// Finalise finalises the state by removing the self destructed objects
// in the current stateObjectsDestructed buffer and clears the journal
// as well as the refunds.
//
// Please note that Finalise is used by EIP#98 and is used instead of
// IntermediateRoot.
func (s *StateDB) Finalise() {
for addr := range s.stateObjectsDestructed {
s.deleteStateObject(s.stateObjects[addr])
}
s.clearJournalAndRefund()
}
// DeleteSuicides flags the suicided objects for deletion so that it
// won't be referenced again when called / queried up on.
//

View File

@ -69,7 +69,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
}
// Iterate over and process the individual transactions
for i, tx := range block.Transactions() {
statedb.StartRecord(tx.Hash(), block.Hash(), i)
statedb.Prepare(tx.Hash(), block.Hash(), i)
receipt, _, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, totalUsedGas, cfg)
if err != nil {
return nil, nil, nil, err
@ -107,7 +107,8 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
usedGas.Add(usedGas, gas)
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
// based on the eip phase, we're passing whether the root touch-deletes accounts.
receipt := types.NewReceipt(statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes(), usedGas)
root := statedb.IntermediateRoot(config.IsEIP158(header.Number))
receipt := types.NewReceipt(root.Bytes(), usedGas)
receipt.TxHash = tx.Hash()
receipt.GasUsed = new(big.Int).Set(gas)
// if the transaction created a contract, store the creation address in the receipt.

View File

@ -78,10 +78,6 @@ type Message interface {
Data() []byte
}
func MessageCreatesContract(msg Message) bool {
return msg.To() == nil
}
// IntrinsicGas computes the 'intrinsic gas' for a message
// with the given data.
//
@ -138,112 +134,113 @@ func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, *big.Int, erro
return ret, gasUsed, err
}
func (self *StateTransition) from() vm.AccountRef {
f := self.msg.From()
if !self.state.Exist(f) {
self.state.CreateAccount(f)
func (st *StateTransition) from() vm.AccountRef {
f := st.msg.From()
if !st.state.Exist(f) {
st.state.CreateAccount(f)
}
return vm.AccountRef(f)
}
func (self *StateTransition) to() vm.AccountRef {
if self.msg == nil {
func (st *StateTransition) to() vm.AccountRef {
if st.msg == nil {
return vm.AccountRef{}
}
to := self.msg.To()
to := st.msg.To()
if to == nil {
return vm.AccountRef{} // contract creation
}
reference := vm.AccountRef(*to)
if !self.state.Exist(*to) {
self.state.CreateAccount(*to)
if !st.state.Exist(*to) {
st.state.CreateAccount(*to)
}
return reference
}
func (self *StateTransition) useGas(amount uint64) error {
if self.gas < amount {
func (st *StateTransition) useGas(amount uint64) error {
if st.gas < amount {
return vm.ErrOutOfGas
}
self.gas -= amount
st.gas -= amount
return nil
}
func (self *StateTransition) buyGas() error {
mgas := self.msg.Gas()
func (st *StateTransition) buyGas() error {
mgas := st.msg.Gas()
if mgas.BitLen() > 64 {
return vm.ErrOutOfGas
}
mgval := new(big.Int).Mul(mgas, self.gasPrice)
mgval := new(big.Int).Mul(mgas, st.gasPrice)
var (
state = self.state
sender = self.from()
state = st.state
sender = st.from()
)
if state.GetBalance(sender.Address()).Cmp(mgval) < 0 {
return errInsufficientBalanceForGas
}
if err := self.gp.SubGas(mgas); err != nil {
if err := st.gp.SubGas(mgas); err != nil {
return err
}
self.gas += mgas.Uint64()
st.gas += mgas.Uint64()
self.initialGas.Set(mgas)
st.initialGas.Set(mgas)
state.SubBalance(sender.Address(), mgval)
return nil
}
func (self *StateTransition) preCheck() error {
msg := self.msg
sender := self.from()
func (st *StateTransition) preCheck() error {
msg := st.msg
sender := st.from()
// Make sure this transaction's nonce is correct
if msg.CheckNonce() {
if n := self.state.GetNonce(sender.Address()); n != msg.Nonce() {
if n := st.state.GetNonce(sender.Address()); n != msg.Nonce() {
return fmt.Errorf("invalid nonce: have %d, expected %d", msg.Nonce(), n)
}
}
return self.buyGas()
return st.buyGas()
}
// TransitionDb will transition the state by applying the current message and returning the result
// including the required gas for the operation as well as the used gas. It returns an error if it
// failed. An error indicates a consensus issue.
func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
if err = self.preCheck(); err != nil {
func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
if err = st.preCheck(); err != nil {
return
}
msg := self.msg
sender := self.from() // err checked in preCheck
msg := st.msg
sender := st.from() // err checked in preCheck
homestead := st.evm.ChainConfig().IsHomestead(st.evm.BlockNumber)
contractCreation := msg.To() == nil
homestead := self.evm.ChainConfig().IsHomestead(self.evm.BlockNumber)
contractCreation := MessageCreatesContract(msg)
// Pay intrinsic gas
// TODO convert to uint64
intrinsicGas := IntrinsicGas(self.data, contractCreation, homestead)
intrinsicGas := IntrinsicGas(st.data, contractCreation, homestead)
if intrinsicGas.BitLen() > 64 {
return nil, nil, nil, vm.ErrOutOfGas
}
if err = self.useGas(intrinsicGas.Uint64()); err != nil {
if err = st.useGas(intrinsicGas.Uint64()); err != nil {
return nil, nil, nil, err
}
var (
evm = self.evm
evm = st.evm
// vm errors do not affect consensus and are therefore
// not assigned to err, except for insufficient balance
// error.
vmerr error
)
if contractCreation {
ret, _, self.gas, vmerr = evm.Create(sender, self.data, self.gas, self.value)
ret, _, st.gas, vmerr = evm.Create(sender, st.data, st.gas, st.value)
} else {
// Increment the nonce for the next transaction
self.state.SetNonce(sender.Address(), self.state.GetNonce(sender.Address())+1)
ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
st.state.SetNonce(sender.Address(), st.state.GetNonce(sender.Address())+1)
ret, st.gas, vmerr = evm.Call(sender, st.to().Address(), st.data, st.gas, st.value)
}
if vmerr != nil {
log.Debug("VM returned with error", "err", err)
@ -254,33 +251,33 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
return nil, nil, nil, vmerr
}
}
requiredGas = new(big.Int).Set(self.gasUsed())
requiredGas = new(big.Int).Set(st.gasUsed())
self.refundGas()
self.state.AddBalance(self.evm.Coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))
st.refundGas()
st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(st.gasUsed(), st.gasPrice))
return ret, requiredGas, self.gasUsed(), err
return ret, requiredGas, st.gasUsed(), err
}
func (self *StateTransition) refundGas() {
func (st *StateTransition) refundGas() {
// Return eth for remaining gas to the sender account,
// exchanged at the original rate.
sender := self.from() // err already checked
remaining := new(big.Int).Mul(new(big.Int).SetUint64(self.gas), self.gasPrice)
self.state.AddBalance(sender.Address(), remaining)
sender := st.from() // err already checked
remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gas), st.gasPrice)
st.state.AddBalance(sender.Address(), remaining)
// Apply refund counter, capped to half of the used gas.
uhalf := remaining.Div(self.gasUsed(), common.Big2)
refund := math.BigMin(uhalf, self.state.GetRefund())
self.gas += refund.Uint64()
uhalf := remaining.Div(st.gasUsed(), common.Big2)
refund := math.BigMin(uhalf, st.state.GetRefund())
st.gas += refund.Uint64()
self.state.AddBalance(sender.Address(), refund.Mul(refund, self.gasPrice))
st.state.AddBalance(sender.Address(), refund.Mul(refund, st.gasPrice))
// Also return remaining gas to the block gas counter so it is
// available for the next transaction.
self.gp.AddGas(new(big.Int).SetUint64(self.gas))
st.gp.AddGas(new(big.Int).SetUint64(st.gas))
}
func (self *StateTransition) gasUsed() *big.Int {
return new(big.Int).Sub(self.initialGas, new(big.Int).SetUint64(self.gas))
func (st *StateTransition) gasUsed() *big.Int {
return new(big.Int).Sub(st.initialGas, new(big.Int).SetUint64(st.gas))
}

View File

@ -22,7 +22,9 @@ import (
"math/big"
"sort"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
@ -53,11 +55,11 @@ type txSortedMap struct {
cache types.Transactions // Cache of the transactions already sorted
}
// newTxSortedMap creates a new sorted transaction map.
// newTxSortedMap creates a new nonce-sorted transaction map.
func newTxSortedMap() *txSortedMap {
return &txSortedMap{
items: make(map[uint64]*types.Transaction),
index: &nonceHeap{},
index: new(nonceHeap),
}
}
@ -218,9 +220,11 @@ func (m *txSortedMap) Flatten() types.Transactions {
// the executable/pending queue; and for storing gapped transactions for the non-
// executable/future queue, with minor behavioral changes.
type txList struct {
strict bool // Whether nonces are strictly continuous or not
txs *txSortedMap // Heap indexed sorted hash map of the transactions
costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
strict bool // Whether nonces are strictly continuous or not
txs *txSortedMap // Heap indexed sorted hash map of the transactions
costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
gascap *big.Int // Gas limit of the highest spending transaction (reset only if exceeds block limit)
}
// newTxList create a new transaction list for maintaining nonce-indexable fast,
@ -230,25 +234,38 @@ func newTxList(strict bool) *txList {
strict: strict,
txs: newTxSortedMap(),
costcap: new(big.Int),
gascap: new(big.Int),
}
}
// Overlaps returns whether the transaction specified has the same nonce as one
// already contained within the list.
func (l *txList) Overlaps(tx *types.Transaction) bool {
return l.txs.Get(tx.Nonce()) != nil
}
// Add tries to insert a new transaction into the list, returning whether the
// transaction was accepted, and if yes, any previous transaction it replaced.
//
// If the new transaction is accepted into the list, the lists' cost threshold
// is also potentially updated.
func (l *txList) Add(tx *types.Transaction) (bool, *types.Transaction) {
// If the new transaction is accepted into the list, the list's cost and gas
// thresholds are also potentially updated.
func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) {
// If there's an older better transaction, abort
old := l.txs.Get(tx.Nonce())
if old != nil && old.GasPrice().Cmp(tx.GasPrice()) >= 0 {
return false, nil
if old != nil {
threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100))
if threshold.Cmp(tx.GasPrice()) >= 0 {
return false, nil
}
}
// Otherwise overwrite the old transaction with the current one
l.txs.Put(tx)
if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 {
l.costcap = cost
}
if gas := tx.Gas(); l.gascap.Cmp(gas) < 0 {
l.gascap = gas
}
return true, old
}
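
The replacement rule introduced here only admits a transaction with the same nonce if its price exceeds the old price by at least priceBump percent. A small standalone sketch of that threshold check; needsBump and the sample prices are illustrative, not part of the actual txList:

// Sketch of the price-bump replacement threshold used by txList.Add (assumed 10% bump).
package main

import (
    "fmt"
    "math/big"
)

// needsBump reports whether newPrice is high enough to replace oldPrice, i.e.
// strictly greater than oldPrice * (100 + priceBump) / 100.
func needsBump(oldPrice, newPrice *big.Int, priceBump uint64) bool {
    threshold := new(big.Int).Div(
        new(big.Int).Mul(oldPrice, big.NewInt(100+int64(priceBump))),
        big.NewInt(100),
    )
    return newPrice.Cmp(threshold) > 0
}

func main() {
    old := big.NewInt(100)                           // existing tx priced at 100 wei/gas (assumed)
    fmt.Println(needsBump(old, big.NewInt(110), 10)) // false: exactly the threshold is rejected
    fmt.Println(needsBump(old, big.NewInt(111), 10)) // true: strictly above the 10% bump
}
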
@ -259,23 +276,25 @@ func (l *txList) Forward(threshold uint64) types.Transactions {
return l.txs.Forward(threshold)
}
// Filter removes all transactions from the list with a cost higher than the
// provided threshold. Every removed transaction is returned for any post-removal
// maintenance. Strict-mode invalidated transactions are also returned.
// Filter removes all transactions from the list with a cost or gas limit higher
// than the provided thresholds. Every removed transaction is returned for any
// post-removal maintenance. Strict-mode invalidated transactions are also
// returned.
//
// This method uses the cached costcap to quickly decide if there's even a point
// in calculating all the costs or if the balance covers all. If the threshold is
// lower than the costcap, the costcap will be reset to a new high after removing
the too expensive transactions.
func (l *txList) Filter(threshold *big.Int) (types.Transactions, types.Transactions) {
// This method uses the cached costcap and gascap to quickly decide if there's even
// a point in calculating all the costs or if the balance covers all. If the threshold
// is lower than the cost or gas cap, the caps will be reset to a new high after removing
// the newly invalidated transactions.
func (l *txList) Filter(costLimit, gasLimit *big.Int) (types.Transactions, types.Transactions) {
// If all transactions are below the threshold, short circuit
if l.costcap.Cmp(threshold) <= 0 {
if l.costcap.Cmp(costLimit) <= 0 && l.gascap.Cmp(gasLimit) <= 0 {
return nil, nil
}
l.costcap = new(big.Int).Set(threshold) // Lower the cap to the threshold
l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds
l.gascap = new(big.Int).Set(gasLimit)
// Filter out all the transactions above the account's funds
removed := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Cost().Cmp(threshold) > 0 })
removed := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Cost().Cmp(costLimit) > 0 || tx.Gas().Cmp(gasLimit) > 0 })
// If the list was strict, filter anything above the lowest nonce
var invalids types.Transactions
@ -340,3 +359,150 @@ func (l *txList) Empty() bool {
func (l *txList) Flatten() types.Transactions {
return l.txs.Flatten()
}
// priceHeap is a heap.Interface implementation over transactions for retrieving
// price-sorted transactions to discard when the pool fills up.
type priceHeap []*types.Transaction
func (h priceHeap) Len() int { return len(h) }
func (h priceHeap) Less(i, j int) bool { return h[i].GasPrice().Cmp(h[j].GasPrice()) < 0 }
func (h priceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *priceHeap) Push(x interface{}) {
*h = append(*h, x.(*types.Transaction))
}
func (h *priceHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
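
priceHeap is a standard container/heap min-heap keyed on gas price. A self-contained sketch of the same heap.Interface pattern, using plain ints as stand-ins for priced transactions:

// Sketch of the heap.Interface pattern priceHeap implements, with ints standing in
// for gas prices.
package main

import (
    "container/heap"
    "fmt"
)

type intHeap []int

func (h intHeap) Len() int           { return len(h) }
func (h intHeap) Less(i, j int) bool { return h[i] < h[j] } // min-heap: cheapest first
func (h intHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *intHeap) Push(x interface{}) { *h = append(*h, x.(int)) }

func (h *intHeap) Pop() interface{} {
    old := *h
    n := len(old)
    x := old[n-1]
    *h = old[:n-1]
    return x
}

func main() {
    prices := &intHeap{}
    for _, p := range []int{5, 1, 9, 3} {
        heap.Push(prices, p)
    }
    fmt.Println(heap.Pop(prices)) // 1: the cheapest entry is always at the root
}
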
// txPricedList is a price-sorted heap to allow operating on transactions pool
// contents in a price-incrementing way.
type txPricedList struct {
all *map[common.Hash]*types.Transaction // Pointer to the map of all transactions
items *priceHeap // Heap of prices of all the stored transactions
stales int // Number of stale price points (re-heap trigger)
}
// newTxPricedList creates a new price-sorted transaction heap.
func newTxPricedList(all *map[common.Hash]*types.Transaction) *txPricedList {
return &txPricedList{
all: all,
items: new(priceHeap),
}
}
// Put inserts a new transaction into the heap.
func (l *txPricedList) Put(tx *types.Transaction) {
heap.Push(l.items, tx)
}
// Removed notifies the price-sorted transaction list that an old transaction dropped
// from the pool. The list will just keep a counter of stale objects and update
// the heap if a large enough ratio of transactions go stale.
func (l *txPricedList) Removed() {
// Bump the stale counter, but exit if still too low (< 25%)
l.stales++
if l.stales <= len(*l.items)/4 {
return
}
// Seems we've reached a critical number of stale transactions, reheap
reheap := make(priceHeap, 0, len(*l.all))
l.stales, l.items = 0, &reheap
for _, tx := range *l.all {
*l.items = append(*l.items, tx)
}
heap.Init(l.items)
}
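
Removed uses lazy deletion: dropped transactions are only counted as stale, and the heap is rebuilt from the authoritative map once more than a quarter of its entries have gone stale. A standalone sketch of that bookkeeping on a plain slice; the lazyList type below is illustrative, not the real txPricedList:

// Sketch of the lazy-deletion bookkeeping in txPricedList.Removed.
package main

import "fmt"

type lazyList struct {
    live   map[int]bool // authoritative set, like the pool's "all" map
    heaped []int        // possibly stale copies, like the priced "items" heap
    stales int
}

func (l *lazyList) removed() {
    l.stales++
    if l.stales <= len(l.heaped)/4 {
        return // cheap path: just count the stale entry
    }
    // Too many stale entries: rebuild from the authoritative set.
    rebuilt := make([]int, 0, len(l.live))
    for v := range l.live {
        rebuilt = append(rebuilt, v)
    }
    l.heaped, l.stales = rebuilt, 0
}

func main() {
    l := &lazyList{live: map[int]bool{1: true, 2: true, 3: true, 4: true}, heaped: []int{1, 2, 3, 4}}

    delete(l.live, 3)
    l.removed()
    fmt.Println(len(l.heaped), l.stales) // 4 1: still holding the stale entry

    delete(l.live, 4)
    l.removed()
    fmt.Println(len(l.heaped), l.stales) // 2 0: rebuilt once >25% went stale
}
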
// Cap finds all the transactions below the given price threshold, drops them
// from the priced list and returns them for further removal from the entire pool.
func (l *txPricedList) Cap(threshold *big.Int, local *txSet) types.Transactions {
drop := make(types.Transactions, 0, 128) // Remote underpriced transactions to drop
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
for len(*l.items) > 0 {
// Discard stale transactions if found during cleanup
tx := heap.Pop(l.items).(*types.Transaction)
hash := tx.Hash()
if _, ok := (*l.all)[hash]; !ok {
l.stales--
continue
}
// Stop the discards if we've reached the threshold
if tx.GasPrice().Cmp(threshold) >= 0 {
break
}
// Non-stale transaction found, discard unless local
if local.contains(hash) {
save = append(save, tx)
} else {
drop = append(drop, tx)
}
}
for _, tx := range save {
heap.Push(l.items, tx)
}
return drop
}
// Underpriced checks whether a transaction is cheaper than (or as cheap as) the
// lowest priced transaction currently being tracked.
func (l *txPricedList) Underpriced(tx *types.Transaction, local *txSet) bool {
// Local transactions cannot be underpriced
if local.contains(tx.Hash()) {
return false
}
// Discard stale price points if found at the heap start
for len(*l.items) > 0 {
head := []*types.Transaction(*l.items)[0]
if _, ok := (*l.all)[head.Hash()]; !ok {
l.stales--
heap.Pop(l.items)
continue
}
break
}
// Check if the transaction is underpriced or not
if len(*l.items) == 0 {
log.Error("Pricing query for empty pool") // This cannot happen, print to catch programming errors
return false
}
cheapest := []*types.Transaction(*l.items)[0]
return cheapest.GasPrice().Cmp(tx.GasPrice()) >= 0
}
// Discard finds the requested number of most underpriced transactions, removes
// them from the priced list and returns them for further removal from the entire pool.
func (l *txPricedList) Discard(count int, local *txSet) types.Transactions {
drop := make(types.Transactions, 0, count) // Remote underpriced transactions to drop
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
for len(*l.items) > 0 && count > 0 {
// Discard stale transactions if found during cleanup
tx := heap.Pop(l.items).(*types.Transaction)
hash := tx.Hash()
if _, ok := (*l.all)[hash]; !ok {
l.stales--
continue
}
// Non-stale transaction found, discard unless local
if local.contains(hash) {
save = append(save, tx)
} else {
drop = append(drop, tx)
count--
}
}
for _, tx := range save {
heap.Push(l.items, tx)
}
return drop
}

View File

@ -38,7 +38,7 @@ func TestStrictTxListAdd(t *testing.T) {
// Insert the transactions in a random order
list := newTxList(true)
for _, v := range rand.Perm(len(txs)) {
list.Add(txs[v])
list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
}
// Verify internal state
if len(list.txs.items) != len(txs) {

View File

@ -36,23 +36,20 @@ import (
var (
// Transaction Pool Errors
ErrInvalidSender = errors.New("Invalid sender")
ErrNonce = errors.New("Nonce too low")
ErrCheap = errors.New("Gas price too low for acceptance")
ErrBalance = errors.New("Insufficient balance")
ErrInsufficientFunds = errors.New("Insufficient funds for gas * price + value")
ErrIntrinsicGas = errors.New("Intrinsic gas too low")
ErrGasLimit = errors.New("Exceeds block gas limit")
ErrNegativeValue = errors.New("Negative value")
ErrInvalidSender = errors.New("invalid sender")
ErrNonce = errors.New("nonce too low")
ErrUnderpriced = errors.New("transaction underpriced")
ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
ErrBalance = errors.New("insufficient balance")
ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
ErrIntrinsicGas = errors.New("intrinsic gas too low")
ErrGasLimit = errors.New("exceeds block gas limit")
ErrNegativeValue = errors.New("negative value")
)
var (
minPendingPerAccount = uint64(16) // Min number of guaranteed transaction slots per address
maxPendingTotal = uint64(4096) // Max limit of pending transactions from all accounts (soft)
maxQueuedPerAccount = uint64(64) // Max limit of queued transactions per address
maxQueuedInTotal = uint64(1024) // Max limit of queued transactions from all accounts
maxQueuedLifetime = 3 * time.Hour // Max amount of time transactions from idle accounts are queued
evictionInterval = time.Minute // Time interval to check for evictable transactions
evictionInterval = time.Minute // Time interval to check for evictable transactions
statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
)
var (
@ -69,11 +66,54 @@ var (
queuedNofundsCounter = metrics.NewCounter("txpool/queued/nofunds") // Dropped due to out-of-funds
// General tx metrics
invalidTxCounter = metrics.NewCounter("txpool/invalid")
invalidTxCounter = metrics.NewCounter("txpool/invalid")
underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
)
type stateFn func() (*state.StateDB, error)
// TxPoolConfig are the configuration parameters of the transaction pool.
type TxPoolConfig struct {
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account
GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
Lifetime time.Duration // Maximum amount of time non-executable transactions are queued
}
// DefaultTxPoolConfig contains the default configurations for the transaction
// pool.
var DefaultTxPoolConfig = TxPoolConfig{
PriceLimit: 1,
PriceBump: 10,
AccountSlots: 16,
GlobalSlots: 4096,
AccountQueue: 64,
GlobalQueue: 1024,
Lifetime: 3 * time.Hour,
}
// sanitize checks the provided user configurations and changes anything that's
// unreasonable or unworkable.
func (config *TxPoolConfig) sanitize() TxPoolConfig {
conf := *config
if conf.PriceLimit < 1 {
log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
}
if conf.PriceBump < 1 {
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
conf.PriceBump = DefaultTxPoolConfig.PriceBump
}
return conf
}
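
sanitize copies the user-supplied config and clamps any field that would be unsafe (a zero price limit or price bump) back to the default. A stripped-down sketch of that copy-override-clamp pattern; poolConfig below is an illustrative local type, not the real core.TxPoolConfig:

// Sketch of the copy-override-clamp pattern behind TxPoolConfig.sanitize.
package main

import "fmt"

type poolConfig struct {
    PriceLimit uint64
    PriceBump  uint64
}

var defaults = poolConfig{PriceLimit: 1, PriceBump: 10}

func (c poolConfig) sanitize() poolConfig {
    if c.PriceLimit < 1 {
        fmt.Println("sanitizing invalid price limit", c.PriceLimit, "->", defaults.PriceLimit)
        c.PriceLimit = defaults.PriceLimit
    }
    if c.PriceBump < 1 {
        fmt.Println("sanitizing invalid price bump", c.PriceBump, "->", defaults.PriceBump)
        c.PriceBump = defaults.PriceBump
    }
    return c
}

func main() {
    cfg := defaults
    cfg.PriceLimit = 0 // an invalid user override
    fmt.Printf("%+v\n", cfg.sanitize())
}
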
// TxPool contains all currently known transactions. Transactions
// enter the pool when they are received from the network or submitted
// locally. They exit the pool when they are included in the blockchain.
@ -82,21 +122,23 @@ type stateFn func() (*state.StateDB, error)
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type TxPool struct {
config *params.ChainConfig
config TxPoolConfig
chainconfig *params.ChainConfig
currentState stateFn // The state function which will allow us to do some pre checks
pendingState *state.ManagedState
gasLimit func() *big.Int // The current gas limit function callback
minGasPrice *big.Int
gasPrice *big.Int
eventMux *event.TypeMux
events *event.TypeMuxSubscription
localTx *txSet
locals *txSet
signer types.Signer
mu sync.RWMutex
pending map[common.Address]*txList // All currently processable transactions
queue map[common.Address]*txList // Queued but non-processable transactions
all map[common.Hash]*types.Transaction // All transactions to allow lookups
beats map[common.Address]time.Time // Last heartbeat from each known account
all map[common.Hash]*types.Transaction // All transactions to allow lookups
priced *txPricedList // All transactions sorted by price
wg sync.WaitGroup // for shutdown sync
quit chan struct{}
@ -104,26 +146,34 @@ type TxPool struct {
homestead bool
}
func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
// Sanitize the input to ensure no vulnerable gas prices are set
config = (&config).sanitize()
// Create the transaction pool with its initial settings
pool := &TxPool{
config: config,
signer: types.NewEIP155Signer(config.ChainId),
chainconfig: chainconfig,
signer: types.NewEIP155Signer(chainconfig.ChainId),
pending: make(map[common.Address]*txList),
queue: make(map[common.Address]*txList),
all: make(map[common.Hash]*types.Transaction),
beats: make(map[common.Address]time.Time),
all: make(map[common.Hash]*types.Transaction),
eventMux: eventMux,
currentState: currentStateFn,
gasLimit: gasLimitFn,
minGasPrice: new(big.Int),
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
pendingState: nil,
localTx: newTxSet(),
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
locals: newTxSet(),
events: eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
quit: make(chan struct{}),
}
pool.priced = newTxPricedList(&pool.all)
pool.resetState()
// Start the various events loops and return
pool.wg.Add(2)
go pool.eventLoop()
go pool.expirationLoop()
@ -134,27 +184,48 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState
func (pool *TxPool) eventLoop() {
defer pool.wg.Done()
// Start a ticker and keep track of interesting pool stats to report
var prevPending, prevQueued, prevStales int
report := time.NewTicker(statsReportInterval)
defer report.Stop()
// Track chain events. When a chain event occurs (new canonical chain block)
// we need to know the new state. The new state will help us determine
// the nonces in the managed state
for ev := range pool.events.Chan() {
switch ev := ev.Data.(type) {
case ChainHeadEvent:
pool.mu.Lock()
if ev.Block != nil {
if pool.config.IsHomestead(ev.Block.Number()) {
pool.homestead = true
for {
select {
// Handle any events fired by the system
case ev, ok := <-pool.events.Chan():
if !ok {
return
}
switch ev := ev.Data.(type) {
case ChainHeadEvent:
pool.mu.Lock()
if ev.Block != nil {
if pool.chainconfig.IsHomestead(ev.Block.Number()) {
pool.homestead = true
}
}
pool.resetState()
pool.mu.Unlock()
case RemovedTransactionEvent:
pool.AddBatch(ev.Txs)
}
pool.resetState()
pool.mu.Unlock()
case GasPriceChanged:
pool.mu.Lock()
pool.minGasPrice = ev.Price
pool.mu.Unlock()
case RemovedTransactionEvent:
pool.AddBatch(ev.Txs)
// Handle stats reporting ticks
case <-report.C:
pool.mu.RLock()
pending, queued := pool.stats()
stales := pool.priced.stales
pool.mu.RUnlock()
if pending != prevPending || queued != prevQueued || stales != prevStales {
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
prevPending, prevQueued, prevStales = pending, queued, stales
}
}
}
}
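
The reworked event loop multiplexes the event subscription with a stats ticker in a single select and exits when the subscription channel closes. A minimal standalone sketch of that shape; the channel, event name and 50ms interval below are placeholders:

// Sketch of the select-over-events-plus-ticker pattern used by eventLoop.
package main

import (
    "fmt"
    "time"
)

func main() {
    events := make(chan string)
    report := time.NewTicker(50 * time.Millisecond)
    defer report.Stop()

    go func() {
        events <- "ChainHeadEvent"
        close(events) // closing the feed ends the loop, like Unsubscribe does
    }()

    for {
        select {
        case ev, ok := <-events:
            if !ok {
                fmt.Println("event feed closed, exiting")
                return
            }
            fmt.Println("handling", ev)
        case <-report.C:
            fmt.Println("stats tick")
        }
    }
}
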
@ -180,9 +251,10 @@ func (pool *TxPool) resetState() {
}
// Check the queue and move transactions over to the pending if possible
// or remove those that have become invalid
pool.promoteExecutables(currentState)
pool.promoteExecutables(currentState, nil)
}
// Stop terminates the transaction pool.
func (pool *TxPool) Stop() {
pool.events.Unsubscribe()
close(pool.quit)
@ -191,6 +263,28 @@ func (pool *TxPool) Stop() {
log.Info("Transaction pool stopped")
}
// GasPrice returns the current gas price enforced by the transaction pool.
func (pool *TxPool) GasPrice() *big.Int {
pool.mu.RLock()
defer pool.mu.RUnlock()
return new(big.Int).Set(pool.gasPrice)
}
// SetGasPrice updates the minimum price required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *TxPool) SetGasPrice(price *big.Int) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.gasPrice = price
for _, tx := range pool.priced.Cap(price, pool.locals) {
pool.removeTx(tx.Hash())
}
log.Info("Transaction pool price threshold updated", "price", price)
}
// State returns the virtual managed state of the transaction pool.
func (pool *TxPool) State() *state.ManagedState {
pool.mu.RLock()
defer pool.mu.RUnlock()
@ -200,17 +294,25 @@ func (pool *TxPool) State() *state.ManagedState {
// Stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) Stats() (pending int, queued int) {
func (pool *TxPool) Stats() (int, int) {
pool.mu.RLock()
defer pool.mu.RUnlock()
return pool.stats()
}
// stats retrieves the current pool stats, namely the number of pending and the
// number of queued (non-executable) transactions.
func (pool *TxPool) stats() (int, int) {
pending := 0
for _, list := range pool.pending {
pending += list.Len()
}
queued := 0
for _, list := range pool.queue {
queued += list.Len()
}
return
return pending, queued
}
// Content retrieves the data content of the transaction pool, returning all the
@ -237,17 +339,6 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
pool.mu.Lock()
defer pool.mu.Unlock()
state, err := pool.currentState()
if err != nil {
return nil, err
}
// check queue first
pool.promoteExecutables(state)
// invalidate any txs
pool.demoteUnexecutables(state)
pending := make(map[common.Address]types.Transactions)
for addr, list := range pool.pending {
pending[addr] = list.Flatten()
@ -260,16 +351,16 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
func (pool *TxPool) SetLocal(tx *types.Transaction) {
pool.mu.Lock()
defer pool.mu.Unlock()
pool.localTx.add(tx.Hash())
pool.locals.add(tx.Hash())
}
// validateTx checks whether a transaction is valid according
// to the consensus rules.
func (pool *TxPool) validateTx(tx *types.Transaction) error {
local := pool.localTx.contains(tx.Hash())
local := pool.locals.contains(tx.Hash())
// Drop transactions under our own minimal accepted gas price
if !local && pool.minGasPrice.Cmp(tx.GasPrice()) > 0 {
return ErrCheap
if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
return ErrUnderpriced
}
currentState, err := pool.currentState()
@ -314,47 +405,92 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
}
// add validates a transaction and inserts it into the non-executable queue for
// later pending promotion and execution.
func (pool *TxPool) add(tx *types.Transaction) error {
// later pending promotion and execution. If the transaction is a replacement for
// an already pending or queued one, it overwrites the previous and returns this
// so outer code doesn't uselessly call promote.
func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
// If the transaction is already known, discard it
hash := tx.Hash()
if pool.all[hash] != nil {
log.Trace("Discarding already known transaction", "hash", hash)
return fmt.Errorf("known transaction: %x", hash)
return false, fmt.Errorf("known transaction: %x", hash)
}
// Otherwise ensure basic validation passes and queue it up
// If the transaction fails basic validation, discard it
if err := pool.validateTx(tx); err != nil {
log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
invalidTxCounter.Inc(1)
return err
return false, err
}
pool.enqueueTx(hash, tx)
// If the transaction pool is full, discard underpriced transactions
if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
// If the new transaction is underpriced, don't accept it
if pool.priced.Underpriced(tx, pool.locals) {
log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
underpricedTxCounter.Inc(1)
return false, ErrUnderpriced
}
// New transaction is better than our worse ones, make room for it
drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
underpricedTxCounter.Inc(1)
pool.removeTx(tx.Hash())
}
}
// If the transaction is replacing an already pending one, replace it directly
from, _ := types.Sender(pool.signer, tx) // already validated
if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
// Nonce already pending, check if required price bump is met
inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
pendingDiscardCounter.Inc(1)
return false, ErrReplaceUnderpriced
}
// New transaction is better, replace old one
if old != nil {
delete(pool.all, old.Hash())
pool.priced.Removed()
pendingReplaceCounter.Inc(1)
}
pool.all[tx.Hash()] = tx
pool.priced.Put(tx)
// Print a log message if low enough level is set
log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
return nil
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
return old != nil, nil
}
// New transaction isn't replacing a pending one, push into queue
replace, err := pool.enqueueTx(hash, tx)
if err != nil {
return false, err
}
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
return replace, nil
}
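
When the pool is full, add rejects the incoming transaction if it is no better than the cheapest tracked one, and otherwise evicts just enough of the cheapest remote transactions to free a slot. A standalone sketch of that admission policy over a sorted slice of prices; admit, the slice and the capacity are illustrative stand-ins for the priced heap and the slot limits:

// Sketch of the admission policy in add when the pool is full.
package main

import (
    "fmt"
    "sort"
)

// admit decides whether a transaction priced newPrice may enter a pool of size
// capacity, returning the prices that would be evicted to make room.
func admit(pool []int, capacity, newPrice int) (bool, []int) {
    if len(pool) < capacity {
        return true, nil
    }
    sort.Ints(pool) // cheapest first, like popping the priced heap
    if newPrice <= pool[0] {
        return false, nil // underpriced: no better than the cheapest resident
    }
    // Evict just enough of the cheapest entries to leave one free slot.
    n := len(pool) - (capacity - 1)
    return true, pool[:n]
}

func main() {
    pool := []int{5, 1, 9, 3}
    fmt.Println(admit(pool, 4, 1)) // false []: rejected as underpriced
    fmt.Println(admit(pool, 4, 7)) // true [1]: the cheapest entry gets dropped
}
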
// enqueueTx inserts a new transaction into the non-executable transaction queue.
//
// Note, this method assumes the pool lock is held!
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) {
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
// Try to insert the transaction into the future queue
from, _ := types.Sender(pool.signer, tx) // already validated
if pool.queue[from] == nil {
pool.queue[from] = newTxList(false)
}
inserted, old := pool.queue[from].Add(tx)
inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
if !inserted {
// An older transaction was better, discard this
queuedDiscardCounter.Inc(1)
return // An older transaction was better, discard this
return false, ErrReplaceUnderpriced
}
// Discard any previous transaction and mark this
if old != nil {
delete(pool.all, old.Hash())
pool.priced.Removed()
queuedReplaceCounter.Inc(1)
}
pool.all[hash] = tx
pool.priced.Put(tx)
return old != nil, nil
}
// promoteTx adds a transaction to the pending (processable) list of transactions.
@ -367,20 +503,27 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
}
list := pool.pending[addr]
inserted, old := list.Add(tx)
inserted, old := list.Add(tx, pool.config.PriceBump)
if !inserted {
// An older transaction was better, discard this
delete(pool.all, hash)
pool.priced.Removed()
pendingDiscardCounter.Inc(1)
return
}
// Otherwise discard any previous transaction and mark this
if old != nil {
delete(pool.all, old.Hash())
pool.priced.Removed()
pendingReplaceCounter.Inc(1)
}
pool.all[hash] = tx // Failsafe to work around direct pending inserts (tests)
// Failsafe to work around direct pending inserts (tests)
if pool.all[hash] == nil {
pool.all[hash] = tx
pool.priced.Put(tx)
}
// Set the potentially new pending nonce and notify any subsystems of the new tx
pool.beats[addr] = time.Now()
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
@ -392,16 +535,20 @@ func (pool *TxPool) Add(tx *types.Transaction) error {
pool.mu.Lock()
defer pool.mu.Unlock()
if err := pool.add(tx); err != nil {
return err
}
state, err := pool.currentState()
// Try to inject the transaction and update any state
replace, err := pool.add(tx)
if err != nil {
return err
}
pool.promoteExecutables(state)
// If we added a new transaction, run promotion checks and return
if !replace {
state, err := pool.currentState()
if err != nil {
return err
}
from, _ := types.Sender(pool.signer, tx) // already validated
pool.promoteExecutables(state, []common.Address{from})
}
return nil
}
@ -411,19 +558,26 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
defer pool.mu.Unlock()
// Add the batch of transaction, tracking the accepted ones
added := 0
dirty := make(map[common.Address]struct{})
for _, tx := range txs {
if err := pool.add(tx); err == nil {
added++
if replace, err := pool.add(tx); err == nil {
if !replace {
from, _ := types.Sender(pool.signer, tx) // already validated
dirty[from] = struct{}{}
}
}
}
// Only reprocess the internal state if something was actually added
if added > 0 {
if len(dirty) > 0 {
state, err := pool.currentState()
if err != nil {
return err
}
pool.promoteExecutables(state)
addrs := make([]common.Address, 0, len(dirty))
for addr, _ := range dirty {
addrs = append(addrs, addr)
}
pool.promoteExecutables(state, addrs)
}
return nil
}
@ -467,6 +621,7 @@ func (pool *TxPool) removeTx(hash common.Hash) {
// Remove it from the list of known transactions
delete(pool.all, hash)
pool.priced.Removed()
// Remove the transaction from the pending lists and reset the account nonce
if pending := pool.pending[addr]; pending != nil {
@ -499,35 +654,51 @@ func (pool *TxPool) removeTx(hash common.Hash) {
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
func (pool *TxPool) promoteExecutables(state *state.StateDB) {
func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.Address) {
gaslimit := pool.gasLimit()
// Gather all the accounts potentially needing updates
if accounts == nil {
accounts = make([]common.Address, 0, len(pool.queue))
for addr, _ := range pool.queue {
accounts = append(accounts, addr)
}
}
// Iterate over all accounts and promote any executable transactions
queued := uint64(0)
for addr, list := range pool.queue {
for _, addr := range accounts {
list := pool.queue[addr]
if list == nil {
continue // Just in case someone calls with a non-existent account
}
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(state.GetNonce(addr)) {
hash := tx.Hash()
log.Debug("Removed old queued transaction", "hash", hash)
log.Trace("Removed old queued transaction", "hash", hash)
delete(pool.all, hash)
pool.priced.Removed()
}
// Drop all transactions that are too costly (low balance)
drops, _ := list.Filter(state.GetBalance(addr))
// Drop all transactions that are too costly (low balance or out of gas)
drops, _ := list.Filter(state.GetBalance(addr), gaslimit)
for _, tx := range drops {
hash := tx.Hash()
log.Debug("Removed unpayable queued transaction", "hash", hash)
log.Trace("Removed unpayable queued transaction", "hash", hash)
delete(pool.all, hash)
pool.priced.Removed()
queuedNofundsCounter.Inc(1)
}
// Gather all executable transactions and promote them
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
hash := tx.Hash()
log.Debug("Promoting queued transaction", "hash", hash)
log.Trace("Promoting queued transaction", "hash", hash)
pool.promoteTx(addr, hash, tx)
}
// Drop all transactions over the allowed limit
for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
hash := tx.Hash()
log.Debug("Removed cap-exceeding queued transaction", "hash", hash)
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
delete(pool.all, hash)
pool.priced.Removed()
queuedRLCounter.Inc(1)
}
queued += uint64(list.Len())
@ -542,16 +713,16 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
for _, list := range pool.pending {
pending += uint64(list.Len())
}
if pending > maxPendingTotal {
if pending > pool.config.GlobalSlots {
pendingBeforeCap := pending
// Assemble a spam order to penalize large transactors first
spammers := prque.New()
for addr, list := range pool.pending {
// Only evict transactions from high rollers
if uint64(list.Len()) > minPendingPerAccount {
if uint64(list.Len()) > pool.config.AccountSlots {
// Skip local accounts as pools should maintain backlogs for themselves
for _, tx := range list.txs.items {
if !pool.localTx.contains(tx.Hash()) {
if !pool.locals.contains(tx.Hash()) {
spammers.Push(addr, float32(list.Len()))
}
break // Checking one transaction for locality is enough
@ -560,7 +731,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
}
// Gradually drop transactions from offenders
offenders := []common.Address{}
for pending > maxPendingTotal && !spammers.Empty() {
for pending > pool.config.GlobalSlots && !spammers.Empty() {
// Retrieve the next offender if not local address
offender, _ := spammers.Pop()
offenders = append(offenders, offender.(common.Address))
@ -571,7 +742,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
threshold := pool.pending[offender.(common.Address)].Len()
// Iteratively reduce all offenders until below limit or threshold reached
for pending > maxPendingTotal && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
for i := 0; i < len(offenders)-1; i++ {
list := pool.pending[offenders[i]]
list.Cap(list.Len() - 1)
@ -581,8 +752,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
}
}
// If still above threshold, reduce to limit or min allowance
if pending > maxPendingTotal && len(offenders) > 0 {
for pending > maxPendingTotal && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > minPendingPerAccount {
if pending > pool.config.GlobalSlots && len(offenders) > 0 {
for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
for _, addr := range offenders {
list := pool.pending[addr]
list.Cap(list.Len() - 1)
@ -593,7 +764,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
pendingRLCounter.Inc(int64(pendingBeforeCap - pending))
}
// If we've queued more transactions than the hard limit, drop oldest ones
if queued > maxQueuedInTotal {
if queued > pool.config.GlobalQueue {
// Sort all accounts with queued transactions by heartbeat
addresses := make(addresssByHeartbeat, 0, len(pool.queue))
for addr := range pool.queue {
@ -602,7 +773,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
sort.Sort(addresses)
// Drop transactions until the total is below the limit
for drop := queued - maxQueuedInTotal; drop > 0; {
for drop := queued - pool.config.GlobalQueue; drop > 0; {
addr := addresses[len(addresses)-1]
list := pool.queue[addr.address]
@ -632,6 +803,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
gaslimit := pool.gasLimit()
// Iterate over all accounts and demote any non-executable transactions
for addr, list := range pool.pending {
nonce := state.GetNonce(addr)
@ -639,20 +812,22 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
// Drop all transactions that are deemed too old (low nonce)
for _, tx := range list.Forward(nonce) {
hash := tx.Hash()
log.Debug("Removed old pending transaction", "hash", hash)
log.Trace("Removed old pending transaction", "hash", hash)
delete(pool.all, hash)
pool.priced.Removed()
}
// Drop all transactions that are too costly (low balance), and queue any invalids back for later
drops, invalids := list.Filter(state.GetBalance(addr))
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
drops, invalids := list.Filter(state.GetBalance(addr), gaslimit)
for _, tx := range drops {
hash := tx.Hash()
log.Debug("Removed unpayable pending transaction", "hash", hash)
log.Trace("Removed unpayable pending transaction", "hash", hash)
delete(pool.all, hash)
pool.priced.Removed()
pendingNofundsCounter.Inc(1)
}
for _, tx := range invalids {
hash := tx.Hash()
log.Debug("Demoting pending transaction", "hash", hash)
log.Trace("Demoting pending transaction", "hash", hash)
pool.enqueueTx(hash, tx)
}
// Delete the entire queue entry if it became empty.
@ -677,7 +852,7 @@ func (pool *TxPool) expirationLoop() {
case <-evict.C:
pool.mu.Lock()
for addr := range pool.queue {
if time.Since(pool.beats[addr]) > maxQueuedLifetime {
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
for _, tx := range pool.queue[addr].Flatten() {
pool.removeTx(tx.Hash())
}
@ -729,22 +904,22 @@ func newTxSet() *txSet {
// contains returns true if the set contains the given transaction hash
// (not thread safe, should be called from a locked environment)
func (self *txSet) contains(hash common.Hash) bool {
_, ok := self.txMap[hash]
func (ts *txSet) contains(hash common.Hash) bool {
_, ok := ts.txMap[hash]
return ok
}
// add adds a transaction hash to the set, then removes entries older than txSetDuration
// (not thread safe, should be called from a locked environment)
func (self *txSet) add(hash common.Hash) {
self.txMap[hash] = struct{}{}
func (ts *txSet) add(hash common.Hash) {
ts.txMap[hash] = struct{}{}
now := time.Now()
self.txOrd[self.addPtr] = txOrdType{hash: hash, time: now}
self.addPtr++
ts.txOrd[ts.addPtr] = txOrdType{hash: hash, time: now}
ts.addPtr++
delBefore := now.Add(-txSetDuration)
for self.delPtr < self.addPtr && self.txOrd[self.delPtr].time.Before(delBefore) {
delete(self.txMap, self.txOrd[self.delPtr].hash)
delete(self.txOrd, self.delPtr)
self.delPtr++
for ts.delPtr < ts.addPtr && ts.txOrd[ts.delPtr].time.Before(delBefore) {
delete(ts.txMap, ts.txOrd[ts.delPtr].hash)
delete(ts.txOrd, ts.delPtr)
ts.delPtr++
}
}
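
txSet keeps recently seen hashes for a fixed window by pairing a lookup map with an insertion-ordered log that is pruned on every add. A self-contained sketch of the same expiring-set idea, using strings for hashes and an assumed 10ms retention window:

// Sketch of the expiring-set idea behind txSet.
package main

import (
    "fmt"
    "time"
)

type entry struct {
    key  string
    seen time.Time
}

type expiringSet struct {
    ttl   time.Duration
    items map[string]struct{}
    order []entry // insertion order, oldest first
}

func newExpiringSet(ttl time.Duration) *expiringSet {
    return &expiringSet{ttl: ttl, items: make(map[string]struct{})}
}

func (s *expiringSet) add(key string) {
    s.items[key] = struct{}{}
    s.order = append(s.order, entry{key, time.Now()})
    // Prune anything older than the retention window, as txSet.add does.
    cutoff := time.Now().Add(-s.ttl)
    for len(s.order) > 0 && s.order[0].seen.Before(cutoff) {
        delete(s.items, s.order[0].key)
        s.order = s.order[1:]
    }
}

func (s *expiringSet) contains(key string) bool {
    _, ok := s.items[key]
    return ok
}

func main() {
    s := newExpiringSet(10 * time.Millisecond)
    s.add("0xabc")
    fmt.Println(s.contains("0xabc")) // true right after insertion
    time.Sleep(20 * time.Millisecond)
    s.add("0xdef")                   // adding again prunes expired entries
    fmt.Println(s.contains("0xabc")) // false: aged out of the window
}
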

View File

@ -33,7 +33,11 @@ import (
)
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil), types.HomesteadSigner{}, key)
return pricedTransaction(nonce, gaslimit, big.NewInt(1), key)
}
func pricedTransaction(nonce uint64, gaslimit, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
return tx
}
@ -42,7 +46,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, db)
key, _ := crypto.GenerateKey()
newPool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
newPool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
newPool.resetState()
return newPool, key
@ -91,7 +95,7 @@ func TestStateChangeDuringPoolReset(t *testing.T) {
gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) }
txpool := NewTxPool(params.TestChainConfig, mux, stateFunc, gasLimitFunc)
txpool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc)
txpool.resetState()
nonce := txpool.State().GetNonce(address)
@ -151,9 +155,9 @@ func TestInvalidTransactions(t *testing.T) {
}
tx = transaction(1, big.NewInt(100000), key)
pool.minGasPrice = big.NewInt(1000)
if err := pool.Add(tx); err != ErrCheap {
t.Error("expected", ErrCheap, "got", err)
pool.gasPrice = big.NewInt(1000)
if err := pool.Add(tx); err != ErrUnderpriced {
t.Error("expected", ErrUnderpriced, "got", err)
}
pool.SetLocal(tx)
@ -171,7 +175,7 @@ func TestTransactionQueue(t *testing.T) {
pool.resetState()
pool.enqueueTx(tx.Hash(), tx)
pool.promoteExecutables(currentState)
pool.promoteExecutables(currentState, []common.Address{from})
if len(pool.pending) != 1 {
t.Error("expected valid txs to be 1 is", len(pool.pending))
}
@ -180,7 +184,7 @@ func TestTransactionQueue(t *testing.T) {
from, _ = deriveSender(tx)
currentState.SetNonce(from, 2)
pool.enqueueTx(tx.Hash(), tx)
pool.promoteExecutables(currentState)
pool.promoteExecutables(currentState, []common.Address{from})
if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
t.Error("expected transaction to be in tx pool")
}
@ -202,7 +206,7 @@ func TestTransactionQueue(t *testing.T) {
pool.enqueueTx(tx2.Hash(), tx2)
pool.enqueueTx(tx3.Hash(), tx3)
pool.promoteExecutables(currentState)
pool.promoteExecutables(currentState, []common.Address{from})
if len(pool.pending) != 1 {
t.Error("expected tx pool to be 1, got", len(pool.pending))
@ -262,14 +266,14 @@ func TestTransactionChainFork(t *testing.T) {
resetState()
tx := transaction(0, big.NewInt(100000), key)
if err := pool.add(tx); err != nil {
if _, err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
pool.RemoveBatch([]*types.Transaction{tx})
// reset the pool's internal state
resetState()
if err := pool.add(tx); err != nil {
if _, err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
}
@ -293,25 +297,23 @@ func TestTransactionDoubleNonce(t *testing.T) {
tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(1000000), big.NewInt(1), nil), signer, key)
// Add the first two transactions, ensure only the higher priced one stays
if err := pool.add(tx1); err != nil {
t.Error("didn't expect error", err)
if replace, err := pool.add(tx1); err != nil || replace {
t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace)
}
if err := pool.add(tx2); err != nil {
t.Error("didn't expect error", err)
if replace, err := pool.add(tx2); err != nil || !replace {
t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
}
state, _ := pool.currentState()
pool.promoteExecutables(state)
pool.promoteExecutables(state, []common.Address{addr})
if pool.pending[addr].Len() != 1 {
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
}
if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
}
// Add the thid transaction and ensure it's not saved (smaller price)
if err := pool.add(tx3); err != nil {
t.Error("didn't expect error", err)
}
pool.promoteExecutables(state)
// Add the third transaction and ensure it's not saved (smaller price)
pool.add(tx3)
pool.promoteExecutables(state, []common.Address{addr})
if pool.pending[addr].Len() != 1 {
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
}
@ -330,7 +332,7 @@ func TestMissingNonce(t *testing.T) {
currentState, _ := pool.currentState()
currentState.AddBalance(addr, big.NewInt(100000000000000))
tx := transaction(1, big.NewInt(100000), key)
if err := pool.add(tx); err != nil {
if _, err := pool.add(tx); err != nil {
t.Error("didn't expect error", err)
}
if len(pool.pending) != 0 {
@ -395,49 +397,78 @@ func TestTransactionDropping(t *testing.T) {
var (
tx0 = transaction(0, big.NewInt(100), key)
tx1 = transaction(1, big.NewInt(200), key)
tx2 = transaction(2, big.NewInt(300), key)
tx10 = transaction(10, big.NewInt(100), key)
tx11 = transaction(11, big.NewInt(200), key)
tx12 = transaction(12, big.NewInt(300), key)
)
pool.promoteTx(account, tx0.Hash(), tx0)
pool.promoteTx(account, tx1.Hash(), tx1)
pool.promoteTx(account, tx1.Hash(), tx2)
pool.enqueueTx(tx10.Hash(), tx10)
pool.enqueueTx(tx11.Hash(), tx11)
pool.enqueueTx(tx11.Hash(), tx12)
// Check that pre and post validations leave the pool as is
if pool.pending[account].Len() != 2 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 2)
if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
if pool.queue[account].Len() != 2 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 2)
if pool.queue[account].Len() != 3 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
}
if len(pool.all) != 4 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
}
pool.resetState()
if pool.pending[account].Len() != 2 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 2)
if pool.pending[account].Len() != 3 {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
}
if pool.queue[account].Len() != 2 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 2)
if pool.queue[account].Len() != 3 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
}
if len(pool.all) != 4 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
}
// Reduce the balance of the account, and check that invalidated transactions are dropped
state.AddBalance(account, big.NewInt(-750))
state.AddBalance(account, big.NewInt(-650))
pool.resetState()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
t.Errorf("out-of-fund pending transaction present: %v", tx1)
}
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
t.Errorf("out-of-fund queued transaction present: %v", tx11)
}
if len(pool.all) != 4 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
}
// Reduce the block gas limit, check that invalidated transactions are dropped
pool.gasLimit = func() *big.Int { return big.NewInt(100) }
pool.resetState()
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
t.Errorf("funded pending transaction missing: %v", tx0)
}
if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
t.Errorf("out-of-fund pending transaction present: %v", tx1)
t.Errorf("over-gased pending transaction present: %v", tx1)
}
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
t.Errorf("funded queued transaction missing: %v", tx10)
}
if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
t.Errorf("out-of-fund queued transaction present: %v", tx11)
t.Errorf("over-gased queued transaction present: %v", tx11)
}
if len(pool.all) != 2 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 2)
@ -531,25 +562,25 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
pool.resetState()
// Keep queuing up transactions and make sure all above a limit are dropped
for i := uint64(1); i <= maxQueuedPerAccount+5; i++ {
for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue+5; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
if len(pool.pending) != 0 {
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
}
if i <= maxQueuedPerAccount {
if i <= DefaultTxPoolConfig.AccountQueue {
if pool.queue[account].Len() != int(i) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
}
} else {
if pool.queue[account].Len() != int(maxQueuedPerAccount) {
t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), maxQueuedPerAccount)
if pool.queue[account].Len() != int(DefaultTxPoolConfig.AccountQueue) {
t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), DefaultTxPoolConfig.AccountQueue)
}
}
}
if len(pool.all) != int(maxQueuedPerAccount) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount)
if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue)
}
}
@ -557,14 +588,14 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
// some threshold, the higher transactions are dropped to prevent DOS attacks.
func TestTransactionQueueGlobalLimiting(t *testing.T) {
// Reduce the queue limits to shorten test time
defer func(old uint64) { maxQueuedInTotal = old }(maxQueuedInTotal)
maxQueuedInTotal = maxQueuedPerAccount * 3
defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
DefaultTxPoolConfig.GlobalQueue = DefaultTxPoolConfig.AccountQueue * 3
// Create the pool to test the limit enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@ -578,7 +609,7 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
// Generate and queue a batch of transactions
nonces := make(map[common.Address]uint64)
txs := make(types.Transactions, 0, 3*maxQueuedInTotal)
txs := make(types.Transactions, 0, 3*DefaultTxPoolConfig.GlobalQueue)
for len(txs) < cap(txs) {
key := keys[rand.Intn(len(keys))]
addr := crypto.PubkeyToAddress(key.PublicKey)
@ -591,13 +622,13 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
queued := 0
for addr, list := range pool.queue {
if list.Len() > int(maxQueuedPerAccount) {
t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), maxQueuedPerAccount)
if list.Len() > int(DefaultTxPoolConfig.AccountQueue) {
t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), DefaultTxPoolConfig.AccountQueue)
}
queued += list.Len()
}
if queued > int(maxQueuedInTotal) {
t.Fatalf("total transactions overflow allowance: %d > %d", queued, maxQueuedInTotal)
if queued > int(DefaultTxPoolConfig.GlobalQueue) {
t.Fatalf("total transactions overflow allowance: %d > %d", queued, DefaultTxPoolConfig.GlobalQueue)
}
}
@ -606,9 +637,9 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
// on shuffling them around.
func TestTransactionQueueTimeLimiting(t *testing.T) {
// Reduce the queue limits to shorten test time
defer func(old time.Duration) { maxQueuedLifetime = old }(maxQueuedLifetime)
defer func(old time.Duration) { DefaultTxPoolConfig.Lifetime = old }(DefaultTxPoolConfig.Lifetime)
defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
maxQueuedLifetime = time.Second
DefaultTxPoolConfig.Lifetime = time.Second
evictionInterval = time.Second
// Create a test account and fund it
@ -619,7 +650,7 @@ func TestTransactionQueueTimeLimiting(t *testing.T) {
state.AddBalance(account, big.NewInt(1000000))
// Queue up a batch of transactions
for i := uint64(1); i <= maxQueuedPerAccount; i++ {
for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@ -644,7 +675,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
pool.resetState()
// Keep queuing up transactions and make sure all above a limit are dropped
for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@ -655,8 +686,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
}
}
if len(pool.all) != int(maxQueuedPerAccount+5) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount+5)
if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue+5) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue+5)
}
}
@ -672,7 +703,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
state1, _ := pool1.currentState()
state1.AddBalance(account1, big.NewInt(1000000))
for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil {
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
}
@ -684,7 +715,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
state2.AddBalance(account2, big.NewInt(1000000))
txns := []*types.Transaction{}
for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
}
pool2.AddBatch(txns)
@ -706,14 +737,14 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
// attacks.
func TestTransactionPendingGlobalLimiting(t *testing.T) {
// Reduce the queue limits to shorten test time
defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
maxPendingTotal = minPendingPerAccount * 10
defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
DefaultTxPoolConfig.GlobalSlots = DefaultTxPoolConfig.AccountSlots * 10
// Create the pool to test the limit enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@ -730,7 +761,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
txs := types.Transactions{}
for _, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
for j := 0; j < int(maxPendingTotal)/len(keys)*2; j++ {
for j := 0; j < int(DefaultTxPoolConfig.GlobalSlots)/len(keys)*2; j++ {
txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
nonces[addr]++
}
@ -742,8 +773,8 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
for _, list := range pool.pending {
pending += list.Len()
}
if pending > int(maxPendingTotal) {
t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, maxPendingTotal)
if pending > int(DefaultTxPoolConfig.GlobalSlots) {
t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, DefaultTxPoolConfig.GlobalSlots)
}
}
@ -752,14 +783,14 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
// the transactions are still kept.
func TestTransactionPendingMinimumAllowance(t *testing.T) {
// Reduce the queue limits to shorten test time
defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
maxPendingTotal = 0
defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
DefaultTxPoolConfig.GlobalSlots = 0
// Create the pool to test the limit enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
@ -776,7 +807,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
txs := types.Transactions{}
for _, key := range keys {
addr := crypto.PubkeyToAddress(key.PublicKey)
for j := 0; j < int(minPendingPerAccount)*2; j++ {
for j := 0; j < int(DefaultTxPoolConfig.AccountSlots)*2; j++ {
txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
nonces[addr]++
}
@ -785,12 +816,233 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
pool.AddBatch(txs)
for addr, list := range pool.pending {
if list.Len() != int(minPendingPerAccount) {
t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), minPendingPerAccount)
if list.Len() != int(DefaultTxPoolConfig.AccountSlots) {
t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), DefaultTxPoolConfig.AccountSlots)
}
}
}
// Tests that setting the transaction pool gas price to a higher value correctly
// discards everything cheaper than that and moves any gapped transactions back
// from the pending pool to the queue.
//
// Note, local transactions are never allowed to be dropped.
func TestTransactionPoolRepricing(t *testing.T) {
// Create the pool to test the pricing enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
keys := make([]*ecdsa.PrivateKey, 3)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
state.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
}
// Generate and queue a batch of transactions, both pending and queued
txs := types.Transactions{}
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(2), keys[0]))
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[0]))
txs = append(txs, pricedTransaction(2, big.NewInt(100000), big.NewInt(2), keys[0]))
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(2), keys[1]))
txs = append(txs, pricedTransaction(2, big.NewInt(100000), big.NewInt(1), keys[1]))
txs = append(txs, pricedTransaction(3, big.NewInt(100000), big.NewInt(2), keys[1]))
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[2]))
pool.SetLocal(txs[len(txs)-1]) // prevent this one from ever being dropped
// Import the batch and verify that both pending and queued transactions match up
pool.AddBatch(txs)
pending, queued := pool.stats()
if pending != 4 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
}
if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
}
// Reprice the pool and check that underpriced transactions get dropped
pool.SetGasPrice(big.NewInt(2))
pending, queued = pool.stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
}
// Check that we can't add the old transactions back
if err := pool.Add(pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[0])); err != ErrUnderpriced {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(1), keys[1])); err != ErrUnderpriced {
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
// However we can add local underpriced transactions
tx := pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[2])
pool.SetLocal(tx) // prevent this one from ever being dropped
if err := pool.Add(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
if pending, _ = pool.stats(); pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
}
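Worked through, the numbers above fall out as follows: before the reprice, key[0]'s nonces 0-2 plus key[2]'s local nonce 0 are executable (4 pending), while key[1]'s batch starts at nonce 1 and sits entirely in the queue (3 queued). SetGasPrice(2) drops key[0]'s nonce-1 and key[1]'s nonce-2 transactions (both priced at 1), which gaps key[0]'s nonce 2 and demotes it to the queue; the local key[2] transaction is exempt. That leaves 2 pending (key[0] nonce 0, key[2] nonce 0) and 3 queued (key[0] nonce 2, key[1] nonces 1 and 3).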
// Tests that when the pool reaches its global transaction limit, underpriced
// transactions are gradually shifted out for more expensive ones and any gapped
// pending transactions are moved into the queue.
//
// Note, local transactions are never allowed to be dropped.
func TestTransactionPoolUnderpricing(t *testing.T) {
// Reduce the queue limits to shorten test time
defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
DefaultTxPoolConfig.GlobalSlots = 2
defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
DefaultTxPoolConfig.GlobalQueue = 2
// Create the pool to test the pricing enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a number of test accounts and fund them
state, _ := pool.currentState()
keys := make([]*ecdsa.PrivateKey, 3)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
state.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
}
// Generate and queue a batch of transactions, both pending and queued
txs := types.Transactions{}
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[0]))
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(2), keys[0]))
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[1]))
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[2]))
pool.SetLocal(txs[len(txs)-1]) // prevent this one from ever being dropped
// Import the batch and verify that both pending and queued transactions match up
pool.AddBatch(txs)
pending, queued := pool.stats()
if pending != 3 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
}
if queued != 1 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
}
// Ensure that adding an underpriced transaction fails once the pool is full
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[1])); err != ErrUnderpriced {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
// Ensure that adding high priced transactions drops cheap ones, but not the local one
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(3), keys[1])); err != nil {
t.Fatalf("failed to add well priced transaction: %v", err)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(4), keys[1])); err != nil {
t.Fatalf("failed to add well priced transaction: %v", err)
}
if err := pool.Add(pricedTransaction(3, big.NewInt(100000), big.NewInt(5), keys[1])); err != nil {
t.Fatalf("failed to add well priced transaction: %v", err)
}
pending, queued = pool.stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 2 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
// Ensure that adding local transactions can push out even higher priced ones
tx := pricedTransaction(1, big.NewInt(100000), big.NewInt(0), keys[2])
pool.SetLocal(tx) // prevent this one from ever being dropped
if err := pool.Add(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
pending, queued = pool.stats()
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 2 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
}
}
// Tests that the pool rejects replacement transactions that don't meet the minimum
// price bump required.
func TestTransactionReplacement(t *testing.T) {
// Create the pool to test the pricing enforcement with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, db)
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
pool.resetState()
// Create a test account to add transactions with
key, _ := crypto.GenerateKey()
state, _ := pool.currentState()
state.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
price := int64(100)
threshold := (price * (100 + int64(DefaultTxPoolConfig.PriceBump))) / 100
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original cheap pending transaction: %v", err)
}
if err := pool.Add(pricedTransaction(0, big.NewInt(100001), big.NewInt(1), key)); err != ErrReplaceUnderpriced {
t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original cheap pending transaction: %v", err)
}
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original proper pending transaction: %v", err)
}
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(threshold), key)); err != ErrReplaceUnderpriced {
t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(threshold+1), key)); err != nil {
t.Fatalf("failed to replace original proper pending transaction: %v", err)
}
// Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(1), key)); err != nil {
t.Fatalf("failed to add original queued transaction: %v", err)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100001), big.NewInt(1), key)); err != ErrReplaceUnderpriced {
t.Fatalf("original queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(2), key)); err != nil {
t.Fatalf("failed to replace original queued transaction: %v", err)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(price), key)); err != nil {
t.Fatalf("failed to add original queued transaction: %v", err)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100001), big.NewInt(threshold), key)); err != ErrReplaceUnderpriced {
t.Fatalf("original queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
}
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(threshold+1), key)); err != nil {
t.Fatalf("failed to replace original queued transaction: %v", err)
}
}
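The replacement threshold used above is plain integer arithmetic on the configured bump percentage; a minimal sketch, where the 10 percent figure for DefaultTxPoolConfig.PriceBump is an assumption and the formula mirrors the threshold computation in the test:

price := int64(100)
bump := int64(10) // assumed value of DefaultTxPoolConfig.PriceBump (percent)
threshold := (price * (100 + bump)) / 100 // = 110
// the test rejects a replacement priced exactly at the threshold (110) and
// accepts threshold+1 (111); for the 1-wei original, integer division keeps
// the threshold at 1, so a replacement priced 1 is rejected and 2 is accepted.
_ = threshold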
// Benchmarks the speed of validating the contents of the pending queue of the
// transaction pool.
func BenchmarkPendingDemotion100(b *testing.B) { benchmarkPendingDemotion(b, 100) }
@ -835,7 +1087,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
// Benchmark the speed of pool validation
b.ResetTimer()
for i := 0; i < b.N; i++ {
pool.promoteExecutables(state)
pool.promoteExecutables(state, nil)
}
}

View File

@ -27,7 +27,12 @@ import (
"github.com/ethereum/go-ethereum/params"
)
var ErrInvalidChainId = errors.New("invalid chain id for signer")
var (
ErrInvalidChainId = errors.New("invalid chain id for signer")
errAbstractSigner = errors.New("abstract signer")
abstractSignerAddress = common.HexToAddress("ffffffffffffffffffffffffffffffffffffffff")
)
// sigCache is used to cache the derived sender and contains
// the signer used to derive it.

View File

@ -79,7 +79,7 @@ func decodeTx(data []byte) (*Transaction, error) {
}
func defaultTestKey() (*ecdsa.PrivateKey, common.Address) {
key := crypto.ToECDSA(common.Hex2Bytes("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"))
key, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
addr := crypto.PubkeyToAddress(key.PublicKey)
return key, addr
}

View File

@ -18,6 +18,7 @@ package vm
import (
"crypto/sha256"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/common"
@ -27,15 +28,17 @@ import (
"golang.org/x/crypto/ripemd160"
)
var errBadPrecompileInput = errors.New("bad pre compile input")
// PrecompiledContract is the basic interface for native Go contracts. The implementation
// requires a deterministic gas count based on the input size of the Run method of the
// contract.
type PrecompiledContract interface {
RequiredGas(inputSize int) uint64 // RequiredGas calculates the contract gas use
Run(input []byte) []byte // Run runs the precompiled contract
RequiredGas(input []byte) uint64 // RequiredGas calculates the contract gas use
Run(input []byte) ([]byte, error) // Run runs the precompiled contract
}
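With the new signature a precompile derives its gas from the raw input and can fail explicitly; a minimal, hypothetical sketch (the echo type and its flat 15-gas charge are invented for illustration; only errBadPrecompileInput comes from this file):

// hypothetical example: echoes its input for a flat gas charge
type echo struct{}

func (c *echo) RequiredGas(input []byte) uint64 { return 15 } // flat cost, illustrative only

func (c *echo) Run(input []byte) ([]byte, error) {
	if len(input) == 0 {
		return nil, errBadPrecompileInput // errors now propagate to the caller
	}
	return input, nil
}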
// Precompiled contains the default set of ethereum contracts
// PrecompiledContracts contains the default set of ethereum contracts
var PrecompiledContracts = map[common.Address]PrecompiledContract{
common.BytesToAddress([]byte{1}): &ecrecover{},
common.BytesToAddress([]byte{2}): &sha256hash{},
@ -45,11 +48,9 @@ var PrecompiledContracts = map[common.Address]PrecompiledContract{
// RunPrecompiledContract runs and evaluates the output of a precompiled contract defined in contracts.go
func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contract) (ret []byte, err error) {
gas := p.RequiredGas(len(input))
gas := p.RequiredGas(input)
if contract.UseGas(gas) {
ret = p.Run(input)
return ret, nil
return p.Run(input)
} else {
return nil, ErrOutOfGas
}
@ -58,11 +59,11 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contr
// ECRECOVER implemented as a native contract
type ecrecover struct{}
func (c *ecrecover) RequiredGas(inputSize int) uint64 {
func (c *ecrecover) RequiredGas(input []byte) uint64 {
return params.EcrecoverGas
}
func (c *ecrecover) Run(in []byte) []byte {
func (c *ecrecover) Run(in []byte) ([]byte, error) {
const ecRecoverInputLength = 128
in = common.RightPadBytes(in, ecRecoverInputLength)
@ -76,18 +77,18 @@ func (c *ecrecover) Run(in []byte) []byte {
// tighter sig s values in homestead only apply to tx sigs
if !allZero(in[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
log.Trace("ECRECOVER error: v, r or s value invalid")
return nil
return nil, nil
}
// v needs to be at the end for libsecp256k1
pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
// make sure the public key is a valid one
if err != nil {
log.Trace("ECRECOVER failed", "err", err)
return nil
return nil, nil
}
// the first byte of pubkey is bitcoin heritage
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32)
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32), nil
}
// SHA256 implemented as a native contract
@ -97,12 +98,12 @@ type sha256hash struct{}
//
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *sha256hash) RequiredGas(inputSize int) uint64 {
return uint64(inputSize+31)/32*params.Sha256WordGas + params.Sha256Gas
func (c *sha256hash) RequiredGas(input []byte) uint64 {
return uint64(len(input)+31)/32*params.Sha256WordGas + params.Sha256Gas
}
func (c *sha256hash) Run(in []byte) []byte {
func (c *sha256hash) Run(in []byte) ([]byte, error) {
h := sha256.Sum256(in)
return h[:]
return h[:], nil
}
// RIPMED160 implemented as a native contract
@ -112,13 +113,13 @@ type ripemd160hash struct{}
//
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *ripemd160hash) RequiredGas(inputSize int) uint64 {
return uint64(inputSize+31)/32*params.Ripemd160WordGas + params.Ripemd160Gas
func (c *ripemd160hash) RequiredGas(input []byte) uint64 {
return uint64(len(input)+31)/32*params.Ripemd160WordGas + params.Ripemd160Gas
}
func (c *ripemd160hash) Run(in []byte) []byte {
func (c *ripemd160hash) Run(in []byte) ([]byte, error) {
ripemd := ripemd160.New()
ripemd.Write(in)
return common.LeftPadBytes(ripemd.Sum(nil), 32)
return common.LeftPadBytes(ripemd.Sum(nil), 32), nil
}
// data copy implemented as a native contract
@ -128,9 +129,9 @@ type dataCopy struct{}
//
// This method does not require any overflow checking as the input size gas costs
// required for anything significant is so high it's impossible to pay for.
func (c *dataCopy) RequiredGas(inputSize int) uint64 {
return uint64(inputSize+31)/32*params.IdentityWordGas + params.IdentityGas
func (c *dataCopy) RequiredGas(input []byte) uint64 {
return uint64(len(input)+31)/32*params.IdentityWordGas + params.IdentityGas
}
func (c *dataCopy) Run(in []byte) []byte {
return in
func (c *dataCopy) Run(in []byte) ([]byte, error) {
return in, nil
}

View File

@ -0,0 +1 @@
package vm

View File

@ -33,7 +33,20 @@ type (
GetHashFunc func(uint64) common.Hash
)
// Context provides the EVM with auxiliary information. Once provided it shouldn't be modified.
// run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter.
func run(evm *EVM, snapshot int, contract *Contract, input []byte) ([]byte, error) {
if contract.CodeAddr != nil {
precompiledContracts := PrecompiledContracts
if p := precompiledContracts[*contract.CodeAddr]; p != nil {
return RunPrecompiledContract(p, input, contract)
}
}
return evm.interpreter.Run(snapshot, contract, input)
}
// Context provides the EVM with auxiliary information. Once provided
// it shouldn't be modified.
type Context struct {
// CanTransfer returns whether the account contains
// sufficient ether to transfer the value
@ -55,7 +68,13 @@ type Context struct {
Difficulty *big.Int // Provides information for DIFFICULTY
}
// EVM provides information about external sources for the EVM
// EVM is the Ethereum Virtual Machine base object and provides
// the necessary tools to run a contract on the given state with
// the provided context. It should be noted that any error
// generated through any of the calls should be considered a
// revert-state-and-consume-all-gas operation, no checks on
// specific errors should ever be performed. The interpreter makes
// sure that any errors generated are to be considered faulty code.
//
// The EVM should never be reused and is not thread safe.
type EVM struct {
@ -68,6 +87,8 @@ type EVM struct {
// chainConfig contains information about the current chain
chainConfig *params.ChainConfig
// chain rules contains the chain rules for the current epoch
chainRules params.Rules
// virtual machine configuration options used to initialise the
// evm.
vmConfig Config
@ -79,21 +100,23 @@ type EVM struct {
abort int32
}
// NewEVM returns a new EVM environment.
// NewEVM returns a new EVM environment. The returned EVM is not thread safe
// and should only ever be used *once*.
func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
evm := &EVM{
Context: ctx,
StateDB: statedb,
vmConfig: vmConfig,
chainConfig: chainConfig,
chainRules: chainConfig.Rules(ctx.BlockNumber),
}
evm.interpreter = NewInterpreter(evm, vmConfig)
return evm
}
// Cancel cancels any running EVM operation. This may be called concurrently and it's safe to be
// called multiple times.
// Cancel cancels any running EVM operation. This may be called concurrently and
// it's safe to be called multiple times.
func (evm *EVM) Cancel() {
atomic.StoreInt32(&evm.abort, 1)
}
@ -134,13 +157,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
contract := NewContract(caller, to, value, gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
ret, err = evm.interpreter.Run(contract, input)
ret, err = run(evm, snapshot, contract, input)
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
// when we're in homestead this also counts for code storage gas errors.
if err != nil {
contract.UseGas(contract.Gas)
evm.StateDB.RevertToSnapshot(snapshot)
}
return ret, contract.Gas, err
@ -175,10 +197,9 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
contract := NewContract(caller, to, value, gas)
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
ret, err = evm.interpreter.Run(contract, input)
ret, err = run(evm, snapshot, contract, input)
if err != nil {
contract.UseGas(contract.Gas)
evm.StateDB.RevertToSnapshot(snapshot)
}
@ -210,10 +231,9 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
contract := NewContract(caller, to, nil, gas).AsDelegate()
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
ret, err = evm.interpreter.Run(contract, input)
ret, err = run(evm, snapshot, contract, input)
if err != nil {
contract.UseGas(contract.Gas)
evm.StateDB.RevertToSnapshot(snapshot)
}
@ -253,8 +273,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
contract := NewContract(caller, AccountRef(contractAddr), value, gas)
contract.SetCallCode(&contractAddr, crypto.Keccak256Hash(code), code)
ret, err = evm.interpreter.Run(contract, nil)
ret, err = run(evm, snapshot, contract, nil)
// check whether the max code size has been exceeded
maxCodeSizeExceeded := len(ret) > params.MaxCodeSize
// if the contract creation ran successfully and no errors were returned
@ -275,10 +294,8 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
// when we're in homestead this also counts for code storage gas errors.
if maxCodeSizeExceeded ||
(err != nil && (evm.ChainConfig().IsHomestead(evm.BlockNumber) || err != ErrCodeStoreOutOfGas)) {
contract.UseGas(contract.Gas)
evm.StateDB.RevertToSnapshot(snapshot)
// Nothing should be returned when an error is thrown.
return nil, contractAddr, 0, err
}
// If the vm returned with an error the return value should be set to nil.
// This isn't consensus critical but merely for behavioural reasons such as

View File

@ -27,7 +27,9 @@ import (
"github.com/ethereum/go-ethereum/params"
)
var bigZero = new(big.Int)
var (
bigZero = new(big.Int)
)
func opAdd(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y := stack.pop(), stack.pop()
@ -599,7 +601,7 @@ func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta
contract.Gas += returnGas
evm.interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize)
return nil, nil
return ret, nil
}
func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
@ -633,16 +635,10 @@ func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack
contract.Gas += returnGas
evm.interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize)
return nil, nil
return ret, nil
}
func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// if not homestead return an error. DELEGATECALL is not supported
// during pre-homestead.
if !evm.ChainConfig().IsHomestead(evm.BlockNumber) {
return nil, fmt.Errorf("invalid opcode %x", DELEGATECALL)
}
gas, to, inOffset, inSize, outOffset, outSize := stack.pop().Uint64(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
toAddr := common.BigToAddress(to)
@ -658,7 +654,7 @@ func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, st
contract.Gas += returnGas
evm.interpreter.intPool.put(to, inOffset, inSize, outOffset, outSize)
return nil, nil
return ret, nil
}
func opReturn(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
@ -666,6 +662,7 @@ func opReturn(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
ret := memory.GetPtr(offset.Int64(), size.Int64())
evm.interpreter.intPool.put(offset, size)
return ret, nil
}
@ -709,10 +706,23 @@ func makeLog(size int) executionFunc {
}
// make push instruction function
func makePush(size uint64, bsize *big.Int) executionFunc {
func makePush(size uint64, pushByteSize int) executionFunc {
return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
byts := getData(contract.Code, evm.interpreter.intPool.get().SetUint64(*pc+1), bsize)
stack.push(new(big.Int).SetBytes(byts))
codeLen := len(contract.Code)
startMin := codeLen
if int(*pc+1) < startMin {
startMin = int(*pc + 1)
}
endMin := codeLen
if startMin+pushByteSize < endMin {
endMin = startMin + pushByteSize
}
integer := evm.interpreter.intPool.get()
stack.push(integer.SetBytes(common.RightPadBytes(contract.Code[startMin:endMin], pushByteSize)))
*pc += size
return nil, nil
}
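The new bounds handling is easiest to see on code that is shorter than the push payload; the same clamping written out with concrete values (only common.RightPadBytes is taken from the surrounding code, the sample bytecode is illustrative):

code := []byte{0x63, 0x01, 0x02} // PUSH4 with only two payload bytes present
pc, pushByteSize := 0, 4
startMin := len(code)
if pc+1 < startMin {
	startMin = pc + 1 // 1
}
endMin := len(code)
if startMin+pushByteSize < endMin {
	endMin = startMin + pushByteSize // not taken here, code ends first
}
// code[1:3] = {0x01, 0x02}, right-padded to 4 bytes, so the pushed value is 0x01020000
value := new(big.Int).SetBytes(common.RightPadBytes(code[startMin:endMin], pushByteSize))
_ = value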
@ -721,7 +731,7 @@ func makePush(size uint64, bsize *big.Int) executionFunc {
// make dup instruction function
func makeDup(size int64) executionFunc {
return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
stack.dup(int(size))
stack.dup(evm.interpreter.intPool, int(size))
return nil, nil
}
}

View File

@ -52,43 +52,53 @@ type Config struct {
}
// Interpreter is used to run Ethereum based contracts and will utilise the
// passed environment to query external sources for state information.
// passed evmironment to query external sources for state information.
// The Interpreter will run the byte code VM or JIT VM based on the passed
// configuration.
type Interpreter struct {
env *EVM
evm *EVM
cfg Config
gasTable params.GasTable
intPool *intPool
readonly bool
}
// NewInterpreter returns a new instance of the Interpreter.
func NewInterpreter(env *EVM, cfg Config) *Interpreter {
func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
// We use the STOP instruction to see whether the jump
// table was initialised. If it was not we'll set the
// default jump table.
if !cfg.JumpTable[STOP].valid {
cfg.JumpTable = defaultJumpTable
switch {
case evm.ChainConfig().IsHomestead(evm.BlockNumber):
cfg.JumpTable = homesteadInstructionSet
default:
cfg.JumpTable = frontierInstructionSet
}
}
return &Interpreter{
env: env,
evm: evm,
cfg: cfg,
gasTable: env.ChainConfig().GasTable(env.BlockNumber),
gasTable: evm.ChainConfig().GasTable(evm.BlockNumber),
intPool: newIntPool(),
}
}
// Run loops and evaluates the contract's code with the given input data
func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err error) {
evm.env.depth++
defer func() { evm.env.depth-- }()
func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack *Stack) error {
return nil
}
if contract.CodeAddr != nil {
if p := PrecompiledContracts[*contract.CodeAddr]; p != nil {
return RunPrecompiledContract(p, input, contract)
}
}
// Run loops and evaluates the contract's code with the given input data and returns
// the return byte-slice and an error if one occurred.
//
// It's important to note that any errors returned by the interpreter should be
// considered a revert-and-consume-all-gas operation. No error specific checks
// should be handled to reduce complexity and errors further down the line.
func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret []byte, err error) {
in.evm.depth++
defer func() { in.evm.depth-- }()
// Don't bother with the execution if there's no code.
if len(contract.Code) == 0 {
@ -105,7 +115,8 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
mem = NewMemory() // bound memory
stack = newstack() // local stack
// For optimisation reasons we're using uint64 as the program counter.
// It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Practically much less so feasible.
// It's theoretically possible to go above 2^64. The YP defines the PC
// to be uint256. Practically much less so feasible.
pc = uint64(0) // program counter
cost uint64
)
@ -113,31 +124,34 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
// Use a deferred function to check for an error and, based on the error being nil or not, use all gas and return.
defer func() {
if err != nil && evm.cfg.Debug {
if err != nil && in.cfg.Debug {
// XXX For debugging
//fmt.Printf("%04d: %8v cost = %-8d stack = %-8d ERR = %v\n", pc, op, cost, stack.len(), err)
evm.cfg.Tracer.CaptureState(evm.env, pc, op, contract.Gas, cost, mem, stack, contract, evm.env.depth, err)
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
}
}()
log.Debug("EVM running contract", "hash", codehash[:])
log.Debug("interpreter running contract", "hash", codehash[:])
tstart := time.Now()
defer log.Debug("EVM finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
defer log.Debug("interpreter finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
// The Interpreter main run loop (contextual). This loop runs until either an
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
// the execution of one of the operations or until the evm.done is set by
// the parent context.Context.
for atomic.LoadInt32(&evm.env.abort) == 0 {
// the execution of one of the operations or until the done flag is set by the
// parent context.
for atomic.LoadInt32(&in.evm.abort) == 0 {
// Get the opcode at the current program counter
op = contract.GetOp(pc)
// get the operation from the jump table matching the opcode
operation := evm.cfg.JumpTable[op]
operation := in.cfg.JumpTable[op]
if err := in.enforceRestrictions(op, operation, stack); err != nil {
return nil, err
}
// if the op is invalid abort the process and return an error
if !operation.valid {
return nil, fmt.Errorf("invalid opcode %x", op)
return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
}
// validate the stack and make sure there are enough stack items available
@ -161,10 +175,10 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
}
}
if !evm.cfg.DisableGasMetering {
if !in.cfg.DisableGasMetering {
// consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost
cost, err = operation.gasCost(evm.gasTable, evm.env, contract, stack, mem, memorySize)
cost, err = operation.gasCost(in.gasTable, in.evm, contract, stack, mem, memorySize)
if err != nil || !contract.UseGas(cost) {
return nil, ErrOutOfGas
}
@ -173,19 +187,20 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
mem.Resize(memorySize)
}
if evm.cfg.Debug {
evm.cfg.Tracer.CaptureState(evm.env, pc, op, contract.Gas, cost, mem, stack, contract, evm.env.depth, err)
if in.cfg.Debug {
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
}
// XXX For debugging
//fmt.Printf("%04d: %8v cost = %-8d stack = %-8d\n", pc, op, cost, stack.len())
// execute the operation
res, err := operation.execute(&pc, evm.env, contract, mem, stack)
res, err := operation.execute(&pc, in.evm, contract, mem, stack)
// verifyPool is a build flag. Pool verification ensures the integrity
// of the integer pool by comparing values to a default value.
if verifyPool {
verifyIntegerPool(evm.intPool)
verifyIntegerPool(in.intPool)
}
switch {
case err != nil:
return nil, err
@ -194,6 +209,11 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
case !operation.jumps:
pc++
}
// if the operation returned a value, make sure it is also set as
// the last return data.
if res != nil {
mem.lastReturn = res
}
}
return nil, nil
}

View File

@ -20,6 +20,8 @@ import "math/big"
var checkVal = big.NewInt(-42)
const poolLimit = 256
// intPool is a pool of big integers that
// can be reused for all big.Int operations.
type intPool struct {
@ -37,6 +39,10 @@ func (p *intPool) get() *big.Int {
return new(big.Int)
}
func (p *intPool) put(is ...*big.Int) {
if len(p.pool.data) > poolLimit {
return
}
for _, i := range is {
// verifyPool is a build flag. Pool verification ensures the integrity
// of the integer pool by comparing values to a default value.

View File

@ -47,13 +47,36 @@ type operation struct {
// jumps indicates whether the operation made a jump. This prevents the program
// counter from further incrementing.
jumps bool
// writes determines whether this is a state modifying operation
writes bool
// valid is used to check whether the retrieved operation is valid and known
valid bool
// reverts determines whether the operation reverts state
reverts bool
}
var defaultJumpTable = NewJumpTable()
var (
frontierInstructionSet = NewFrontierInstructionSet()
homesteadInstructionSet = NewHomesteadInstructionSet()
)
func NewJumpTable() [256]operation {
// NewHomesteadInstructionSet returns the frontier and homestead
// instructions that can be executed during the homestead phase.
func NewHomesteadInstructionSet() [256]operation {
instructionSet := NewFrontierInstructionSet()
instructionSet[DELEGATECALL] = operation{
execute: opDelegateCall,
gasCost: gasDelegateCall,
validateStack: makeStackFunc(6, 1),
memorySize: memoryDelegateCall,
valid: true,
}
return instructionSet
}
// NewFrontierInstructionSet returns the frontier instructions
// that can be executed during the frontier phase.
func NewFrontierInstructionSet() [256]operation {
return [256]operation{
STOP: {
execute: opStop,
@ -357,6 +380,7 @@ func NewJumpTable() [256]operation {
gasCost: gasSStore,
validateStack: makeStackFunc(2, 0),
valid: true,
writes: true,
},
JUMP: {
execute: opJump,
@ -397,193 +421,193 @@ func NewJumpTable() [256]operation {
valid: true,
},
PUSH1: {
execute: makePush(1, big.NewInt(1)),
execute: makePush(1, 1),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH2: {
execute: makePush(2, big.NewInt(2)),
execute: makePush(2, 2),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH3: {
execute: makePush(3, big.NewInt(3)),
execute: makePush(3, 3),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH4: {
execute: makePush(4, big.NewInt(4)),
execute: makePush(4, 4),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH5: {
execute: makePush(5, big.NewInt(5)),
execute: makePush(5, 5),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH6: {
execute: makePush(6, big.NewInt(6)),
execute: makePush(6, 6),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH7: {
execute: makePush(7, big.NewInt(7)),
execute: makePush(7, 7),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH8: {
execute: makePush(8, big.NewInt(8)),
execute: makePush(8, 8),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH9: {
execute: makePush(9, big.NewInt(9)),
execute: makePush(9, 9),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH10: {
execute: makePush(10, big.NewInt(10)),
execute: makePush(10, 10),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH11: {
execute: makePush(11, big.NewInt(11)),
execute: makePush(11, 11),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH12: {
execute: makePush(12, big.NewInt(12)),
execute: makePush(12, 12),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH13: {
execute: makePush(13, big.NewInt(13)),
execute: makePush(13, 13),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH14: {
execute: makePush(14, big.NewInt(14)),
execute: makePush(14, 14),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH15: {
execute: makePush(15, big.NewInt(15)),
execute: makePush(15, 15),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH16: {
execute: makePush(16, big.NewInt(16)),
execute: makePush(16, 16),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH17: {
execute: makePush(17, big.NewInt(17)),
execute: makePush(17, 17),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH18: {
execute: makePush(18, big.NewInt(18)),
execute: makePush(18, 18),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH19: {
execute: makePush(19, big.NewInt(19)),
execute: makePush(19, 19),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH20: {
execute: makePush(20, big.NewInt(20)),
execute: makePush(20, 20),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH21: {
execute: makePush(21, big.NewInt(21)),
execute: makePush(21, 21),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH22: {
execute: makePush(22, big.NewInt(22)),
execute: makePush(22, 22),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH23: {
execute: makePush(23, big.NewInt(23)),
execute: makePush(23, 23),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH24: {
execute: makePush(24, big.NewInt(24)),
execute: makePush(24, 24),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH25: {
execute: makePush(25, big.NewInt(25)),
execute: makePush(25, 25),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH26: {
execute: makePush(26, big.NewInt(26)),
execute: makePush(26, 26),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH27: {
execute: makePush(27, big.NewInt(27)),
execute: makePush(27, 27),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH28: {
execute: makePush(28, big.NewInt(28)),
execute: makePush(28, 28),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH29: {
execute: makePush(29, big.NewInt(29)),
execute: makePush(29, 29),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH30: {
execute: makePush(30, big.NewInt(30)),
execute: makePush(30, 30),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH31: {
execute: makePush(31, big.NewInt(31)),
execute: makePush(31, 31),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
},
PUSH32: {
execute: makePush(32, big.NewInt(32)),
execute: makePush(32, 32),
gasCost: gasPush,
validateStack: makeStackFunc(0, 1),
valid: true,
@ -821,6 +845,7 @@ func NewJumpTable() [256]operation {
validateStack: makeStackFunc(3, 1),
memorySize: memoryCreate,
valid: true,
writes: true,
},
CALL: {
execute: opCall,
@ -844,19 +869,13 @@ func NewJumpTable() [256]operation {
halts: true,
valid: true,
},
DELEGATECALL: {
execute: opDelegateCall,
gasCost: gasDelegateCall,
validateStack: makeStackFunc(6, 1),
memorySize: memoryDelegateCall,
valid: true,
},
SELFDESTRUCT: {
execute: opSuicide,
gasCost: gasSuicide,
validateStack: makeStackFunc(1, 0),
halts: true,
valid: true,
writes: true,
},
}
}

View File

@ -22,6 +22,7 @@ import "fmt"
type Memory struct {
store []byte
lastGasCost uint64
lastReturn []byte
}
func NewMemory() *Memory {

View File

@ -29,7 +29,7 @@ type Stack struct {
}
func newstack() *Stack {
return &Stack{}
return &Stack{data: make([]*big.Int, 0, 1024)}
}
func (st *Stack) Data() []*big.Int {
@ -60,8 +60,8 @@ func (st *Stack) swap(n int) {
st.data[st.len()-n], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-n]
}
func (st *Stack) dup(n int) {
st.push(new(big.Int).Set(st.data[st.len()-n]))
func (st *Stack) dup(pool *intPool, n int) {
st.push(pool.get().Set(st.data[st.len()-n]))
}
func (st *Stack) peek() *big.Int {

crypto/bn256/bn256.go (new file, 428 lines)
View File

@ -0,0 +1,428 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bn256 implements a particular bilinear group at the 128-bit security level.
//
// Bilinear groups are the basis of many of the new cryptographic protocols
// that have been proposed over the past decade. They consist of a triplet of
// groups (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ
// (where gₓ is a generator of the respective group). That function is called
// a pairing function.
//
// This package specifically implements the Optimal Ate pairing over a 256-bit
// Barreto-Naehrig curve as described in
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
// with the implementation described in that paper.
package bn256
import (
"crypto/rand"
"io"
"math/big"
)
// BUG(agl): this implementation is not constant time.
// TODO(agl): keep GF(p²) elements in Montgomery form.
// G1 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G1 struct {
p *curvePoint
}
// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
func RandomG1(r io.Reader) (*big.Int, *G1, error) {
var k *big.Int
var err error
for {
k, err = rand.Int(r, Order)
if err != nil {
return nil, nil, err
}
if k.Sign() > 0 {
break
}
}
return k, new(G1).ScalarBaseMult(k), nil
}
func (g *G1) String() string {
return "bn256.G1" + g.p.String()
}
// CurvePoints returns p's curve points as big integers
func (e *G1) CurvePoints() (*big.Int, *big.Int, *big.Int, *big.Int) {
return e.p.x, e.p.y, e.p.z, e.p.t
}
// ScalarBaseMult sets e to g*k where g is the generator of the group and
// then returns e.
func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
if e.p == nil {
e.p = newCurvePoint(nil)
}
e.p.Mul(curveGen, k, new(bnPool))
return e
}
// ScalarMult sets e to a*k and then returns e.
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
if e.p == nil {
e.p = newCurvePoint(nil)
}
e.p.Mul(a.p, k, new(bnPool))
return e
}
// Add sets e to a+b and then returns e.
// BUG(agl): this function is not complete: a==b fails.
func (e *G1) Add(a, b *G1) *G1 {
if e.p == nil {
e.p = newCurvePoint(nil)
}
e.p.Add(a.p, b.p, new(bnPool))
return e
}
// Neg sets e to -a and then returns e.
func (e *G1) Neg(a *G1) *G1 {
if e.p == nil {
e.p = newCurvePoint(nil)
}
e.p.Negative(a.p)
return e
}
// Marshal converts n to a byte slice.
func (n *G1) Marshal() []byte {
n.p.MakeAffine(nil)
xBytes := new(big.Int).Mod(n.p.x, P).Bytes()
yBytes := new(big.Int).Mod(n.p.y, P).Bytes()
// Each value is a 256-bit number.
const numBytes = 256 / 8
ret := make([]byte, numBytes*2)
copy(ret[1*numBytes-len(xBytes):], xBytes)
copy(ret[2*numBytes-len(yBytes):], yBytes)
return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *G1) Unmarshal(m []byte) (*G1, bool) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) != 2*numBytes {
return nil, false
}
if e.p == nil {
e.p = newCurvePoint(nil)
}
e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
// This is the point at infinity.
e.p.y.SetInt64(1)
e.p.z.SetInt64(0)
e.p.t.SetInt64(0)
} else {
e.p.z.SetInt64(1)
e.p.t.SetInt64(1)
if !e.p.IsOnCurve() {
return nil, false
}
}
return e, true
}
// G2 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G2 struct {
p *twistPoint
}
// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
func RandomG2(r io.Reader) (*big.Int, *G2, error) {
var k *big.Int
var err error
for {
k, err = rand.Int(r, Order)
if err != nil {
return nil, nil, err
}
if k.Sign() > 0 {
break
}
}
return k, new(G2).ScalarBaseMult(k), nil
}
func (g *G2) String() string {
return "bn256.G2" + g.p.String()
}
// CurvePoints returns the curve points of p which includes the real
// and imaginary parts of the curve point.
func (e *G2) CurvePoints() (*gfP2, *gfP2, *gfP2, *gfP2) {
return e.p.x, e.p.y, e.p.z, e.p.t
}
// ScalarBaseMult sets e to g*k where g is the generator of the group and
// then returns e.
func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
if e.p == nil {
e.p = newTwistPoint(nil)
}
e.p.Mul(twistGen, k, new(bnPool))
return e
}
// ScalarMult sets e to a*k and then returns e.
func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
if e.p == nil {
e.p = newTwistPoint(nil)
}
e.p.Mul(a.p, k, new(bnPool))
return e
}
// Add sets e to a+b and then returns e.
// BUG(agl): this function is not complete: a==b fails.
func (e *G2) Add(a, b *G2) *G2 {
if e.p == nil {
e.p = newTwistPoint(nil)
}
e.p.Add(a.p, b.p, new(bnPool))
return e
}
// Marshal converts n into a byte slice.
func (n *G2) Marshal() []byte {
n.p.MakeAffine(nil)
xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes()
xyBytes := new(big.Int).Mod(n.p.x.y, P).Bytes()
yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes()
yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes()
// Each value is a 256-bit number.
const numBytes = 256 / 8
ret := make([]byte, numBytes*4)
copy(ret[1*numBytes-len(xxBytes):], xxBytes)
copy(ret[2*numBytes-len(xyBytes):], xyBytes)
copy(ret[3*numBytes-len(yxBytes):], yxBytes)
copy(ret[4*numBytes-len(yyBytes):], yyBytes)
return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *G2) Unmarshal(m []byte) (*G2, bool) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) != 4*numBytes {
return nil, false
}
if e.p == nil {
e.p = newTwistPoint(nil)
}
e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
if e.p.x.x.Sign() == 0 &&
e.p.x.y.Sign() == 0 &&
e.p.y.x.Sign() == 0 &&
e.p.y.y.Sign() == 0 {
// This is the point at infinity.
e.p.y.SetOne()
e.p.z.SetZero()
e.p.t.SetZero()
} else {
e.p.z.SetOne()
e.p.t.SetOne()
if !e.p.IsOnCurve() {
return nil, false
}
}
return e, true
}
// GT is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type GT struct {
p *gfP12
}
func (g *GT) String() string {
return "bn256.GT" + g.p.String()
}
// ScalarMult sets e to a*k and then returns e.
func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
if e.p == nil {
e.p = newGFp12(nil)
}
e.p.Exp(a.p, k, new(bnPool))
return e
}
// Add sets e to a+b and then returns e.
func (e *GT) Add(a, b *GT) *GT {
if e.p == nil {
e.p = newGFp12(nil)
}
e.p.Mul(a.p, b.p, new(bnPool))
return e
}
// Neg sets e to -a and then returns e.
func (e *GT) Neg(a *GT) *GT {
if e.p == nil {
e.p = newGFp12(nil)
}
e.p.Invert(a.p, new(bnPool))
return e
}
// Marshal converts n into a byte slice.
func (n *GT) Marshal() []byte {
n.p.Minimal()
xxxBytes := n.p.x.x.x.Bytes()
xxyBytes := n.p.x.x.y.Bytes()
xyxBytes := n.p.x.y.x.Bytes()
xyyBytes := n.p.x.y.y.Bytes()
xzxBytes := n.p.x.z.x.Bytes()
xzyBytes := n.p.x.z.y.Bytes()
yxxBytes := n.p.y.x.x.Bytes()
yxyBytes := n.p.y.x.y.Bytes()
yyxBytes := n.p.y.y.x.Bytes()
yyyBytes := n.p.y.y.y.Bytes()
yzxBytes := n.p.y.z.x.Bytes()
yzyBytes := n.p.y.z.y.Bytes()
// Each value is a 256-bit number.
const numBytes = 256 / 8
ret := make([]byte, numBytes*12)
copy(ret[1*numBytes-len(xxxBytes):], xxxBytes)
copy(ret[2*numBytes-len(xxyBytes):], xxyBytes)
copy(ret[3*numBytes-len(xyxBytes):], xyxBytes)
copy(ret[4*numBytes-len(xyyBytes):], xyyBytes)
copy(ret[5*numBytes-len(xzxBytes):], xzxBytes)
copy(ret[6*numBytes-len(xzyBytes):], xzyBytes)
copy(ret[7*numBytes-len(yxxBytes):], yxxBytes)
copy(ret[8*numBytes-len(yxyBytes):], yxyBytes)
copy(ret[9*numBytes-len(yyxBytes):], yyxBytes)
copy(ret[10*numBytes-len(yyyBytes):], yyyBytes)
copy(ret[11*numBytes-len(yzxBytes):], yzxBytes)
copy(ret[12*numBytes-len(yzyBytes):], yzyBytes)
return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *GT) Unmarshal(m []byte) (*GT, bool) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) != 12*numBytes {
return nil, false
}
if e.p == nil {
e.p = newGFp12(nil)
}
e.p.x.x.x.SetBytes(m[0*numBytes : 1*numBytes])
e.p.x.x.y.SetBytes(m[1*numBytes : 2*numBytes])
e.p.x.y.x.SetBytes(m[2*numBytes : 3*numBytes])
e.p.x.y.y.SetBytes(m[3*numBytes : 4*numBytes])
e.p.x.z.x.SetBytes(m[4*numBytes : 5*numBytes])
e.p.x.z.y.SetBytes(m[5*numBytes : 6*numBytes])
e.p.y.x.x.SetBytes(m[6*numBytes : 7*numBytes])
e.p.y.x.y.SetBytes(m[7*numBytes : 8*numBytes])
e.p.y.y.x.SetBytes(m[8*numBytes : 9*numBytes])
e.p.y.y.y.SetBytes(m[9*numBytes : 10*numBytes])
e.p.y.z.x.SetBytes(m[10*numBytes : 11*numBytes])
e.p.y.z.y.SetBytes(m[11*numBytes : 12*numBytes])
return e, true
}
// Pair calculates an Optimal Ate pairing.
func Pair(g1 *G1, g2 *G2) *GT {
return &GT{optimalAte(g2.p, g1.p, new(bnPool))}
}
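// PairingCheck computes the product of the Optimal Ate pairings e(a[i], b[i])
// and reports whether the result is the identity element of GT.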
func PairingCheck(a []*G1, b []*G2) bool {
pool := new(bnPool)
e := newGFp12(pool)
e.SetOne()
for i := 0; i < len(a); i++ {
new_e := miller(b[i].p, a[i].p, pool)
e.Mul(e, new_e, pool)
}
ret := finalExponentiation(e, pool)
e.Put(pool)
return ret.IsOne()
}
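A compact, self-contained sketch of the exported API above, checking that e(aG₁, bG₂) matches e(G₁, G₂)^(ab); the import path is assumed from this repository's layout:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/crypto/bn256"
)

func main() {
	a, pa, _ := bn256.RandomG1(rand.Reader) // a and g₁ᵃ
	b, qb, _ := bn256.RandomG2(rand.Reader) // b and g₂ᵇ

	left := bn256.Pair(pa, qb) // e(g₁ᵃ, g₂ᵇ)

	g1 := new(bn256.G1).ScalarBaseMult(big.NewInt(1)) // the G₁ generator
	g2 := new(bn256.G2).ScalarBaseMult(big.NewInt(1)) // the G₂ generator
	right := bn256.Pair(g1, g2)
	right.ScalarMult(right, a)
	right.ScalarMult(right, b) // e(g₁, g₂)^(a·b)

	fmt.Println(bytes.Equal(left.Marshal(), right.Marshal())) // expected: true
}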
// bnPool implements a tiny cache of *big.Int objects that's used to reduce the
// number of allocations made during processing.
type bnPool struct {
bns []*big.Int
count int
}
func (pool *bnPool) Get() *big.Int {
if pool == nil {
return new(big.Int)
}
pool.count++
l := len(pool.bns)
if l == 0 {
return new(big.Int)
}
bn := pool.bns[l-1]
pool.bns = pool.bns[:l-1]
return bn
}
func (pool *bnPool) Put(bn *big.Int) {
if pool == nil {
return
}
pool.bns = append(pool.bns, bn)
pool.count--
}
func (pool *bnPool) Count() int {
return pool.count
}

crypto/bn256/bn256_test.go (new file, 304 lines)
View File

@ -0,0 +1,304 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"bytes"
"crypto/rand"
"math/big"
"testing"
)
func TestGFp2Invert(t *testing.T) {
pool := new(bnPool)
a := newGFp2(pool)
a.x.SetString("23423492374", 10)
a.y.SetString("12934872398472394827398470", 10)
inv := newGFp2(pool)
inv.Invert(a, pool)
b := newGFp2(pool).Mul(inv, a, pool)
if b.x.Int64() != 0 || b.y.Int64() != 1 {
t.Fatalf("bad result for a^-1*a: %s %s", b.x, b.y)
}
a.Put(pool)
b.Put(pool)
inv.Put(pool)
if c := pool.Count(); c > 0 {
t.Errorf("Pool count non-zero: %d\n", c)
}
}
func isZero(n *big.Int) bool {
return new(big.Int).Mod(n, P).Int64() == 0
}
func isOne(n *big.Int) bool {
return new(big.Int).Mod(n, P).Int64() == 1
}
func TestGFp6Invert(t *testing.T) {
pool := new(bnPool)
a := newGFp6(pool)
a.x.x.SetString("239487238491", 10)
a.x.y.SetString("2356249827341", 10)
a.y.x.SetString("082659782", 10)
a.y.y.SetString("182703523765", 10)
a.z.x.SetString("978236549263", 10)
a.z.y.SetString("64893242", 10)
inv := newGFp6(pool)
inv.Invert(a, pool)
b := newGFp6(pool).Mul(inv, a, pool)
if !isZero(b.x.x) ||
!isZero(b.x.y) ||
!isZero(b.y.x) ||
!isZero(b.y.y) ||
!isZero(b.z.x) ||
!isOne(b.z.y) {
t.Fatalf("bad result for a^-1*a: %s", b)
}
a.Put(pool)
b.Put(pool)
inv.Put(pool)
if c := pool.Count(); c > 0 {
t.Errorf("Pool count non-zero: %d\n", c)
}
}
func TestGFp12Invert(t *testing.T) {
pool := new(bnPool)
a := newGFp12(pool)
a.x.x.x.SetString("239846234862342323958623", 10)
a.x.x.y.SetString("2359862352529835623", 10)
a.x.y.x.SetString("928836523", 10)
a.x.y.y.SetString("9856234", 10)
a.x.z.x.SetString("235635286", 10)
a.x.z.y.SetString("5628392833", 10)
a.y.x.x.SetString("252936598265329856238956532167968", 10)
a.y.x.y.SetString("23596239865236954178968", 10)
a.y.y.x.SetString("95421692834", 10)
a.y.y.y.SetString("236548", 10)
a.y.z.x.SetString("924523", 10)
a.y.z.y.SetString("12954623", 10)
inv := newGFp12(pool)
inv.Invert(a, pool)
b := newGFp12(pool).Mul(inv, a, pool)
if !isZero(b.x.x.x) ||
!isZero(b.x.x.y) ||
!isZero(b.x.y.x) ||
!isZero(b.x.y.y) ||
!isZero(b.x.z.x) ||
!isZero(b.x.z.y) ||
!isZero(b.y.x.x) ||
!isZero(b.y.x.y) ||
!isZero(b.y.y.x) ||
!isZero(b.y.y.y) ||
!isZero(b.y.z.x) ||
!isOne(b.y.z.y) {
t.Fatalf("bad result for a^-1*a: %s", b)
}
a.Put(pool)
b.Put(pool)
inv.Put(pool)
if c := pool.Count(); c > 0 {
t.Errorf("Pool count non-zero: %d\n", c)
}
}
func TestCurveImpl(t *testing.T) {
pool := new(bnPool)
g := &curvePoint{
pool.Get().SetInt64(1),
pool.Get().SetInt64(-2),
pool.Get().SetInt64(1),
pool.Get().SetInt64(0),
}
x := pool.Get().SetInt64(32498273234)
X := newCurvePoint(pool).Mul(g, x, pool)
y := pool.Get().SetInt64(98732423523)
Y := newCurvePoint(pool).Mul(g, y, pool)
s1 := newCurvePoint(pool).Mul(X, y, pool).MakeAffine(pool)
s2 := newCurvePoint(pool).Mul(Y, x, pool).MakeAffine(pool)
if s1.x.Cmp(s2.x) != 0 ||
s2.x.Cmp(s1.x) != 0 {
t.Errorf("DH points don't match: (%s, %s) (%s, %s)", s1.x, s1.y, s2.x, s2.y)
}
pool.Put(x)
X.Put(pool)
pool.Put(y)
Y.Put(pool)
s1.Put(pool)
s2.Put(pool)
g.Put(pool)
if c := pool.Count(); c > 0 {
t.Errorf("Pool count non-zero: %d\n", c)
}
}
func TestOrderG1(t *testing.T) {
g := new(G1).ScalarBaseMult(Order)
if !g.p.IsInfinity() {
t.Error("G1 has incorrect order")
}
one := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
g.Add(g, one)
g.p.MakeAffine(nil)
if g.p.x.Cmp(one.p.x) != 0 || g.p.y.Cmp(one.p.y) != 0 {
t.Errorf("1+0 != 1 in G1")
}
}
func TestOrderG2(t *testing.T) {
g := new(G2).ScalarBaseMult(Order)
if !g.p.IsInfinity() {
t.Error("G2 has incorrect order")
}
one := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
g.Add(g, one)
g.p.MakeAffine(nil)
if g.p.x.x.Cmp(one.p.x.x) != 0 ||
g.p.x.y.Cmp(one.p.x.y) != 0 ||
g.p.y.x.Cmp(one.p.y.x) != 0 ||
g.p.y.y.Cmp(one.p.y.y) != 0 {
t.Errorf("1+0 != 1 in G2")
}
}
func TestOrderGT(t *testing.T) {
gt := Pair(&G1{curveGen}, &G2{twistGen})
g := new(GT).ScalarMult(gt, Order)
if !g.p.IsOne() {
t.Error("GT has incorrect order")
}
}
func TestBilinearity(t *testing.T) {
for i := 0; i < 2; i++ {
a, p1, _ := RandomG1(rand.Reader)
b, p2, _ := RandomG2(rand.Reader)
e1 := Pair(p1, p2)
e2 := Pair(&G1{curveGen}, &G2{twistGen})
e2.ScalarMult(e2, a)
e2.ScalarMult(e2, b)
minusE2 := new(GT).Neg(e2)
e1.Add(e1, minusE2)
if !e1.p.IsOne() {
t.Fatalf("bad pairing result: %s", e1)
}
}
}
func TestG1Marshal(t *testing.T) {
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
form := g.Marshal()
_, ok := new(G1).Unmarshal(form)
if !ok {
t.Fatalf("failed to unmarshal")
}
g.ScalarBaseMult(Order)
form = g.Marshal()
g2, ok := new(G1).Unmarshal(form)
if !ok {
t.Fatalf("failed to unmarshal ∞")
}
if !g2.p.IsInfinity() {
t.Fatalf("∞ unmarshaled incorrectly")
}
}
func TestG2Marshal(t *testing.T) {
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
form := g.Marshal()
_, ok := new(G2).Unmarshal(form)
if !ok {
t.Fatalf("failed to unmarshal")
}
g.ScalarBaseMult(Order)
form = g.Marshal()
g2, ok := new(G2).Unmarshal(form)
if !ok {
t.Fatalf("failed to unmarshal ∞")
}
if !g2.p.IsInfinity() {
t.Fatalf("∞ unmarshaled incorrectly")
}
}
func TestG1Identity(t *testing.T) {
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(0))
if !g.p.IsInfinity() {
t.Error("failure")
}
}
func TestG2Identity(t *testing.T) {
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(0))
if !g.p.IsInfinity() {
t.Error("failure")
}
}
func TestTripartiteDiffieHellman(t *testing.T) {
a, _ := rand.Int(rand.Reader, Order)
b, _ := rand.Int(rand.Reader, Order)
c, _ := rand.Int(rand.Reader, Order)
pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
k1 := Pair(pb, qc)
k1.ScalarMult(k1, a)
k1Bytes := k1.Marshal()
k2 := Pair(pc, qa)
k2.ScalarMult(k2, b)
k2Bytes := k2.Marshal()
k3 := Pair(pa, qb)
k3.ScalarMult(k3, c)
k3Bytes := k3.Marshal()
if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
t.Errorf("keys didn't agree")
}
}
func BenchmarkPairing(b *testing.B) {
for i := 0; i < b.N; i++ {
Pair(&G1{curveGen}, &G2{twistGen})
}
}

crypto/bn256/constants.go (new file, 44 lines)
View File

@ -0,0 +1,44 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"math/big"
)
func bigFromBase10(s string) *big.Int {
n, _ := new(big.Int).SetString(s, 10)
return n
}
// u is the BN parameter that determines the prime: 1868033³.
var u = bigFromBase10("4965661367192848881")
// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
var P = bigFromBase10("21888242871839275222246405745257275088696311157297823662689037894645226208583")
// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
var Order = bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617")
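The two polynomial forms quoted above can be checked directly from u; a standalone sketch with the values copied from this file (expected to print true twice if the comments hold):

package main

import (
	"fmt"
	"math/big"
)

// bnPoly evaluates 36u⁴ + 36u³ + c·u² + 6u + 1, the BN parametrisation used above.
func bnPoly(u *big.Int, c int64) *big.Int {
	u2 := new(big.Int).Mul(u, u)
	u3 := new(big.Int).Mul(u2, u)
	u4 := new(big.Int).Mul(u3, u)
	r := new(big.Int).Mul(u4, big.NewInt(36))
	r.Add(r, new(big.Int).Mul(u3, big.NewInt(36)))
	r.Add(r, new(big.Int).Mul(u2, big.NewInt(c)))
	r.Add(r, new(big.Int).Mul(u, big.NewInt(6)))
	return r.Add(r, big.NewInt(1))
}

func main() {
	u, _ := new(big.Int).SetString("4965661367192848881", 10)
	p, _ := new(big.Int).SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
	order, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)
	fmt.Println(bnPoly(u, 24).Cmp(p) == 0, bnPoly(u, 18).Cmp(order) == 0)
}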
// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+9.
var xiToPMinus1Over6 = &gfP2{bigFromBase10("16469823323077808223889137241176536799009286646108169935659301613961712198316"), bigFromBase10("8376118865763821496583973867626364092589906065868298776909617916018768340080")}
// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+9.
var xiToPMinus1Over3 = &gfP2{bigFromBase10("10307601595873709700152284273816112264069230130616436755625194854815875713954"), bigFromBase10("21575463638280843010398324269430826099269044274347216827212613867836435027261")}
// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+9.
var xiToPMinus1Over2 = &gfP2{bigFromBase10("3505843767911556378687030309984248845540243509899259641013678093033130930403"), bigFromBase10("2821565182194536844548159561693502659359617185244120367078079554186484126554")}
// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+9.
var xiToPSquaredMinus1Over3 = bigFromBase10("21888242871839275220042445260109153167277707414472061641714758635765020556616")
// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+9 (a cubic root of unity, mod p).
var xiTo2PSquaredMinus2Over3 = bigFromBase10("2203960485148121921418603742825762020974279258880205651966")
// xiToPSquaredMinus1Over6 is ξ^((p²-1)/6) where ξ = i+9 (a cubic root of -1, mod p).
var xiToPSquaredMinus1Over6 = bigFromBase10("21888242871839275220042445260109153167277707414472061641714758635765020556617")
// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+9.
var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19937756971775647987995932169929341994314640652964949448313374472400716661030"), bigFromBase10("2581911344467009335267311115468803099551665605076196740867805258568234346338")}

crypto/bn256/curve.go (new file, 278 lines)
View File

@ -0,0 +1,278 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"math/big"
)
// curvePoint implements the elliptic curve y²=x³+3. Points are kept in
// Jacobian form and t=z² when valid. G₁ is the set of points of this curve on
// GF(p).
type curvePoint struct {
x, y, z, t *big.Int
}
var curveB = new(big.Int).SetInt64(3)
// curveGen is the generator of G₁.
var curveGen = &curvePoint{
new(big.Int).SetInt64(1),
new(big.Int).SetInt64(-2),
new(big.Int).SetInt64(1),
new(big.Int).SetInt64(1),
}
func newCurvePoint(pool *bnPool) *curvePoint {
return &curvePoint{
pool.Get(),
pool.Get(),
pool.Get(),
pool.Get(),
}
}
func (c *curvePoint) String() string {
c.MakeAffine(new(bnPool))
return "(" + c.x.String() + ", " + c.y.String() + ")"
}
func (c *curvePoint) Put(pool *bnPool) {
pool.Put(c.x)
pool.Put(c.y)
pool.Put(c.z)
pool.Put(c.t)
}
func (c *curvePoint) Set(a *curvePoint) {
c.x.Set(a.x)
c.y.Set(a.y)
c.z.Set(a.z)
c.t.Set(a.t)
}
// IsOnCurve returns true iff c is on the curve where c must be in affine form.
func (c *curvePoint) IsOnCurve() bool {
yy := new(big.Int).Mul(c.y, c.y)
xxx := new(big.Int).Mul(c.x, c.x)
xxx.Mul(xxx, c.x)
yy.Sub(yy, xxx)
yy.Sub(yy, curveB)
if yy.Sign() < 0 || yy.Cmp(P) >= 0 {
yy.Mod(yy, P)
}
return yy.Sign() == 0
}
func (c *curvePoint) SetInfinity() {
c.z.SetInt64(0)
}
func (c *curvePoint) IsInfinity() bool {
return c.z.Sign() == 0
}
func (c *curvePoint) Add(a, b *curvePoint, pool *bnPool) {
if a.IsInfinity() {
c.Set(b)
return
}
if b.IsInfinity() {
c.Set(a)
return
}
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
// Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2]
// by [u1:s1:z1·z2] and [u2:s2:z1·z2]
// where u1 = x1·z2², s1 = y1·z2³ and u2 = x2·z1², s2 = y2·z1³
z1z1 := pool.Get().Mul(a.z, a.z)
z1z1.Mod(z1z1, P)
z2z2 := pool.Get().Mul(b.z, b.z)
z2z2.Mod(z2z2, P)
u1 := pool.Get().Mul(a.x, z2z2)
u1.Mod(u1, P)
u2 := pool.Get().Mul(b.x, z1z1)
u2.Mod(u2, P)
t := pool.Get().Mul(b.z, z2z2)
t.Mod(t, P)
s1 := pool.Get().Mul(a.y, t)
s1.Mod(s1, P)
t.Mul(a.z, z1z1)
t.Mod(t, P)
s2 := pool.Get().Mul(b.y, t)
s2.Mod(s2, P)
// Compute x = (2h)²(s²-u1-u2)
// where s = (s2-s1)/(u2-u1) is the slope of the line through
// (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below.
// This is also:
// 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
// = r² - j - 2v
// with the notations below.
h := pool.Get().Sub(u2, u1)
xEqual := h.Sign() == 0
t.Add(h, h)
// i = 4h²
i := pool.Get().Mul(t, t)
i.Mod(i, P)
// j = 4h³
j := pool.Get().Mul(h, i)
j.Mod(j, P)
t.Sub(s2, s1)
yEqual := t.Sign() == 0
if xEqual && yEqual {
c.Double(a, pool)
return
}
r := pool.Get().Add(t, t)
v := pool.Get().Mul(u1, i)
v.Mod(v, P)
// t4 = 4(s2-s1)²
t4 := pool.Get().Mul(r, r)
t4.Mod(t4, P)
t.Add(v, v)
t6 := pool.Get().Sub(t4, j)
c.x.Sub(t6, t)
// Set y = -(2h)³(s1 + s*(x/4h²-u1))
// This is also
// y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
t.Sub(v, c.x) // t7
t4.Mul(s1, j) // t8
t4.Mod(t4, P)
t6.Add(t4, t4) // t9
t4.Mul(r, t) // t10
t4.Mod(t4, P)
c.y.Sub(t4, t6)
// Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
t.Add(a.z, b.z) // t11
t4.Mul(t, t) // t12
t4.Mod(t4, P)
t.Sub(t4, z1z1) // t13
t4.Sub(t, z2z2) // t14
c.z.Mul(t4, h)
c.z.Mod(c.z, P)
pool.Put(z1z1)
pool.Put(z2z2)
pool.Put(u1)
pool.Put(u2)
pool.Put(t)
pool.Put(s1)
pool.Put(s2)
pool.Put(h)
pool.Put(i)
pool.Put(j)
pool.Put(r)
pool.Put(v)
pool.Put(t4)
pool.Put(t6)
}
func (c *curvePoint) Double(a *curvePoint, pool *bnPool) {
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
A := pool.Get().Mul(a.x, a.x)
A.Mod(A, P)
B := pool.Get().Mul(a.y, a.y)
B.Mod(B, P)
C_ := pool.Get().Mul(B, B)
C_.Mod(C_, P)
t := pool.Get().Add(a.x, B)
t2 := pool.Get().Mul(t, t)
t2.Mod(t2, P)
t.Sub(t2, A)
t2.Sub(t, C_)
d := pool.Get().Add(t2, t2)
t.Add(A, A)
e := pool.Get().Add(t, A)
f := pool.Get().Mul(e, e)
f.Mod(f, P)
t.Add(d, d)
c.x.Sub(f, t)
t.Add(C_, C_)
t2.Add(t, t)
t.Add(t2, t2)
c.y.Sub(d, c.x)
t2.Mul(e, c.y)
t2.Mod(t2, P)
c.y.Sub(t2, t)
t.Mul(a.y, a.z)
t.Mod(t, P)
c.z.Add(t, t)
pool.Put(A)
pool.Put(B)
pool.Put(C_)
pool.Put(t)
pool.Put(t2)
pool.Put(d)
pool.Put(e)
pool.Put(f)
}
func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoint {
sum := newCurvePoint(pool)
sum.SetInfinity()
t := newCurvePoint(pool)
for i := scalar.BitLen(); i >= 0; i-- {
t.Double(sum, pool)
if scalar.Bit(i) != 0 {
sum.Add(t, a, pool)
} else {
sum.Set(t)
}
}
c.Set(sum)
sum.Put(pool)
t.Put(pool)
return c
}
func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
return c
}
zInv := pool.Get().ModInverse(c.z, P)
t := pool.Get().Mul(c.y, zInv)
t.Mod(t, P)
zInv2 := pool.Get().Mul(zInv, zInv)
zInv2.Mod(zInv2, P)
c.y.Mul(t, zInv2)
c.y.Mod(c.y, P)
t.Mul(c.x, zInv2)
t.Mod(t, P)
c.x.Set(t)
c.z.SetInt64(1)
c.t.SetInt64(1)
pool.Put(zInv)
pool.Put(t)
pool.Put(zInv2)
return c
}
func (c *curvePoint) Negative(a *curvePoint) {
c.x.Set(a.x)
c.y.Neg(a.y)
c.z.Set(a.z)
c.t.SetInt64(0)
}
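A short, illustrative in-package sketch (relying on the same zero-value bnPool pattern that String uses above): the hard-coded generator really does satisfy y² = x³ + 3, and its y-coordinate of -2 is simply the negation of the more familiar generator (1, 2), so it generates the same prime-order group.
func checkCurveGen() bool {
	g := newCurvePoint(new(bnPool))
	g.Set(curveGen)
	// curveGen is stored with z = 1, so it is already affine:
	// (-2)² = 1³ + 3, hence IsOnCurve reports true.
	return g.IsOnCurve()
}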


@ -0,0 +1,43 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"crypto/rand"
)
func ExamplePair() {
// This implements the tripartite Diffie-Hellman algorithm from "A One
// Round Protocol for Tripartite Diffie-Hellman", A. Joux.
// http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf
// Each of three parties, a, b and c, generate a private value.
a, _ := rand.Int(rand.Reader, Order)
b, _ := rand.Int(rand.Reader, Order)
c, _ := rand.Int(rand.Reader, Order)
// Then each party calculates g₁ and g₂ times their private value.
pa := new(G1).ScalarBaseMult(a)
qa := new(G2).ScalarBaseMult(a)
pb := new(G1).ScalarBaseMult(b)
qb := new(G2).ScalarBaseMult(b)
pc := new(G1).ScalarBaseMult(c)
qc := new(G2).ScalarBaseMult(c)
// Now each party exchanges its public values with the other two and
// all parties can calculate the shared key.
k1 := Pair(pb, qc)
k1.ScalarMult(k1, a)
k2 := Pair(pc, qa)
k2.ScalarMult(k2, b)
k3 := Pair(pa, qb)
k3.ScalarMult(k3, c)
// k1, k2 and k3 will all be equal.
}
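The example stops at the comment above. As an illustrative extension (it assumes the package's exported GT.Marshal method and a "bytes" import, neither of which appears in this excerpt), the equality could be made explicit at the end of ExamplePair:
	// Hypothetical check: all three parties derived the same GT element,
	// so the marshalled encodings must agree.
	if !bytes.Equal(k1.Marshal(), k2.Marshal()) || !bytes.Equal(k2.Marshal(), k3.Marshal()) {
		panic("tripartite Diffie-Hellman keys disagree")
	}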

200
crypto/bn256/gfp12.go Normal file

@ -0,0 +1,200 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
import (
"math/big"
)
// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
// where ω²=τ.
type gfP12 struct {
x, y *gfP6 // value is xω + y
}
func newGFp12(pool *bnPool) *gfP12 {
return &gfP12{newGFp6(pool), newGFp6(pool)}
}
func (e *gfP12) String() string {
return "(" + e.x.String() + "," + e.y.String() + ")"
}
func (e *gfP12) Put(pool *bnPool) {
e.x.Put(pool)
e.y.Put(pool)
}
func (e *gfP12) Set(a *gfP12) *gfP12 {
e.x.Set(a.x)
e.y.Set(a.y)
return e
}
func (e *gfP12) SetZero() *gfP12 {
e.x.SetZero()
e.y.SetZero()
return e
}
func (e *gfP12) SetOne() *gfP12 {
e.x.SetZero()
e.y.SetOne()
return e
}
func (e *gfP12) Minimal() {
e.x.Minimal()
e.y.Minimal()
}
func (e *gfP12) IsZero() bool {
e.Minimal()
return e.x.IsZero() && e.y.IsZero()
}
func (e *gfP12) IsOne() bool {
e.Minimal()
return e.x.IsZero() && e.y.IsOne()
}
func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
e.x.Negative(a.x)
e.y.Set(a.y)
return a
}
func (e *gfP12) Negative(a *gfP12) *gfP12 {
e.x.Negative(a.x)
e.y.Negative(a.y)
return e
}
// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
func (e *gfP12) Frobenius(a *gfP12, pool *bnPool) *gfP12 {
e.x.Frobenius(a.x, pool)
e.y.Frobenius(a.y, pool)
e.x.MulScalar(e.x, xiToPMinus1Over6, pool)
return e
}
// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
func (e *gfP12) FrobeniusP2(a *gfP12, pool *bnPool) *gfP12 {
e.x.FrobeniusP2(a.x)
e.x.MulGFP(e.x, xiToPSquaredMinus1Over6)
e.y.FrobeniusP2(a.y)
return e
}
func (e *gfP12) Add(a, b *gfP12) *gfP12 {
e.x.Add(a.x, b.x)
e.y.Add(a.y, b.y)
return e
}
func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
e.x.Sub(a.x, b.x)
e.y.Sub(a.y, b.y)
return e
}
func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 {
tx := newGFp6(pool)
tx.Mul(a.x, b.y, pool)
t := newGFp6(pool)
t.Mul(b.x, a.y, pool)
tx.Add(tx, t)
ty := newGFp6(pool)
ty.Mul(a.y, b.y, pool)
t.Mul(a.x, b.x, pool)
t.MulTau(t, pool)
e.y.Add(ty, t)
e.x.Set(tx)
tx.Put(pool)
ty.Put(pool)
t.Put(pool)
return e
}
func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 {
e.x.Mul(e.x, b, pool)
e.y.Mul(e.y, b, pool)
return e
}
func (c *gfP12) Exp(a *gfP12, power *big.Int, pool *bnPool) *gfP12 {
sum := newGFp12(pool)
sum.SetOne()
t := newGFp12(pool)
for i := power.BitLen() - 1; i >= 0; i-- {
t.Square(sum, pool)
if power.Bit(i) != 0 {
sum.Mul(t, a, pool)
} else {
sum.Set(t)
}
}
c.Set(sum)
sum.Put(pool)
t.Put(pool)
return c
}
func (e *gfP12) Square(a *gfP12, pool *bnPool) *gfP12 {
// Complex squaring algorithm
v0 := newGFp6(pool)
v0.Mul(a.x, a.y, pool)
t := newGFp6(pool)
t.MulTau(a.x, pool)
t.Add(a.y, t)
ty := newGFp6(pool)
ty.Add(a.x, a.y)
ty.Mul(ty, t, pool)
ty.Sub(ty, v0)
t.MulTau(v0, pool)
ty.Sub(ty, t)
e.y.Set(ty)
e.x.Double(v0)
v0.Put(pool)
t.Put(pool)
ty.Put(pool)
return e
}
func (e *gfP12) Invert(a *gfP12, pool *bnPool) *gfP12 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
t1 := newGFp6(pool)
t2 := newGFp6(pool)
t1.Square(a.x, pool)
t2.Square(a.y, pool)
t1.MulTau(t1, pool)
t1.Sub(t2, t1)
t2.Invert(t1, pool)
e.x.Negative(a.x)
e.y.Set(a.y)
e.MulScalar(e, t2, pool)
t1.Put(pool)
t2.Put(pool)
return e
}

227
crypto/bn256/gfp2.go Normal file

@ -0,0 +1,227 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
import (
"math/big"
)
// gfP2 implements a field of size p² as a quadratic extension of the base
// field where i²=-1.
type gfP2 struct {
x, y *big.Int // value is xi+y.
}
func newGFp2(pool *bnPool) *gfP2 {
return &gfP2{pool.Get(), pool.Get()}
}
func (e *gfP2) String() string {
x := new(big.Int).Mod(e.x, P)
y := new(big.Int).Mod(e.y, P)
return "(" + x.String() + "," + y.String() + ")"
}
func (e *gfP2) Put(pool *bnPool) {
pool.Put(e.x)
pool.Put(e.y)
}
func (e *gfP2) Set(a *gfP2) *gfP2 {
e.x.Set(a.x)
e.y.Set(a.y)
return e
}
func (e *gfP2) SetZero() *gfP2 {
e.x.SetInt64(0)
e.y.SetInt64(0)
return e
}
func (e *gfP2) SetOne() *gfP2 {
e.x.SetInt64(0)
e.y.SetInt64(1)
return e
}
func (e *gfP2) Minimal() {
if e.x.Sign() < 0 || e.x.Cmp(P) >= 0 {
e.x.Mod(e.x, P)
}
if e.y.Sign() < 0 || e.y.Cmp(P) >= 0 {
e.y.Mod(e.y, P)
}
}
func (e *gfP2) IsZero() bool {
return e.x.Sign() == 0 && e.y.Sign() == 0
}
func (e *gfP2) IsOne() bool {
if e.x.Sign() != 0 {
return false
}
words := e.y.Bits()
return len(words) == 1 && words[0] == 1
}
func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
e.y.Set(a.y)
e.x.Neg(a.x)
return e
}
func (e *gfP2) Negative(a *gfP2) *gfP2 {
e.x.Neg(a.x)
e.y.Neg(a.y)
return e
}
func (e *gfP2) Add(a, b *gfP2) *gfP2 {
e.x.Add(a.x, b.x)
e.y.Add(a.y, b.y)
return e
}
func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
e.x.Sub(a.x, b.x)
e.y.Sub(a.y, b.y)
return e
}
func (e *gfP2) Double(a *gfP2) *gfP2 {
e.x.Lsh(a.x, 1)
e.y.Lsh(a.y, 1)
return e
}
func (c *gfP2) Exp(a *gfP2, power *big.Int, pool *bnPool) *gfP2 {
sum := newGFp2(pool)
sum.SetOne()
t := newGFp2(pool)
for i := power.BitLen() - 1; i >= 0; i-- {
t.Square(sum, pool)
if power.Bit(i) != 0 {
sum.Mul(t, a, pool)
} else {
sum.Set(t)
}
}
c.Set(sum)
sum.Put(pool)
t.Put(pool)
return c
}
// See "Multiplication and Squaring in Pairing-Friendly Fields",
// http://eprint.iacr.org/2006/471.pdf
func (e *gfP2) Mul(a, b *gfP2, pool *bnPool) *gfP2 {
tx := pool.Get().Mul(a.x, b.y)
t := pool.Get().Mul(b.x, a.y)
tx.Add(tx, t)
tx.Mod(tx, P)
ty := pool.Get().Mul(a.y, b.y)
t.Mul(a.x, b.x)
ty.Sub(ty, t)
e.y.Mod(ty, P)
e.x.Set(tx)
pool.Put(tx)
pool.Put(ty)
pool.Put(t)
return e
}
func (e *gfP2) MulScalar(a *gfP2, b *big.Int) *gfP2 {
e.x.Mul(a.x, b)
e.y.Mul(a.y, b)
return e
}
// MulXi sets e=ξa where ξ=i+9 and then returns e.
func (e *gfP2) MulXi(a *gfP2, pool *bnPool) *gfP2 {
// (xi+y)(i+9) = (9x+y)i+(9y-x)
tx := pool.Get().Lsh(a.x, 3)
tx.Add(tx, a.x)
tx.Add(tx, a.y)
ty := pool.Get().Lsh(a.y, 3)
ty.Add(ty, a.y)
ty.Sub(ty, a.x)
e.x.Set(tx)
e.y.Set(ty)
pool.Put(tx)
pool.Put(ty)
return e
}
func (e *gfP2) Square(a *gfP2, pool *bnPool) *gfP2 {
// Complex squaring algorithm:
// (xi+y)² = (x+y)(y-x) + 2*i*x*y
t1 := pool.Get().Sub(a.y, a.x)
t2 := pool.Get().Add(a.x, a.y)
ty := pool.Get().Mul(t1, t2)
ty.Mod(ty, P)
t1.Mul(a.x, a.y)
t1.Lsh(t1, 1)
e.x.Mod(t1, P)
e.y.Set(ty)
pool.Put(t1)
pool.Put(t2)
pool.Put(ty)
return e
}
func (e *gfP2) Invert(a *gfP2, pool *bnPool) *gfP2 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
t := pool.Get()
t.Mul(a.y, a.y)
t2 := pool.Get()
t2.Mul(a.x, a.x)
t.Add(t, t2)
inv := pool.Get()
inv.ModInverse(t, P)
e.x.Neg(a.x)
e.x.Mul(e.x, inv)
e.x.Mod(e.x, P)
e.y.Mul(a.y, inv)
e.y.Mod(e.y, P)
pool.Put(t)
pool.Put(t2)
pool.Put(inv)
return e
}
func (e *gfP2) Real() *big.Int {
return e.x
}
func (e *gfP2) Imag() *big.Int {
return e.y
}
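Because MulXi takes a shift-based shortcut instead of the generic Karatsuba path, a small in-package sketch (illustrative only; it assumes the bnPool helper defined elsewhere in the package) can confirm that it agrees with Mul by the constant ξ = i + 9. For a = 3i + 5 both paths yield 32i + 42:
func checkMulXi() bool {
	pool := new(bnPool)
	a := &gfP2{big.NewInt(3), big.NewInt(5)}  // a = 3i + 5
	xi := &gfP2{big.NewInt(1), big.NewInt(9)} // ξ = i + 9
	fast := newGFp2(pool).MulXi(a, pool)   // shift-and-add shortcut
	slow := newGFp2(pool).Mul(a, xi, pool) // generic multiplication
	fast.Minimal()
	slow.Minimal()
	return fast.x.Cmp(slow.x) == 0 && fast.y.Cmp(slow.y) == 0
}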

296
crypto/bn256/gfp6.go Normal file

@ -0,0 +1,296 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
import (
"math/big"
)
// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ
// and ξ=i+9.
type gfP6 struct {
x, y, z *gfP2 // value is xτ² + yτ + z
}
func newGFp6(pool *bnPool) *gfP6 {
return &gfP6{newGFp2(pool), newGFp2(pool), newGFp2(pool)}
}
func (e *gfP6) String() string {
return "(" + e.x.String() + "," + e.y.String() + "," + e.z.String() + ")"
}
func (e *gfP6) Put(pool *bnPool) {
e.x.Put(pool)
e.y.Put(pool)
e.z.Put(pool)
}
func (e *gfP6) Set(a *gfP6) *gfP6 {
e.x.Set(a.x)
e.y.Set(a.y)
e.z.Set(a.z)
return e
}
func (e *gfP6) SetZero() *gfP6 {
e.x.SetZero()
e.y.SetZero()
e.z.SetZero()
return e
}
func (e *gfP6) SetOne() *gfP6 {
e.x.SetZero()
e.y.SetZero()
e.z.SetOne()
return e
}
func (e *gfP6) Minimal() {
e.x.Minimal()
e.y.Minimal()
e.z.Minimal()
}
func (e *gfP6) IsZero() bool {
return e.x.IsZero() && e.y.IsZero() && e.z.IsZero()
}
func (e *gfP6) IsOne() bool {
return e.x.IsZero() && e.y.IsZero() && e.z.IsOne()
}
func (e *gfP6) Negative(a *gfP6) *gfP6 {
e.x.Negative(a.x)
e.y.Negative(a.y)
e.z.Negative(a.z)
return e
}
func (e *gfP6) Frobenius(a *gfP6, pool *bnPool) *gfP6 {
e.x.Conjugate(a.x)
e.y.Conjugate(a.y)
e.z.Conjugate(a.z)
e.x.Mul(e.x, xiTo2PMinus2Over3, pool)
e.y.Mul(e.y, xiToPMinus1Over3, pool)
return e
}
// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z
func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 {
// τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3)
e.x.MulScalar(a.x, xiTo2PSquaredMinus2Over3)
// τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3)
e.y.MulScalar(a.y, xiToPSquaredMinus1Over3)
e.z.Set(a.z)
return e
}
func (e *gfP6) Add(a, b *gfP6) *gfP6 {
e.x.Add(a.x, b.x)
e.y.Add(a.y, b.y)
e.z.Add(a.z, b.z)
return e
}
func (e *gfP6) Sub(a, b *gfP6) *gfP6 {
e.x.Sub(a.x, b.x)
e.y.Sub(a.y, b.y)
e.z.Sub(a.z, b.z)
return e
}
func (e *gfP6) Double(a *gfP6) *gfP6 {
e.x.Double(a.x)
e.y.Double(a.y)
e.z.Double(a.z)
return e
}
func (e *gfP6) Mul(a, b *gfP6, pool *bnPool) *gfP6 {
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Section 4, Karatsuba method.
// http://eprint.iacr.org/2006/471.pdf
v0 := newGFp2(pool)
v0.Mul(a.z, b.z, pool)
v1 := newGFp2(pool)
v1.Mul(a.y, b.y, pool)
v2 := newGFp2(pool)
v2.Mul(a.x, b.x, pool)
t0 := newGFp2(pool)
t0.Add(a.x, a.y)
t1 := newGFp2(pool)
t1.Add(b.x, b.y)
tz := newGFp2(pool)
tz.Mul(t0, t1, pool)
tz.Sub(tz, v1)
tz.Sub(tz, v2)
tz.MulXi(tz, pool)
tz.Add(tz, v0)
t0.Add(a.y, a.z)
t1.Add(b.y, b.z)
ty := newGFp2(pool)
ty.Mul(t0, t1, pool)
ty.Sub(ty, v0)
ty.Sub(ty, v1)
t0.MulXi(v2, pool)
ty.Add(ty, t0)
t0.Add(a.x, a.z)
t1.Add(b.x, b.z)
tx := newGFp2(pool)
tx.Mul(t0, t1, pool)
tx.Sub(tx, v0)
tx.Add(tx, v1)
tx.Sub(tx, v2)
e.x.Set(tx)
e.y.Set(ty)
e.z.Set(tz)
t0.Put(pool)
t1.Put(pool)
tx.Put(pool)
ty.Put(pool)
tz.Put(pool)
v0.Put(pool)
v1.Put(pool)
v2.Put(pool)
return e
}
func (e *gfP6) MulScalar(a *gfP6, b *gfP2, pool *bnPool) *gfP6 {
e.x.Mul(a.x, b, pool)
e.y.Mul(a.y, b, pool)
e.z.Mul(a.z, b, pool)
return e
}
func (e *gfP6) MulGFP(a *gfP6, b *big.Int) *gfP6 {
e.x.MulScalar(a.x, b)
e.y.MulScalar(a.y, b)
e.z.MulScalar(a.z, b)
return e
}
// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ
func (e *gfP6) MulTau(a *gfP6, pool *bnPool) {
tz := newGFp2(pool)
tz.MulXi(a.x, pool)
ty := newGFp2(pool)
ty.Set(a.y)
e.y.Set(a.z)
e.x.Set(ty)
e.z.Set(tz)
tz.Put(pool)
ty.Put(pool)
}
func (e *gfP6) Square(a *gfP6, pool *bnPool) *gfP6 {
v0 := newGFp2(pool).Square(a.z, pool)
v1 := newGFp2(pool).Square(a.y, pool)
v2 := newGFp2(pool).Square(a.x, pool)
c0 := newGFp2(pool).Add(a.x, a.y)
c0.Square(c0, pool)
c0.Sub(c0, v1)
c0.Sub(c0, v2)
c0.MulXi(c0, pool)
c0.Add(c0, v0)
c1 := newGFp2(pool).Add(a.y, a.z)
c1.Square(c1, pool)
c1.Sub(c1, v0)
c1.Sub(c1, v1)
xiV2 := newGFp2(pool).MulXi(v2, pool)
c1.Add(c1, xiV2)
c2 := newGFp2(pool).Add(a.x, a.z)
c2.Square(c2, pool)
c2.Sub(c2, v0)
c2.Add(c2, v1)
c2.Sub(c2, v2)
e.x.Set(c2)
e.y.Set(c1)
e.z.Set(c0)
v0.Put(pool)
v1.Put(pool)
v2.Put(pool)
c0.Put(pool)
c1.Put(pool)
c2.Put(pool)
xiV2.Put(pool)
return e
}
func (e *gfP6) Invert(a *gfP6, pool *bnPool) *gfP6 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
// Here we can give a short explanation of how it works: let j be a cubic root of
// unity in GF(p²) so that 1+j+j²=0.
// Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
// = (xτ² + yτ + z)(Cτ²+Bτ+A)
// = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm).
//
// On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
// = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy)
//
// So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz)
t1 := newGFp2(pool)
A := newGFp2(pool)
A.Square(a.z, pool)
t1.Mul(a.x, a.y, pool)
t1.MulXi(t1, pool)
A.Sub(A, t1)
B := newGFp2(pool)
B.Square(a.x, pool)
B.MulXi(B, pool)
t1.Mul(a.y, a.z, pool)
B.Sub(B, t1)
C_ := newGFp2(pool)
C_.Square(a.y, pool)
t1.Mul(a.x, a.z, pool)
C_.Sub(C_, t1)
F := newGFp2(pool)
F.Mul(C_, a.y, pool)
F.MulXi(F, pool)
t1.Mul(A, a.z, pool)
F.Add(F, t1)
t1.Mul(B, a.x, pool)
t1.MulXi(t1, pool)
F.Add(F, t1)
F.Invert(F, pool)
e.x.Mul(C_, F, pool)
e.y.Mul(B, F, pool)
e.z.Mul(A, F, pool)
t1.Put(pool)
A.Put(pool)
B.Put(pool)
C_.Put(pool)
F.Put(pool)
return e
}

71
crypto/bn256/main_test.go Normal file

@ -0,0 +1,71 @@
package bn256
import (
"testing"
"crypto/rand"
)
func TestRandomG2Marshal(t *testing.T) {
for i := 0; i < 10; i++ {
n, g2, err := RandomG2(rand.Reader)
if err != nil {
t.Error(err)
continue
}
t.Logf("%d: %x\n", n, g2.Marshal())
}
}
func TestPairings(t *testing.T) {
a1 := new(G1).ScalarBaseMult(bigFromBase10("1"))
a2 := new(G1).ScalarBaseMult(bigFromBase10("2"))
a37 := new(G1).ScalarBaseMult(bigFromBase10("37"))
an1 := new(G1).ScalarBaseMult(bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495616"))
b0 := new(G2).ScalarBaseMult(bigFromBase10("0"))
b1 := new(G2).ScalarBaseMult(bigFromBase10("1"))
b2 := new(G2).ScalarBaseMult(bigFromBase10("2"))
b27 := new(G2).ScalarBaseMult(bigFromBase10("27"))
b999 := new(G2).ScalarBaseMult(bigFromBase10("999"))
bn1 := new(G2).ScalarBaseMult(bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495616"))
p1 := Pair(a1, b1)
pn1 := Pair(a1, bn1)
np1 := Pair(an1, b1)
if pn1.String() != np1.String() {
t.Error("Pairing mismatch: e(a, -b) != e(-a, b)")
}
if !PairingCheck([]*G1{a1, an1}, []*G2{b1, b1}) {
t.Error("MultiAte check gave false negative!")
}
p0 := new(GT).Add(p1, pn1)
p0_2 := Pair(a1, b0)
if p0.String() != p0_2.String() {
t.Error("Pairing mismatch: e(a, b) * e(a, -b) != 1")
}
p0_3 := new(GT).ScalarMult(p1, bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617"))
if p0.String() != p0_3.String() {
t.Error("Pairing mismatch: e(a, b) has wrong order")
}
p2 := Pair(a2, b1)
p2_2 := Pair(a1, b2)
p2_3 := new(GT).ScalarMult(p1, bigFromBase10("2"))
if p2.String() != p2_2.String() {
t.Error("Pairing mismatch: e(a, b * 2) != e(a * 2, b)")
}
if p2.String() != p2_3.String() {
t.Error("Pairing mismatch: e(a, b * 2) != e(a, b) ** 2")
}
if p2.String() == p1.String() {
t.Error("Pairing is degenerate!")
}
if PairingCheck([]*G1{a1, a1}, []*G2{b1, b1}) {
t.Error("MultiAte check gave false positive!")
}
p999 := Pair(a37, b27)
p999_2 := Pair(a1, b999)
if p999.String() != p999_2.String() {
t.Error("Pairing mismatch: e(a * 37, b * 27) != e(a, b * 999)")
}
}

398
crypto/bn256/optate.go Normal file

@ -0,0 +1,398 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
// See the mixed addition algorithm from "Faster Computation of the
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
B := newGFp2(pool).Mul(p.x, r.t, pool)
D := newGFp2(pool).Add(p.y, r.z)
D.Square(D, pool)
D.Sub(D, r2)
D.Sub(D, r.t)
D.Mul(D, r.t, pool)
H := newGFp2(pool).Sub(B, r.x)
I := newGFp2(pool).Square(H, pool)
E := newGFp2(pool).Add(I, I)
E.Add(E, E)
J := newGFp2(pool).Mul(H, E, pool)
L1 := newGFp2(pool).Sub(D, r.y)
L1.Sub(L1, r.y)
V := newGFp2(pool).Mul(r.x, E, pool)
rOut = newTwistPoint(pool)
rOut.x.Square(L1, pool)
rOut.x.Sub(rOut.x, J)
rOut.x.Sub(rOut.x, V)
rOut.x.Sub(rOut.x, V)
rOut.z.Add(r.z, H)
rOut.z.Square(rOut.z, pool)
rOut.z.Sub(rOut.z, r.t)
rOut.z.Sub(rOut.z, I)
t := newGFp2(pool).Sub(V, rOut.x)
t.Mul(t, L1, pool)
t2 := newGFp2(pool).Mul(r.y, J, pool)
t2.Add(t2, t2)
rOut.y.Sub(t, t2)
rOut.t.Square(rOut.z, pool)
t.Add(p.y, rOut.z)
t.Square(t, pool)
t.Sub(t, r2)
t.Sub(t, rOut.t)
t2.Mul(L1, p.x, pool)
t2.Add(t2, t2)
a = newGFp2(pool)
a.Sub(t2, t)
c = newGFp2(pool)
c.MulScalar(rOut.z, q.y)
c.Add(c, c)
b = newGFp2(pool)
b.SetZero()
b.Sub(b, L1)
b.MulScalar(b, q.x)
b.Add(b, b)
B.Put(pool)
D.Put(pool)
H.Put(pool)
I.Put(pool)
E.Put(pool)
J.Put(pool)
L1.Put(pool)
V.Put(pool)
t.Put(pool)
t2.Put(pool)
return
}
func lineFunctionDouble(r *twistPoint, q *curvePoint, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
// See the doubling algorithm for a=0 from "Faster Computation of the
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
A := newGFp2(pool).Square(r.x, pool)
B := newGFp2(pool).Square(r.y, pool)
C_ := newGFp2(pool).Square(B, pool)
D := newGFp2(pool).Add(r.x, B)
D.Square(D, pool)
D.Sub(D, A)
D.Sub(D, C_)
D.Add(D, D)
E := newGFp2(pool).Add(A, A)
E.Add(E, A)
G := newGFp2(pool).Square(E, pool)
rOut = newTwistPoint(pool)
rOut.x.Sub(G, D)
rOut.x.Sub(rOut.x, D)
rOut.z.Add(r.y, r.z)
rOut.z.Square(rOut.z, pool)
rOut.z.Sub(rOut.z, B)
rOut.z.Sub(rOut.z, r.t)
rOut.y.Sub(D, rOut.x)
rOut.y.Mul(rOut.y, E, pool)
t := newGFp2(pool).Add(C_, C_)
t.Add(t, t)
t.Add(t, t)
rOut.y.Sub(rOut.y, t)
rOut.t.Square(rOut.z, pool)
t.Mul(E, r.t, pool)
t.Add(t, t)
b = newGFp2(pool)
b.SetZero()
b.Sub(b, t)
b.MulScalar(b, q.x)
a = newGFp2(pool)
a.Add(r.x, E)
a.Square(a, pool)
a.Sub(a, A)
a.Sub(a, G)
t.Add(B, B)
t.Add(t, t)
a.Sub(a, t)
c = newGFp2(pool)
c.Mul(rOut.z, r.t, pool)
c.Add(c, c)
c.MulScalar(c, q.y)
A.Put(pool)
B.Put(pool)
C_.Put(pool)
D.Put(pool)
E.Put(pool)
G.Put(pool)
t.Put(pool)
return
}
func mulLine(ret *gfP12, a, b, c *gfP2, pool *bnPool) {
a2 := newGFp6(pool)
a2.x.SetZero()
a2.y.Set(a)
a2.z.Set(b)
a2.Mul(a2, ret.x, pool)
t3 := newGFp6(pool).MulScalar(ret.y, c, pool)
t := newGFp2(pool)
t.Add(b, c)
t2 := newGFp6(pool)
t2.x.SetZero()
t2.y.Set(a)
t2.z.Set(t)
ret.x.Add(ret.x, ret.y)
ret.y.Set(t3)
ret.x.Mul(ret.x, t2, pool)
ret.x.Sub(ret.x, a2)
ret.x.Sub(ret.x, ret.y)
a2.MulTau(a2, pool)
ret.y.Add(ret.y, a2)
a2.Put(pool)
t3.Put(pool)
t2.Put(pool)
t.Put(pool)
}
// sixuPlus2NAF is 6u+2 in non-adjacent form.
var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0,
0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, 0, 1, 1,
1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1,
1, 0, 0, -1, 0, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, 1, 1}
// miller implements the Miller loop for calculating the Optimal Ate pairing.
// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf
func miller(q *twistPoint, p *curvePoint, pool *bnPool) *gfP12 {
ret := newGFp12(pool)
ret.SetOne()
aAffine := newTwistPoint(pool)
aAffine.Set(q)
aAffine.MakeAffine(pool)
bAffine := newCurvePoint(pool)
bAffine.Set(p)
bAffine.MakeAffine(pool)
minusA := newTwistPoint(pool)
minusA.Negative(aAffine, pool)
r := newTwistPoint(pool)
r.Set(aAffine)
r2 := newGFp2(pool)
r2.Square(aAffine.y, pool)
for i := len(sixuPlus2NAF) - 1; i > 0; i-- {
a, b, c, newR := lineFunctionDouble(r, bAffine, pool)
if i != len(sixuPlus2NAF)-1 {
ret.Square(ret, pool)
}
mulLine(ret, a, b, c, pool)
a.Put(pool)
b.Put(pool)
c.Put(pool)
r.Put(pool)
r = newR
switch sixuPlus2NAF[i-1] {
case 1:
a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2, pool)
case -1:
a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2, pool)
default:
continue
}
mulLine(ret, a, b, c, pool)
a.Put(pool)
b.Put(pool)
c.Put(pool)
r.Put(pool)
r = newR
}
// In order to calculate Q1 we have to convert q from the sextic twist
// to the full GF(p^12) group, apply the Frobenius there, and convert
// back.
//
// The twist isomorphism is (x', y') -> (xω², yω³). If we consider just
// x for a moment, then after applying the Frobenius, we have x̄ω^(2p)
// where x̄ is the conjugate of x. If we are going to apply the inverse
// isomorphism we need a value with a single coefficient of ω² so we
// rewrite this as x̄ω^(2p-2)ω². ω⁶ = ξ and, due to the construction of
// p, 2p-2 is a multiple of six. Therefore we can rewrite as
// x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the
// ω².
//
// A similar argument can be made for the y value.
q1 := newTwistPoint(pool)
q1.x.Conjugate(aAffine.x)
q1.x.Mul(q1.x, xiToPMinus1Over3, pool)
q1.y.Conjugate(aAffine.y)
q1.y.Mul(q1.y, xiToPMinus1Over2, pool)
q1.z.SetOne()
q1.t.SetOne()
// For Q2 we are applying the p² Frobenius. The two conjugations cancel
// out and we are left only with the factors from the isomorphism. In
// the case of x, we end up with a pure number which is why
// xiToPSquaredMinus1Over3 is ∈ GF(p). With y we get a factor of -1. We
// ignore this to end up with -Q2.
minusQ2 := newTwistPoint(pool)
minusQ2.x.MulScalar(aAffine.x, xiToPSquaredMinus1Over3)
minusQ2.y.Set(aAffine.y)
minusQ2.z.SetOne()
minusQ2.t.SetOne()
r2.Square(q1.y, pool)
a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2, pool)
mulLine(ret, a, b, c, pool)
a.Put(pool)
b.Put(pool)
c.Put(pool)
r.Put(pool)
r = newR
r2.Square(minusQ2.y, pool)
a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2, pool)
mulLine(ret, a, b, c, pool)
a.Put(pool)
b.Put(pool)
c.Put(pool)
r.Put(pool)
r = newR
aAffine.Put(pool)
bAffine.Put(pool)
minusA.Put(pool)
r.Put(pool)
r2.Put(pool)
return ret
}
// finalExponentiation computes the (p¹²-1)/Order-th power of an element of
// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from
// http://cryptojedi.org/papers/dclxvi-20100714.pdf)
func finalExponentiation(in *gfP12, pool *bnPool) *gfP12 {
t1 := newGFp12(pool)
// This is the p^6-Frobenius
t1.x.Negative(in.x)
t1.y.Set(in.y)
inv := newGFp12(pool)
inv.Invert(in, pool)
t1.Mul(t1, inv, pool)
t2 := newGFp12(pool).FrobeniusP2(t1, pool)
t1.Mul(t1, t2, pool)
fp := newGFp12(pool).Frobenius(t1, pool)
fp2 := newGFp12(pool).FrobeniusP2(t1, pool)
fp3 := newGFp12(pool).Frobenius(fp2, pool)
fu, fu2, fu3 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
fu.Exp(t1, u, pool)
fu2.Exp(fu, u, pool)
fu3.Exp(fu2, u, pool)
y3 := newGFp12(pool).Frobenius(fu, pool)
fu2p := newGFp12(pool).Frobenius(fu2, pool)
fu3p := newGFp12(pool).Frobenius(fu3, pool)
y2 := newGFp12(pool).FrobeniusP2(fu2, pool)
y0 := newGFp12(pool)
y0.Mul(fp, fp2, pool)
y0.Mul(y0, fp3, pool)
y1, y4, y5 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
y1.Conjugate(t1)
y5.Conjugate(fu2)
y3.Conjugate(y3)
y4.Mul(fu, fu2p, pool)
y4.Conjugate(y4)
y6 := newGFp12(pool)
y6.Mul(fu3, fu3p, pool)
y6.Conjugate(y6)
t0 := newGFp12(pool)
t0.Square(y6, pool)
t0.Mul(t0, y4, pool)
t0.Mul(t0, y5, pool)
t1.Mul(y3, y5, pool)
t1.Mul(t1, t0, pool)
t0.Mul(t0, y2, pool)
t1.Square(t1, pool)
t1.Mul(t1, t0, pool)
t1.Square(t1, pool)
t0.Mul(t1, y1, pool)
t1.Mul(t1, y0, pool)
t0.Square(t0, pool)
t0.Mul(t0, t1, pool)
inv.Put(pool)
t1.Put(pool)
t2.Put(pool)
fp.Put(pool)
fp2.Put(pool)
fp3.Put(pool)
fu.Put(pool)
fu2.Put(pool)
fu3.Put(pool)
fu2p.Put(pool)
fu3p.Put(pool)
y0.Put(pool)
y1.Put(pool)
y2.Put(pool)
y3.Put(pool)
y4.Put(pool)
y5.Put(pool)
y6.Put(pool)
return t0
}
func optimalAte(a *twistPoint, b *curvePoint, pool *bnPool) *gfP12 {
e := miller(a, b, pool)
ret := finalExponentiation(e, pool)
e.Put(pool)
if a.IsInfinity() || b.IsInfinity() {
ret.SetOne()
}
return ret
}
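The exported entry points (Pair, PairingCheck) live in bn256.go, which is outside this excerpt. Roughly, and only as a sketch of how the pieces above fit together, with the field names g1.p, g2.p and the GT wrapper assumed rather than taken from this diff, a pairing call reduces to optimalAte:
// Hypothetical sketch, not the verbatim implementation.
func pairSketch(g1 *G1, g2 *G2) *GT {
	pool := new(bnPool)
	// Miller loop followed by the final exponentiation, as defined above.
	e := optimalAte(g2.p, g1.p, pool)
	return &GT{e}
}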

249
crypto/bn256/twist.go Normal file

@ -0,0 +1,249 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"math/big"
)
// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
// n-torsion points of this curve over GF(p²) (where n = Order)
type twistPoint struct {
x, y, z, t *gfP2
}
var twistB = &gfP2{
bigFromBase10("266929791119991161246907387137283842545076965332900288569378510910307636690"),
bigFromBase10("19485874751759354771024239261021720505790618469301721065564631296452457478373"),
}
// twistGen is the generator of group G₂.
var twistGen = &twistPoint{
&gfP2{
bigFromBase10("11559732032986387107991004021392285783925812861821192530917403151452391805634"),
bigFromBase10("10857046999023057135944570762232829481370756359578518086990519993285655852781"),
},
&gfP2{
bigFromBase10("4082367875863433681332203403145435568316851327593401208105741076214120093531"),
bigFromBase10("8495653923123431417604973247489272438418190587263600148770280649306958101930"),
},
&gfP2{
bigFromBase10("0"),
bigFromBase10("1"),
},
&gfP2{
bigFromBase10("0"),
bigFromBase10("1"),
},
}
func newTwistPoint(pool *bnPool) *twistPoint {
return &twistPoint{
newGFp2(pool),
newGFp2(pool),
newGFp2(pool),
newGFp2(pool),
}
}
func (c *twistPoint) String() string {
return "(" + c.x.String() + ", " + c.y.String() + ", " + c.z.String() + ")"
}
func (c *twistPoint) Put(pool *bnPool) {
c.x.Put(pool)
c.y.Put(pool)
c.z.Put(pool)
c.t.Put(pool)
}
func (c *twistPoint) Set(a *twistPoint) {
c.x.Set(a.x)
c.y.Set(a.y)
c.z.Set(a.z)
c.t.Set(a.t)
}
// IsOnCurve returns true iff c is on the curve; c must be in affine form.
func (c *twistPoint) IsOnCurve() bool {
pool := new(bnPool)
yy := newGFp2(pool).Square(c.y, pool)
xxx := newGFp2(pool).Square(c.x, pool)
xxx.Mul(xxx, c.x, pool)
yy.Sub(yy, xxx)
yy.Sub(yy, twistB)
yy.Minimal()
return yy.x.Sign() == 0 && yy.y.Sign() == 0
}
func (c *twistPoint) SetInfinity() {
c.z.SetZero()
}
func (c *twistPoint) IsInfinity() bool {
return c.z.IsZero()
}
func (c *twistPoint) Add(a, b *twistPoint, pool *bnPool) {
// For additional comments, see the same function in curve.go.
if a.IsInfinity() {
c.Set(b)
return
}
if b.IsInfinity() {
c.Set(a)
return
}
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
z1z1 := newGFp2(pool).Square(a.z, pool)
z2z2 := newGFp2(pool).Square(b.z, pool)
u1 := newGFp2(pool).Mul(a.x, z2z2, pool)
u2 := newGFp2(pool).Mul(b.x, z1z1, pool)
t := newGFp2(pool).Mul(b.z, z2z2, pool)
s1 := newGFp2(pool).Mul(a.y, t, pool)
t.Mul(a.z, z1z1, pool)
s2 := newGFp2(pool).Mul(b.y, t, pool)
h := newGFp2(pool).Sub(u2, u1)
xEqual := h.IsZero()
t.Add(h, h)
i := newGFp2(pool).Square(t, pool)
j := newGFp2(pool).Mul(h, i, pool)
t.Sub(s2, s1)
yEqual := t.IsZero()
if xEqual && yEqual {
c.Double(a, pool)
return
}
r := newGFp2(pool).Add(t, t)
v := newGFp2(pool).Mul(u1, i, pool)
t4 := newGFp2(pool).Square(r, pool)
t.Add(v, v)
t6 := newGFp2(pool).Sub(t4, j)
c.x.Sub(t6, t)
t.Sub(v, c.x) // t7
t4.Mul(s1, j, pool) // t8
t6.Add(t4, t4) // t9
t4.Mul(r, t, pool) // t10
c.y.Sub(t4, t6)
t.Add(a.z, b.z) // t11
t4.Square(t, pool) // t12
t.Sub(t4, z1z1) // t13
t4.Sub(t, z2z2) // t14
c.z.Mul(t4, h, pool)
z1z1.Put(pool)
z2z2.Put(pool)
u1.Put(pool)
u2.Put(pool)
t.Put(pool)
s1.Put(pool)
s2.Put(pool)
h.Put(pool)
i.Put(pool)
j.Put(pool)
r.Put(pool)
v.Put(pool)
t4.Put(pool)
t6.Put(pool)
}
func (c *twistPoint) Double(a *twistPoint, pool *bnPool) {
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
A := newGFp2(pool).Square(a.x, pool)
B := newGFp2(pool).Square(a.y, pool)
C_ := newGFp2(pool).Square(B, pool)
t := newGFp2(pool).Add(a.x, B)
t2 := newGFp2(pool).Square(t, pool)
t.Sub(t2, A)
t2.Sub(t, C_)
d := newGFp2(pool).Add(t2, t2)
t.Add(A, A)
e := newGFp2(pool).Add(t, A)
f := newGFp2(pool).Square(e, pool)
t.Add(d, d)
c.x.Sub(f, t)
t.Add(C_, C_)
t2.Add(t, t)
t.Add(t2, t2)
c.y.Sub(d, c.x)
t2.Mul(e, c.y, pool)
c.y.Sub(t2, t)
t.Mul(a.y, a.z, pool)
c.z.Add(t, t)
A.Put(pool)
B.Put(pool)
C_.Put(pool)
t.Put(pool)
t2.Put(pool)
d.Put(pool)
e.Put(pool)
f.Put(pool)
}
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoint {
sum := newTwistPoint(pool)
sum.SetInfinity()
t := newTwistPoint(pool)
for i := scalar.BitLen(); i >= 0; i-- {
t.Double(sum, pool)
if scalar.Bit(i) != 0 {
sum.Add(t, a, pool)
} else {
sum.Set(t)
}
}
c.Set(sum)
sum.Put(pool)
t.Put(pool)
return c
}
func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
if c.z.IsOne() {
return c
}
zInv := newGFp2(pool).Invert(c.z, pool)
t := newGFp2(pool).Mul(c.y, zInv, pool)
zInv2 := newGFp2(pool).Square(zInv, pool)
c.y.Mul(t, zInv2, pool)
t.Mul(c.x, zInv2, pool)
c.x.Set(t)
c.z.SetOne()
c.t.SetOne()
zInv.Put(pool)
t.Put(pool)
zInv2.Put(pool)
return c
}
func (c *twistPoint) Negative(a *twistPoint, pool *bnPool) {
c.x.Set(a.x)
c.y.SetZero()
c.y.Sub(c.y, a.y)
c.z.Set(a.z)
c.t.SetZero()
}


@ -22,12 +22,14 @@ import (
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"os"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/rlp"
)
@ -66,9 +68,6 @@ func Keccak512(data ...[]byte) []byte {
return d.Sum(nil)
}
// Deprecated: For backward compatibility as other packages depend on these
func Sha3Hash(data ...[]byte) common.Hash { return Keccak256Hash(data...) }
// Creates an ethereum address given the bytes and the nonce
func CreateAddress(b common.Address, nonce uint64) common.Address {
data, _ := rlp.EncodeToBytes([]interface{}{b, nonce})
@ -76,23 +75,38 @@ func CreateAddress(b common.Address, nonce uint64) common.Address {
}
// ToECDSA creates a private key with the given D value.
func ToECDSA(prv []byte) *ecdsa.PrivateKey {
if len(prv) == 0 {
return nil
}
func ToECDSA(d []byte) (*ecdsa.PrivateKey, error) {
return toECDSA(d, true)
}
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = S256()
priv.D = new(big.Int).SetBytes(prv)
priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(prv)
// ToECDSAUnsafe blindly converts a binary blob to a private key. It should almost
// never be used unless you are sure the input is valid and want to avoid hitting
// errors due to bad origin encoding (0 prefixes cut off).
func ToECDSAUnsafe(d []byte) *ecdsa.PrivateKey {
priv, _ := toECDSA(d, false)
return priv
}
func FromECDSA(prv *ecdsa.PrivateKey) []byte {
if prv == nil {
// toECDSA creates a private key with the given D value. The strict parameter
// controls whether the key's length should be enforced at the curve size or
// whether legacy encodings (0 prefixes) are also accepted.
func toECDSA(d []byte, strict bool) (*ecdsa.PrivateKey, error) {
priv := new(ecdsa.PrivateKey)
priv.PublicKey.Curve = S256()
if strict && 8*len(d) != priv.Params().BitSize {
return nil, fmt.Errorf("invalid length, need %d bits", priv.Params().BitSize)
}
priv.D = new(big.Int).SetBytes(d)
priv.PublicKey.X, priv.PublicKey.Y = priv.PublicKey.Curve.ScalarBaseMult(d)
return priv, nil
}
// FromECDSA exports a private key into a binary dump.
func FromECDSA(priv *ecdsa.PrivateKey) []byte {
if priv == nil {
return nil
}
return prv.D.Bytes()
return math.PaddedBigBytes(priv.D, priv.Params().BitSize/8)
}
func ToECDSAPub(pub []byte) *ecdsa.PublicKey {
@ -116,14 +130,10 @@ func HexToECDSA(hexkey string) (*ecdsa.PrivateKey, error) {
if err != nil {
return nil, errors.New("invalid hex string")
}
if len(b) != 32 {
return nil, errors.New("invalid length, need 256 bits")
}
return ToECDSA(b), nil
return ToECDSA(b)
}
// LoadECDSA loads a secp256k1 private key from the given file.
// The key data is expected to be hex-encoded.
func LoadECDSA(file string) (*ecdsa.PrivateKey, error) {
buf := make([]byte, 64)
fd, err := os.Open(file)
@ -139,8 +149,7 @@ func LoadECDSA(file string) (*ecdsa.PrivateKey, error) {
if err != nil {
return nil, err
}
return ToECDSA(key), nil
return ToECDSA(key)
}
// SaveECDSA saves a secp256k1 private key to the given file with
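A minimal standalone usage sketch of the parsing changes above (the hex string is a throwaway test value): HexToECDSA still requires exactly 256 bits, but the check now lives in ToECDSA, which returns an error instead of silently accepting short keys, and FromECDSA left-pads its output to the full 32 bytes.
package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// 64 hex characters = 32 bytes = 256 bits; other lengths now return an error.
	key, err := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x\n", crypto.FromECDSA(key)) // round-trips to the padded 32-byte form
}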


@ -36,7 +36,7 @@ var testPrivHex = "289c2857d4598e37fb9647507e47a309d6133539bf21a8b9cb6df88fd5232
// These tests are sanity checks.
// They should ensure that we don't e.g. use Sha3-224 instead of Sha3-256
// and that the sha3 library uses keccak-f permutation.
func TestSha3Hash(t *testing.T) {
func TestKeccak256Hash(t *testing.T) {
msg := []byte("abc")
exp, _ := hex.DecodeString("4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45")
checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := Keccak256Hash(in); return h[:] }, msg, exp)


@ -153,6 +153,12 @@ func (api *PrivateMinerAPI) Start(threads *int) error {
}
// Start the miner and return
if !api.e.IsMining() {
// Propagate the initial price point to the transaction pool
api.e.lock.RLock()
price := api.e.gasPrice
api.e.lock.RUnlock()
api.e.txPool.SetGasPrice(price)
return api.e.StartMining(true)
}
return nil
@ -180,7 +186,11 @@ func (api *PrivateMinerAPI) SetExtra(extra string) (bool, error) {
// SetGasPrice sets the minimum accepted gas price for the miner.
func (api *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {
api.e.Miner().SetGasPrice((*big.Int)(&gasPrice))
api.e.lock.Lock()
api.e.gasPrice = (*big.Int)(&gasPrice)
api.e.lock.Unlock()
api.e.txPool.SetGasPrice((*big.Int)(&gasPrice))
return true
}


@ -20,6 +20,7 @@ package eth
import (
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"sync/atomic"
@ -75,13 +76,14 @@ type Ethereum struct {
ApiBackend *EthApiBackend
miner *miner.Miner
Mining bool
MinerThreads int
etherbase common.Address
miner *miner.Miner
gasPrice *big.Int
etherbase common.Address
networkId uint64
netRPCService *ethapi.PublicNetAPI
lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
}
func (s *Ethereum) AddLesServer(ls LesServer) {
@ -119,8 +121,8 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
shutdownChan: make(chan bool),
stopDbUpgrade: stopDbUpgrade,
networkId: config.NetworkId,
gasPrice: config.GasPrice,
etherbase: config.Etherbase,
MinerThreads: config.MinerThreads,
}
if err := addMipmapBloomBins(chainDb); err != nil {
@ -148,7 +150,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
core.WriteChainConfig(chainDb, genesisHash, chainConfig)
}
newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
newPool := core.NewTxPool(config.TxPool, eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit)
eth.txPool = newPool
maxPeers := config.MaxPeers
@ -167,7 +169,6 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
eth.miner = miner.New(eth, eth.chainConfig, eth.EventMux(), eth.engine)
eth.miner.SetGasPrice(config.GasPrice)
eth.miner.SetExtra(makeExtraData(config.ExtraData))
eth.ApiBackend = &EthApiBackend{eth, nil}
@ -293,8 +294,12 @@ func (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {
}
func (s *Ethereum) Etherbase() (eb common.Address, err error) {
if s.etherbase != (common.Address{}) {
return s.etherbase, nil
s.lock.RLock()
etherbase := s.etherbase
s.lock.RUnlock()
if etherbase != (common.Address{}) {
return etherbase, nil
}
if wallets := s.AccountManager().Wallets(); len(wallets) > 0 {
if accounts := wallets[0].Accounts(); len(accounts) > 0 {
@ -306,7 +311,10 @@ func (s *Ethereum) Etherbase() (eb common.Address, err error) {
// set in js console via admin interface or wrapper from cli flags
func (self *Ethereum) SetEtherbase(etherbase common.Address) {
self.lock.Lock()
self.etherbase = etherbase
self.lock.Unlock()
self.miner.SetEtherbase(etherbase)
}


@ -48,7 +48,7 @@ func NewContractBackend(apiBackend ethapi.Backend) *ContractBackend {
return &ContractBackend{
eapi: ethapi.NewPublicEthereumAPI(apiBackend),
bcapi: ethapi.NewPublicBlockChainAPI(apiBackend),
txapi: ethapi.NewPublicTransactionPoolAPI(apiBackend),
txapi: ethapi.NewPublicTransactionPoolAPI(apiBackend, new(ethapi.AddrLocker)),
}
}


@ -42,8 +42,9 @@ var DefaultConfig = Config{
NetworkId: 1,
LightPeers: 20,
DatabaseCache: 128,
GasPrice: big.NewInt(20 * params.Shannon),
GasPrice: big.NewInt(18 * params.Shannon),
TxPool: core.DefaultTxPoolConfig,
GPO: gasprice.Config{
Blocks: 10,
Percentile: 50,
@ -99,6 +100,9 @@ type Config struct {
EthashDatasetsInMem int
EthashDatasetsOnDisk int
// Transaction pool options
TxPool core.TxPoolConfig
// Gas Price Oracle options
GPO gasprice.Config


@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
EthashDatasetDir string
EthashDatasetsInMem int
EthashDatasetsOnDisk int
TxPool core.TxPoolConfig
GPO gasprice.Config
EnablePreimageRecording bool
DocRoot string `toml:"-"`
@ -60,6 +61,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.EthashDatasetDir = c.EthashDatasetDir
enc.EthashDatasetsInMem = c.EthashDatasetsInMem
enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk
enc.TxPool = c.TxPool
enc.GPO = c.GPO
enc.EnablePreimageRecording = c.EnablePreimageRecording
enc.DocRoot = c.DocRoot
@ -90,6 +92,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
EthashDatasetDir *string
EthashDatasetsInMem *int
EthashDatasetsOnDisk *int
TxPool *core.TxPoolConfig
GPO *gasprice.Config
EnablePreimageRecording *bool
DocRoot *string `toml:"-"`
@ -158,6 +161,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.EthashDatasetsOnDisk != nil {
c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk
}
if dec.TxPool != nil {
c.TxPool = *dec.TxPool
}
if dec.GPO != nil {
c.GPO = *dec.GPO
}


@ -171,6 +171,11 @@ func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, ne
return blockchain.CurrentBlock().NumberU64()
}
inserter := func(blocks types.Blocks) (int, error) {
// If fast sync is running, deny importing weird blocks
if atomic.LoadUint32(&manager.fastSync) == 1 {
log.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
return 0, nil
}
atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
return manager.blockchain.InsertChain(blocks)
}


@ -183,6 +183,7 @@ func (pm *ProtocolManager) synchronise(peer *peer) {
// The only scenario where this can happen is if the user manually (or via a
// bad block) rolled back a fast sync node below the sync point. In this case
// however it's safe to reenable fast sync.
atomic.StoreUint32(&pm.fastSync, 1)
mode = downloader.FastSync
}
if err := pm.downloader.Synchronise(peer.id, pHead, pTd, mode); err != nil {


@ -18,6 +18,7 @@
package ethstats
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -30,6 +31,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
@ -118,7 +120,7 @@ func (s *Service) Stop() error {
// loop keeps trying to connect to the netstats server, reporting chain events
// until termination.
func (s *Service) loop() {
// Subscribe tso chain events to execute updates on
// Subscribe to chain events to execute updates on
var emux *event.TypeMux
if s.eth != nil {
emux = s.eth.EventMux()
@ -131,6 +133,46 @@ func (s *Service) loop() {
txSub := emux.Subscribe(core.TxPreEvent{})
defer txSub.Unsubscribe()
// Start a goroutine that exhausts the subscriptions to avoid events piling up
var (
quitCh = make(chan struct{})
headCh = make(chan *types.Block, 1)
txCh = make(chan struct{}, 1)
)
go func() {
var lastTx mclock.AbsTime
for {
select {
// Notify of chain head events, but drop if too frequent
case head, ok := <-headSub.Chan():
if !ok { // node stopped
close(quitCh)
return
}
select {
case headCh <- head.Data.(core.ChainHeadEvent).Block:
default:
}
// Notify of new transaction events, but drop if too frequent
case _, ok := <-txSub.Chan():
if !ok { // node stopped
close(quitCh)
return
}
if time.Duration(mclock.Now()-lastTx) < time.Second {
continue
}
lastTx = mclock.Now()
select {
case txCh <- struct{}{}:
default:
}
}
}
}()
// Loop reporting until termination
for {
// Resolve the URL, defaulting to TLS, but falling back to none too
@ -150,7 +192,7 @@ func (s *Service) loop() {
if conf, err = websocket.NewConfig(url, "http://localhost/"); err != nil {
continue
}
conf.Dialer = &net.Dialer{Timeout: 3 * time.Second}
conf.Dialer = &net.Dialer{Timeout: 5 * time.Second}
if conn, err = websocket.DialConfig(conf); err == nil {
break
}
@ -180,6 +222,10 @@ func (s *Service) loop() {
for err == nil {
select {
case <-quitCh:
conn.Close()
return
case <-fullReport.C:
if err = s.report(conn); err != nil {
log.Warn("Full stats report failed", "err", err)
@ -188,30 +234,14 @@ func (s *Service) loop() {
if err = s.reportHistory(conn, list); err != nil {
log.Warn("Requested history report failed", "err", err)
}
case head, ok := <-headSub.Chan():
if !ok { // node stopped
conn.Close()
return
}
if err = s.reportBlock(conn, head.Data.(core.ChainHeadEvent).Block); err != nil {
case head := <-headCh:
if err = s.reportBlock(conn, head); err != nil {
log.Warn("Block stats report failed", "err", err)
}
if err = s.reportPending(conn); err != nil {
log.Warn("Post-block transaction stats report failed", "err", err)
}
case _, ok := <-txSub.Chan():
if !ok { // node stopped
conn.Close()
return
}
// Exhaust events to avoid reporting too frequently
for exhausted := false; !exhausted; {
select {
case <-headSub.Chan():
default:
exhausted = true
}
}
case <-txCh:
if err = s.reportPending(conn); err != nil {
log.Warn("Transaction stats report failed", "err", err)
}
@ -397,7 +427,7 @@ func (s *Service) reportLatency(conn *websocket.Conn) error {
select {
case <-s.pongCh:
// Pong delivered, report the latency
case <-time.After(3 * time.Second):
case <-time.After(5 * time.Second):
// Ping timeout, abort
return errors.New("ping timed out")
}
@ -426,21 +456,15 @@ type blockStats struct {
GasLimit *big.Int `json:"gasLimit"`
Diff string `json:"difficulty"`
TotalDiff string `json:"totalDifficulty"`
Txs txStats `json:"transactions"`
Txs []txStats `json:"transactions"`
TxHash common.Hash `json:"transactionsRoot"`
Root common.Hash `json:"stateRoot"`
Uncles uncleStats `json:"uncles"`
}
// txStats is a custom wrapper around a transaction array to force serializing
// empty arrays instead of returning null for them.
type txStats []*types.Transaction
func (s txStats) MarshalJSON() ([]byte, error) {
if txs := ([]*types.Transaction)(s); len(txs) > 0 {
return json.Marshal(txs)
}
return []byte("[]"), nil
// txStats is the information to report about individual transactions.
type txStats struct {
Hash common.Hash `json:"hash"`
}
// uncleStats is a custom wrapper around an uncle array to force serializing
@ -479,7 +503,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
var (
header *types.Header
td *big.Int
txs []*types.Transaction
txs []txStats
uncles []*types.Header
)
if s.eth != nil {
@ -490,7 +514,10 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
header = block.Header()
td = s.eth.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
txs = block.Transactions()
txs = make([]txStats, len(block.Transactions()))
for i, tx := range block.Transactions() {
txs[i].Hash = tx.Hash()
}
uncles = block.Uncles()
} else {
// Light nodes would need on-demand lookups for transactions/uncles, skip
@ -500,6 +527,7 @@ func (s *Service) assembleBlockStats(block *types.Block) *blockStats {
header = s.les.BlockChain().CurrentHeader()
}
td = s.les.BlockChain().GetTd(header.Hash(), header.Number.Uint64())
txs = []txStats{}
}
// Assemble and return the block stats
author, _ := s.engine.Author(header)
@ -639,7 +667,8 @@ func (s *Service) reportStats(conn *websocket.Conn) error {
sync := s.eth.Downloader().Progress()
syncing = s.eth.BlockChain().CurrentHeader().Number.Uint64() >= sync.HighestBlock
gasprice = int(s.eth.Miner().GasPrice().Uint64())
price, _ := s.eth.ApiBackend.SuggestPrice(context.Background())
gasprice = int(price.Uint64())
} else {
sync := s.les.Downloader().Progress()
syncing = s.les.BlockChain().CurrentHeader().Number.Uint64() >= sync.HighestBlock


@ -0,0 +1,53 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethapi
import (
"sync"
"github.com/ethereum/go-ethereum/common"
)
type AddrLocker struct {
mu sync.Mutex
locks map[common.Address]*sync.Mutex
}
// lock returns the lock of the given address.
func (l *AddrLocker) lock(address common.Address) *sync.Mutex {
l.mu.Lock()
defer l.mu.Unlock()
if l.locks == nil {
l.locks = make(map[common.Address]*sync.Mutex)
}
if _, ok := l.locks[address]; !ok {
l.locks[address] = new(sync.Mutex)
}
return l.locks[address]
}
// LockAddr locks an account's mutex. This is used to prevent another transaction
// from getting the same nonce until the lock is released. The mutex prevents the
// nonce from being read again while the first transaction is being signed.
func (l *AddrLocker) LockAddr(address common.Address) {
l.lock(address).Lock()
}
// UnlockAddr unlocks the mutex of the given account.
func (l *AddrLocker) UnlockAddr(address common.Address) {
l.lock(address).Unlock()
}
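A brief, hedged sketch of the intended locking pattern (it mirrors how the SendTransaction handlers below use the locker when the caller supplies no explicit nonce; the helper itself is hypothetical):
// Illustrative only: serialize nonce assignment per sender address.
var locker AddrLocker // the zero value is ready to use; the map is allocated lazily

func withNonceLock(from common.Address, sign func() error) error {
	locker.LockAddr(from)
	defer locker.UnlockAddr(from)
	// While the mutex is held, read the pending nonce, then build and sign the
	// transaction, so a concurrent request cannot be handed the same nonce.
	return sign()
}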


@ -19,7 +19,6 @@ package ethapi
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"math/big"
@ -118,16 +117,16 @@ func (s *PublicTxPoolAPI) Content() map[string]map[string]map[string]*RPCTransac
// Flatten the pending transactions
for account, txs := range pending {
dump := make(map[string]*RPCTransaction)
for nonce, tx := range txs {
dump[fmt.Sprintf("%d", nonce)] = newRPCPendingTransaction(tx)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx)
}
content["pending"][account.Hex()] = dump
}
// Flatten the queued transactions
for account, txs := range queue {
dump := make(map[string]*RPCTransaction)
for nonce, tx := range txs {
dump[fmt.Sprintf("%d", nonce)] = newRPCPendingTransaction(tx)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = newRPCPendingTransaction(tx)
}
content["queued"][account.Hex()] = dump
}
@ -162,16 +161,16 @@ func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string {
// Flatten the pending transactions
for account, txs := range pending {
dump := make(map[string]string)
for nonce, tx := range txs {
dump[fmt.Sprintf("%d", nonce)] = format(tx)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx)
}
content["pending"][account.Hex()] = dump
}
// Flatten the queued transactions
for account, txs := range queue {
dump := make(map[string]string)
for nonce, tx := range txs {
dump[fmt.Sprintf("%d", nonce)] = format(tx)
for _, tx := range txs {
dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx)
}
content["queued"][account.Hex()] = dump
}
@ -204,15 +203,17 @@ func (s *PublicAccountAPI) Accounts() []common.Address {
// It offers methods to create, (un)lock and list accounts. Some methods accept
// passwords and are therefore considered private by default.
type PrivateAccountAPI struct {
am *accounts.Manager
b Backend
am *accounts.Manager
nonceLock *AddrLocker
b Backend
}
// NewPrivateAccountAPI creates a new PrivateAccountAPI.
func NewPrivateAccountAPI(b Backend) *PrivateAccountAPI {
func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
return &PrivateAccountAPI{
am: b.AccountManager(),
b: b,
am: b.AccountManager(),
nonceLock: nonceLock,
b: b,
}
}
@ -282,12 +283,11 @@ func fetchKeystore(am *accounts.Manager) *keystore.KeyStore {
// ImportRawKey stores the given hex encoded ECDSA key into the key directory,
// encrypting it with the passphrase.
func (s *PrivateAccountAPI) ImportRawKey(privkey string, password string) (common.Address, error) {
hexkey, err := hex.DecodeString(privkey)
key, err := crypto.HexToECDSA(privkey)
if err != nil {
return common.Address{}, err
}
acc, err := fetchKeystore(s.am).ImportECDSA(crypto.ToECDSA(hexkey), password)
acc, err := fetchKeystore(s.am).ImportECDSA(key, password)
return acc.Address, err
}
@ -317,10 +317,6 @@ func (s *PrivateAccountAPI) LockAccount(addr common.Address) bool {
// tries to sign it with the key associated with args.To. If the given passwd isn't
// able to decrypt the key it fails.
func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) {
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return common.Hash{}, err
}
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.From}
@ -328,6 +324,18 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
if err != nil {
return common.Hash{}, err
}
if args.Nonce == nil {
// Hold the address's mutex around signing to prevent concurrent assignment of
// the same nonce to multiple transactions from the same account.
s.nonceLock.LockAddr(args.From)
defer s.nonceLock.UnlockAddr(args.From)
}
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
tx := args.toTransaction()
@ -887,12 +895,13 @@ func newRPCTransaction(b *types.Block, txHash common.Hash) (*RPCTransaction, err
// PublicTransactionPoolAPI exposes methods for the RPC interface
type PublicTransactionPoolAPI struct {
b Backend
b Backend
nonceLock *AddrLocker
}
// NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool.
func NewPublicTransactionPoolAPI(b Backend) *PublicTransactionPoolAPI {
return &PublicTransactionPoolAPI{b}
func NewPublicTransactionPoolAPI(b Backend, nonceLock *AddrLocker) *PublicTransactionPoolAPI {
return &PublicTransactionPoolAPI{b, nonceLock}
}
func getTransaction(chainDb ethdb.Database, b Backend, txHash common.Hash) (*types.Transaction, bool, error) {
@ -1170,10 +1179,7 @@ func submitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c
// SendTransaction creates a transaction for the given argument, signs it and submits it to the
// transaction pool.
func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args SendTxArgs) (common.Hash, error) {
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return common.Hash{}, err
}
// Look up the wallet containing the requested signer
account := accounts.Account{Address: args.From}
@ -1181,6 +1187,18 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen
if err != nil {
return common.Hash{}, err
}
if args.Nonce == nil {
// Hold the address's mutex around signing to prevent concurrent assignment of
// the same nonce to multiple transactions from the same account.
s.nonceLock.LockAddr(args.From)
defer s.nonceLock.UnlockAddr(args.From)
}
// Set some sanity defaults and terminate on failure
if err := args.setDefaults(ctx, s.b); err != nil {
return common.Hash{}, err
}
// Assemble the transaction and sign with the wallet
tx := args.toTransaction()
@ -1257,6 +1275,12 @@ type SignTransactionResult struct {
// The node needs to have the private key of the account corresponding with
// the given from address and it needs to be unlocked.
func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args SendTxArgs) (*SignTransactionResult, error) {
if args.Nonce == nil {
// Hold the address's mutex around signing to prevent concurrent assignment of
// the same nonce to multiple transactions from the same account.
s.nonceLock.LockAddr(args.From)
defer s.nonceLock.UnlockAddr(args.From)
}
if err := args.setDefaults(ctx, s.b); err != nil {
return nil, err
}


@ -73,6 +73,7 @@ type State interface {
}
func GetAPIs(apiBackend Backend) []rpc.API {
nonceLock := new(AddrLocker)
return []rpc.API{
{
Namespace: "eth",
@ -87,7 +88,7 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, {
Namespace: "eth",
Version: "1.0",
Service: NewPublicTransactionPoolAPI(apiBackend),
Service: NewPublicTransactionPoolAPI(apiBackend, nonceLock),
Public: true,
}, {
Namespace: "txpool",
@ -111,7 +112,7 @@ func GetAPIs(apiBackend Backend) []rpc.API {
}, {
Namespace: "personal",
Version: "1.0",
Service: NewPrivateAccountAPI(apiBackend),
Service: NewPrivateAccountAPI(apiBackend, nonceLock),
Public: false,
},
}

View File

@ -48,7 +48,7 @@ func runTrace(tracer *JavascriptTracer) (interface{}, error) {
contract := vm.NewContract(account{}, account{}, big.NewInt(0), 10000)
contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}
_, err := env.Interpreter().Run(contract, []byte{})
_, err := env.Interpreter().Run(0, contract, []byte{})
if err != nil {
return nil, err
}

View File

@ -16,56 +16,82 @@
package les
import (
"sync/atomic"
)
import "sync"
// ExecQueue implements a queue that executes function calls in a single thread,
// execQueue implements a queue that executes function calls in a single thread,
// in the same order as they have been queued.
type execQueue struct {
chn chan func()
cnt, stop, capacity int32
mu sync.Mutex
cond *sync.Cond
funcs []func()
closeWait chan struct{}
}
// NewExecQueue creates a new execution queue.
func newExecQueue(capacity int32) *execQueue {
q := &execQueue{
chn: make(chan func(), capacity),
capacity: capacity,
}
// newExecQueue creates a new execution queue.
func newExecQueue(capacity int) *execQueue {
q := &execQueue{funcs: make([]func(), 0, capacity)}
q.cond = sync.NewCond(&q.mu)
go q.loop()
return q
}
func (q *execQueue) loop() {
for f := range q.chn {
atomic.AddInt32(&q.cnt, -1)
if atomic.LoadInt32(&q.stop) != 0 {
return
}
for f := q.waitNext(false); f != nil; f = q.waitNext(true) {
f()
}
close(q.closeWait)
}
// CanQueue returns true if more function calls can be added to the execution queue.
func (q *execQueue) waitNext(drop bool) (f func()) {
q.mu.Lock()
if drop {
// Remove the function that just executed. We do this here instead of when
// dequeuing so len(q.funcs) includes the function that is running.
q.funcs = append(q.funcs[:0], q.funcs[1:]...)
}
for !q.isClosed() {
if len(q.funcs) > 0 {
f = q.funcs[0]
break
}
q.cond.Wait()
}
q.mu.Unlock()
return f
}
func (q *execQueue) isClosed() bool {
return q.closeWait != nil
}
// canQueue returns true if more function calls can be added to the execution queue.
func (q *execQueue) canQueue() bool {
return atomic.LoadInt32(&q.stop) == 0 && atomic.LoadInt32(&q.cnt) < q.capacity
q.mu.Lock()
ok := !q.isClosed() && len(q.funcs) < cap(q.funcs)
q.mu.Unlock()
return ok
}
// Queue adds a function call to the execution queue. Returns true if successful.
// queue adds a function call to the execution queue. Returns true if successful.
func (q *execQueue) queue(f func()) bool {
if atomic.LoadInt32(&q.stop) != 0 {
return false
q.mu.Lock()
ok := !q.isClosed() && len(q.funcs) < cap(q.funcs)
if ok {
q.funcs = append(q.funcs, f)
q.cond.Signal()
}
if atomic.AddInt32(&q.cnt, 1) > q.capacity {
atomic.AddInt32(&q.cnt, -1)
return false
}
q.chn <- f
return true
q.mu.Unlock()
return ok
}
// Stop stops the exec queue.
// quit stops the exec queue.
// quit waits for the current execution to finish before returning.
func (q *execQueue) quit() {
atomic.StoreInt32(&q.stop, 1)
q.mu.Lock()
if !q.isClosed() {
q.closeWait = make(chan struct{})
q.cond.Signal()
}
q.mu.Unlock()
<-q.closeWait
}
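The external behaviour of the queue is unchanged by the rewrite: closures are executed one at a time in submission order, queue reports whether there was room, and quit now additionally waits for the in-flight call to return. A minimal usage sketch (it would have to live in package les, since execQueue is unexported; not taken from the diff):

package les

import "fmt"

// exampleExecQueue is an illustrative sketch, not part of the diff.
func exampleExecQueue() {
	q := newExecQueue(4) // room for four queued calls

	for i := 0; i < 10; i++ {
		i := i
		// queue returns false once the queue is full or has been quit.
		if !q.queue(func() { fmt.Println("executed", i) }) {
			fmt.Println("dropped", i)
		}
	}

	// quit blocks until the currently running call, if any, has returned.
	q.quit()
}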

62  les/execqueue_test.go Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"testing"
)
func TestExecQueue(t *testing.T) {
var (
N = 10000
q = newExecQueue(N)
counter int
execd = make(chan int)
testexit = make(chan struct{})
)
defer q.quit()
defer close(testexit)
check := func(state string, wantOK bool) {
c := counter
counter++
qf := func() {
select {
case execd <- c:
case <-testexit:
}
}
if q.canQueue() != wantOK {
t.Fatalf("canQueue() == %t for %s", !wantOK, state)
}
if q.queue(qf) != wantOK {
t.Fatalf("canQueue() == %t for %s", !wantOK, state)
}
}
for i := 0; i < N; i++ {
check("queue below cap", true)
}
check("full queue", false)
for i := 0; i < N; i++ {
if c := <-execd; c != i {
t.Fatal("execution out of order")
}
}
q.quit()
check("closed queue", false)
}

View File

@ -19,7 +19,6 @@ package miner
import (
"fmt"
"math/big"
"sync/atomic"
"github.com/ethereum/go-ethereum/accounts"
@ -104,18 +103,6 @@ out:
}
}
func (m *Miner) GasPrice() *big.Int {
return new(big.Int).Set(m.worker.gasPrice)
}
func (m *Miner) SetGasPrice(price *big.Int) {
// FIXME block tests set a nil gas price. Quick dirty fix
if price == nil {
return
}
m.worker.setGasPrice(price)
}
func (self *Miner) Start(coinbase common.Address) {
atomic.StoreInt32(&self.shouldStart, 1)
self.worker.setEtherbase(coinbase)

View File

@ -59,14 +59,12 @@ type Work struct {
config *params.ChainConfig
signer types.Signer
state *state.StateDB // apply state changes here
ancestors *set.Set // ancestor set (used for checking uncle parent validity)
family *set.Set // family set (used for checking uncle invalidity)
uncles *set.Set // uncle set
tcount int // tx count in cycle
ownedAccounts *set.Set
lowGasTxs types.Transactions
failedTxs types.Transactions
state *state.StateDB // apply state changes here
ancestors *set.Set // ancestor set (used for checking uncle parent validity)
family *set.Set // family set (used for checking uncle invalidity)
uncles *set.Set // uncle set
tcount int // tx count in cycle
failedTxs types.Transactions
Block *types.Block // the new block
@ -103,7 +101,6 @@ type worker struct {
chainDb ethdb.Database
coinbase common.Address
gasPrice *big.Int
extra []byte
currentMu sync.Mutex
@ -132,7 +129,6 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com
mux: mux,
chainDb: eth.ChainDb(),
recv: make(chan *Result, resultQueueSize),
gasPrice: new(big.Int),
chain: eth.BlockChain(),
proc: eth.BlockChain().Validator(),
possibleUncles: make(map[common.Hash]*types.Block),
@ -252,7 +248,7 @@ func (self *worker) update() {
txs := map[common.Address]types.Transactions{acc: {ev.Tx}}
txset := types.NewTransactionsByPriceAndNonce(txs)
self.current.commitTransactions(self.mux, txset, self.gasPrice, self.chain, self.coinbase)
self.current.commitTransactions(self.mux, txset, self.chain, self.coinbase)
self.currentMu.Unlock()
}
}
@ -375,22 +371,10 @@ func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error
}
// Keep track of transactions which return errors so they can be removed
work.tcount = 0
work.ownedAccounts = accountAddressesSet(accounts)
self.current = work
return nil
}
func (w *worker) setGasPrice(p *big.Int) {
w.mu.Lock()
defer w.mu.Unlock()
// calculate the minimal gas price the miner accepts when sorting out transactions.
const pct = int64(90)
w.gasPrice = gasprice(p, pct)
w.mux.Post(core.GasPriceChanged{Price: w.gasPrice})
}
func (self *worker) commitNewWork() {
self.mu.Lock()
defer self.mu.Unlock()
@ -460,9 +444,8 @@ func (self *worker) commitNewWork() {
return
}
txs := types.NewTransactionsByPriceAndNonce(pending)
work.commitTransactions(self.mux, txs, self.gasPrice, self.chain, self.coinbase)
work.commitTransactions(self.mux, txs, self.chain, self.coinbase)
self.eth.TxPool().RemoveBatch(work.lowGasTxs)
self.eth.TxPool().RemoveBatch(work.failedTxs)
// compute uncles for the new block.
@ -515,7 +498,7 @@ func (self *worker) commitUncle(work *Work, uncle *types.Header) error {
return nil
}
func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, gasPrice *big.Int, bc *core.BlockChain, coinbase common.Address) {
func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, bc *core.BlockChain, coinbase common.Address) {
gp := new(core.GasPool).AddGas(env.header.GasLimit)
var coalescedLogs []*types.Log
@ -539,19 +522,8 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB
txs.Pop()
continue
}
// Ignore any transactions (and accounts subsequently) with low gas prices
if tx.GasPrice().Cmp(gasPrice) < 0 && !env.ownedAccounts.Has(from) {
// Pop the current low-priced transaction without shifting in the next from the account
log.Warn("Transaction below gas price", "sender", from, "hash", tx.Hash(), "have", tx.GasPrice(), "want", gasPrice)
env.lowGasTxs = append(env.lowGasTxs, tx)
txs.Pop()
continue
}
// Start executing the transaction
env.state.StartRecord(tx.Hash(), common.Hash{}, env.tcount)
env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount)
err, logs := env.commitTransaction(tx, bc, coinbase, gp)
switch err {
@ -607,25 +579,3 @@ func (env *Work) commitTransaction(tx *types.Transaction, bc *core.BlockChain, c
return nil, receipt.Logs
}
// TODO: remove or use
func (self *worker) HashRate() int64 {
return 0
}
// gasprice calculates a reduced gas price based on the pct
// XXX Use big.Rat?
func gasprice(price *big.Int, pct int64) *big.Int {
p := new(big.Int).Set(price)
p.Div(p, big.NewInt(100))
p.Mul(p, big.NewInt(pct))
return p
}
func accountAddressesSet(accounts []accounts.Account) *set.Set {
accountSet := set.New()
for _, account := range accounts {
accountSet.Add(account.Address)
}
return accountSet
}
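For context on what disappears here: the miner used to enforce its own gas-price floor at pct = 90, i.e. it accepted transactions down to 90% of the configured price (with the division performed first, a 20 gwei setting yields an 18 gwei floor); that filtering is now handled by the transaction pool instead of the miner. A standalone restatement of the deleted helper, under an illustrative name:

package main

import "math/big"

// reducedGasPrice restates the deleted gasprice helper: floor(price/100) * pct.
// With price = 20 gwei and pct = 90 it returns 18 gwei.
func reducedGasPrice(price *big.Int, pct int64) *big.Int {
	p := new(big.Int).Set(price)
	p.Div(p, big.NewInt(100))
	p.Mul(p, big.NewInt(pct))
	return p
}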

View File

@ -25,6 +25,7 @@ import (
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/crypto"
)
const (
@ -115,6 +116,9 @@ func (ks *KeyStore) SignHash(address *Address, hash []byte) (signature []byte, _
// SignTx signs the given transaction with the requested account.
func (ks *KeyStore) SignTx(account *Account, tx *Transaction, chainID *BigInt) (*Transaction, error) {
if chainID == nil { // Null passed from mobile app
chainID = new(BigInt)
}
signed, err := ks.keystore.SignTx(account.account, tx.tx, chainID.bigint)
if err != nil {
return nil, err
@ -132,6 +136,9 @@ func (ks *KeyStore) SignHashPassphrase(account *Account, passphrase string, hash
// SignTxPassphrase signs the transaction if the private key matching the
// given address can be decrypted with the given passphrase.
func (ks *KeyStore) SignTxPassphrase(account *Account, passphrase string, tx *Transaction, chainID *BigInt) (*Transaction, error) {
if chainID == nil { // Null passed from mobile app
chainID = new(BigInt)
}
signed, err := ks.keystore.SignTxWithPassphrase(account.account, passphrase, tx.tx, chainID.bigint)
if err != nil {
return nil, err
@ -170,6 +177,11 @@ func (ks *KeyStore) NewAccount(passphrase string) (*Account, error) {
return &Account{account}, nil
}
// UpdateAccount changes the passphrase of an existing account.
func (ks *KeyStore) UpdateAccount(account *Account, passphrase, newPassphrase string) error {
return ks.keystore.Update(account.account, passphrase, newPassphrase)
}
// ExportKey exports as a JSON key, encrypted with newPassphrase.
func (ks *KeyStore) ExportKey(account *Account, passphrase, newPassphrase string) (key []byte, _ error) {
return ks.keystore.Export(account.account, passphrase, newPassphrase)
@ -184,9 +196,17 @@ func (ks *KeyStore) ImportKey(keyJSON []byte, passphrase, newPassphrase string)
return &Account{acc}, nil
}
// UpdateAccount changes the passphrase of an existing account.
func (ks *KeyStore) UpdateAccount(account *Account, passphrase, newPassphrase string) error {
return ks.keystore.Update(account.account, passphrase, newPassphrase)
// ImportECDSAKey stores the given encrypted JSON key into the key directory.
func (ks *KeyStore) ImportECDSAKey(key []byte, passphrase string) (account *Account, _ error) {
privkey, err := crypto.ToECDSA(key)
if err != nil {
return nil, err
}
acc, err := ks.keystore.ImportECDSA(privkey, passphrase)
if err != nil {
return nil, err
}
return &Account{acc}, nil
}
// ImportPreSaleKey decrypts the given Ethereum presale wallet and stores

Some files were not shown because too many files have changed in this diff.