Compare commits
161 Commits
Author | SHA1 | Date | |
---|---|---|---|
9c2882b2e5 | |||
1a0eb903f1 | |||
0036e2a747 | |||
727eadacca | |||
99cba96f26 | |||
f272879e5a | |||
72dd51e25a | |||
799a469000 | |||
f4d81178d8 | |||
310d2e7ef4 | |||
3ecde4e2aa | |||
a355b401db | |||
cba33029a8 | |||
9702badd83 | |||
067dc2cbf5 | |||
65979770e6 | |||
41bdf49eed | |||
ea11f7dd7a | |||
8df24760d7 | |||
71814bf6c4 | |||
ec1700600a | |||
b0f30b0b37 | |||
e96f2981e2 | |||
09d59da3a1 | |||
280609c99b | |||
309da541de | |||
dd06c85843 | |||
ae40d51410 | |||
b865fad888 | |||
afb17cf071 | |||
08959bbc70 | |||
673c92db6b | |||
c2a494c743 | |||
afdd23b5ca | |||
cb809c03da | |||
45421d3130 | |||
115e7d71cc | |||
dd5ed01f3b | |||
b7ff0d42e3 | |||
c98bce709c | |||
17f0b11942 | |||
6231edcbab | |||
07aae19e5d | |||
b596b4ba5b | |||
8b1e4c4c5e | |||
846d091bd2 | |||
a346aedb90 | |||
ef25b826e6 | |||
261b3e2351 | |||
344f25fb3e | |||
1afaea4bfe | |||
11cf5b7ead | |||
069cb661c3 | |||
3b8915e387 | |||
437ceaa9be | |||
136f78ff0a | |||
aa73420207 | |||
3556962053 | |||
e1e87d8b1a | |||
30cc1c3bf0 | |||
10582a97ca | |||
e16a7ef60f | |||
a816e75662 | |||
3ee75bec9f | |||
04b668b232 | |||
da636c53d6 | |||
2a41e76b39 | |||
4a2c17b1ab | |||
bc75351edf | |||
33b158e0ed | |||
83721a95ce | |||
e7119ce12d | |||
a5f6a1cb7c | |||
e6aff513db | |||
8a4c1fb799 | |||
10a57fc3d4 | |||
a2f23ca9b1 | |||
e20158176d | |||
ef7b9fb7d0 | |||
b0d0fafd68 | |||
90c7155ef4 | |||
df4e7eccf5 | |||
7c707d14d1 | |||
953a995116 | |||
c5840ce12f | |||
3b3989de6a | |||
40976ea1a0 | |||
d18b509e40 | |||
2e4d23a793 | |||
60293820b7 | |||
82defe5c56 | |||
dd483d7d0d | |||
dddebe469b | |||
cf19586cfb | |||
fd5d51c9ae | |||
2ec5cf1673 | |||
36a800a1d2 | |||
93832b633e | |||
021c3c2816 | |||
ff2c966e7f | |||
14cc40a31a | |||
881df0e629 | |||
1c2f6f5597 | |||
d51a9fd6b7 | |||
464f30d301 | |||
8a28408616 | |||
e1dc7ece62 | |||
99127ff2e7 | |||
81d6ec908a | |||
38b4fc8069 | |||
11ab73f6d8 | |||
18e9cb1187 | |||
502a2bd69f | |||
8b517d7f00 | |||
cad071009d | |||
181a3309df | |||
02fa3e3179 | |||
a8eafcdc0e | |||
bcf2465b0b | |||
c3dc01caf1 | |||
cf4faa491a | |||
59966255ad | |||
f8acc0af7e | |||
02a29060d2 | |||
0255ed6335 | |||
96c2ab22e0 | |||
5494e6e7ab | |||
32db571681 | |||
a6af56fa4d | |||
5884606ec3 | |||
f6c0f76cc5 | |||
f9be9a2302 | |||
95f0bd0acf | |||
8dce4c283d | |||
fff16169c6 | |||
5f7eb78918 | |||
e61035c5a3 | |||
37e3f561f1 | |||
ba3bcd16a6 | |||
207bd7d2cd | |||
4047ccad2f | |||
a13e920af0 | |||
f958d7d482 | |||
7cc6abeef6 | |||
54253aae4c | |||
09aabaea9f | |||
ecec454e92 | |||
7b2fc0643f | |||
d2fda73ad7 | |||
5aa21d8b32 | |||
9fc90b6747 | |||
edef84da2b | |||
6430e672c9 | |||
a31d268b76 | |||
e353f9c088 | |||
af48a331bf | |||
80e74fc1e0 | |||
03dffe3efd | |||
cb3f5f8b93 | |||
c7a4d9cf8a | |||
7d0ac94809 |
26
.travis.yml
26
.travis.yml
@ -6,7 +6,7 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.7.5
|
||||
go: 1.7.6
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
@ -19,7 +19,7 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.1
|
||||
go: 1.8.3
|
||||
script:
|
||||
- sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install fuse
|
||||
- sudo modprobe fuse
|
||||
@ -29,7 +29,7 @@ matrix:
|
||||
- go run build/ci.go test -coverage -misspell
|
||||
|
||||
- os: osx
|
||||
go: 1.8.1
|
||||
go: 1.8.3
|
||||
sudo: required
|
||||
script:
|
||||
- brew update
|
||||
@ -42,7 +42,7 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.1
|
||||
go: 1.8.3
|
||||
env:
|
||||
- ubuntu-ppa
|
||||
- azure-linux
|
||||
@ -80,7 +80,7 @@ matrix:
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
go: 1.8.1
|
||||
go: 1.8.3
|
||||
env:
|
||||
- azure-linux-mips
|
||||
script:
|
||||
@ -120,16 +120,16 @@ matrix:
|
||||
- azure-android
|
||||
- maven-android
|
||||
before_install:
|
||||
- curl https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz | tar -xz
|
||||
- curl https://storage.googleapis.com/golang/go1.8.3.linux-amd64.tar.gz | tar -xz
|
||||
- export PATH=`pwd`/go/bin:$PATH
|
||||
- export GOROOT=`pwd`/go
|
||||
- export GOPATH=$HOME/go
|
||||
script:
|
||||
# Build the Android archive and upload it to Maven Central and Azure
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip -o android-ndk-r13b.zip
|
||||
- unzip -q android-ndk-r13b.zip && rm android-ndk-r13b.zip
|
||||
- mv android-ndk-r13b $HOME
|
||||
- export ANDROID_NDK=$HOME/android-ndk-r13b
|
||||
- curl https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip -o android-ndk-r14b.zip
|
||||
- unzip -q android-ndk-r14b.zip && rm android-ndk-r14b.zip
|
||||
- mv android-ndk-r14b $HOME
|
||||
- export ANDROID_NDK=$HOME/android-ndk-r14b
|
||||
|
||||
- mkdir -p $GOPATH/src/github.com/ethereum
|
||||
- ln -s `pwd` $GOPATH/src/github.com/ethereum
|
||||
@ -137,7 +137,7 @@ matrix:
|
||||
|
||||
# This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
|
||||
- os: osx
|
||||
go: 1.8.1
|
||||
go: 1.8.3
|
||||
env:
|
||||
- azure-osx
|
||||
- azure-ios
|
||||
@ -147,7 +147,7 @@ matrix:
|
||||
- go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds
|
||||
|
||||
# Build the iOS framework and upload it to CocoaPods and Azure
|
||||
- gem uninstall cocoapods -a
|
||||
- gem uninstall cocoapods -a -x
|
||||
- gem install cocoapods
|
||||
|
||||
- mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
|
||||
@ -163,7 +163,7 @@ matrix:
|
||||
- os: linux
|
||||
dist: trusty
|
||||
sudo: required
|
||||
go: 1.8.1
|
||||
go: 1.8.3
|
||||
env:
|
||||
- azure-purge
|
||||
script:
|
||||
|
@ -4,11 +4,12 @@ ADD . /go-ethereum
|
||||
RUN \
|
||||
apk add --update git go make gcc musl-dev linux-headers && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
cp go-ethereum/build/bin/geth /usr/local/bin/ && \
|
||||
apk del git go make gcc musl-dev linux-headers && \
|
||||
rm -rf /go-ethereum && rm -rf /var/cache/apk/*
|
||||
|
||||
EXPOSE 8545
|
||||
EXPOSE 30303
|
||||
EXPOSE 30303/udp
|
||||
|
||||
ENTRYPOINT ["/geth"]
|
||||
ENTRYPOINT ["geth"]
|
||||
|
24
README.md
24
README.md
@ -70,7 +70,7 @@ This command will:
|
||||
(via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API)
|
||||
as well as Geth's own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs).
|
||||
This too is optional and if you leave it out you can always attach to an already running Geth instance
|
||||
with `geth --attach`.
|
||||
with `geth attach`.
|
||||
|
||||
### Full node on the Ethereum test network
|
||||
|
||||
@ -84,21 +84,23 @@ $ geth --testnet --fast --cache=512 console
|
||||
```
|
||||
|
||||
The `--fast`, `--cache` flags and `console` subcommand have the exact same meaning as above and they
|
||||
are equially useful on the testnet too. Please see above for their explanations if you've skipped to
|
||||
are equally useful on the testnet too. Please see above for their explanations if you've skipped to
|
||||
here.
|
||||
|
||||
Specifying the `--testnet` flag however will reconfigure your Geth instance a bit:
|
||||
|
||||
* Instead of using the default data directory (`~/.ethereum` on Linux for example), Geth will nest
|
||||
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux).
|
||||
itself one level deeper into a `testnet` subfolder (`~/.ethereum/testnet` on Linux). Note, on OSX
|
||||
and Linux this also means that attaching to a running testnet node requires the use of a custom
|
||||
endpoint since `geth attach` will try to attach to a production node endpoint by default. E.g.
|
||||
`geth attach <datadir>/testnet/geth.ipc`. Windows users are not affected by this.
|
||||
* Instead of connecting the main Ethereum network, the client will connect to the test network,
|
||||
which uses different P2P bootnodes, different network IDs and genesis states.
|
||||
|
||||
|
||||
*Note: Although there are some internal protective measures to prevent transactions from crossing
|
||||
over between the main network and test network (different starting nonces), you should make sure to
|
||||
always use separate accounts for play-money and real-money. Unless you manually move accounts, Geth
|
||||
will by default correctly separate the two networks and will not make any accounts available between
|
||||
them.*
|
||||
over between the main network and test network, you should make sure to always use separate accounts
|
||||
for play-money and real-money. Unless you manually move accounts, Geth will by default correctly
|
||||
separate the two networks and will not make any accounts available between them.*
|
||||
|
||||
#### Docker quick start
|
||||
|
||||
@ -161,6 +163,12 @@ and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`)
|
||||
|
||||
```json
|
||||
{
|
||||
"config": {
|
||||
"chainId": 0,
|
||||
"homesteadBlock": 0,
|
||||
"eip155Block": 0,
|
||||
"eip158Block": 0
|
||||
},
|
||||
"alloc" : {},
|
||||
"coinbase" : "0x0000000000000000000000000000000000000000",
|
||||
"difficulty" : "0x20000",
|
||||
|
@ -124,14 +124,13 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
privkey, err := hex.DecodeString(keyJSON.PrivateKey)
|
||||
privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k.Address = common.BytesToAddress(addr)
|
||||
k.PrivateKey = crypto.ToECDSA(privkey)
|
||||
k.PrivateKey = privkey
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -450,7 +450,6 @@ func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (acco
|
||||
if ks.cache.hasAddress(key.Address) {
|
||||
return accounts.Account{}, fmt.Errorf("account already exists")
|
||||
}
|
||||
|
||||
return ks.importKey(key, passphrase)
|
||||
}
|
||||
|
||||
|
@ -182,7 +182,8 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key := crypto.ToECDSA(keyBytes)
|
||||
key := crypto.ToECDSAUnsafe(keyBytes)
|
||||
|
||||
return &Key{
|
||||
Id: uuid.UUID(keyId),
|
||||
Address: crypto.PubkeyToAddress(key.PublicKey),
|
||||
|
@ -46,7 +46,7 @@ func TestKeyEncryptDecrypt(t *testing.T) {
|
||||
// Decrypt with the correct password
|
||||
key, err := DecryptKey(keyjson, password)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: json key failed to decrypt: %v", i, err)
|
||||
t.Fatalf("test %d: json key failed to decrypt: %v", i, err)
|
||||
}
|
||||
if key.Address != address {
|
||||
t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
|
||||
|
@ -74,7 +74,8 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error
|
||||
return nil, err
|
||||
}
|
||||
ethPriv := crypto.Keccak256(plainText)
|
||||
ecKey := crypto.ToECDSA(ethPriv)
|
||||
ecKey := crypto.ToECDSAUnsafe(ethPriv)
|
||||
|
||||
key = &Key{
|
||||
Id: nil,
|
||||
Address: crypto.PubkeyToAddress(ecKey.PublicKey),
|
||||
|
@ -22,8 +22,8 @@ environment:
|
||||
|
||||
install:
|
||||
- rmdir C:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.1.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.8.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.3.windows-%GETH_ARCH%.zip
|
||||
- 7z x go1.8.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- gcc --version
|
||||
|
||||
|
@ -77,6 +77,7 @@ var (
|
||||
executablePath("puppeth"),
|
||||
executablePath("rlpdump"),
|
||||
executablePath("swarm"),
|
||||
executablePath("wnode"),
|
||||
}
|
||||
|
||||
// A debian package is created for all executables listed here.
|
||||
@ -109,6 +110,10 @@ var (
|
||||
Name: "swarm",
|
||||
Description: "Ethereum Swarm daemon and tools",
|
||||
},
|
||||
{
|
||||
Name: "wnode",
|
||||
Description: "Ethereum Whisper diagnostic tool",
|
||||
},
|
||||
}
|
||||
|
||||
// Distros for which packages are created.
|
||||
|
@ -47,11 +47,14 @@ var (
|
||||
// boring stuff
|
||||
"vendor/", "tests/files/", "build/",
|
||||
// don't relicense vendored sources
|
||||
"crypto/sha3/", "crypto/ecies/", "log/",
|
||||
"crypto/secp256k1/curve.go",
|
||||
"consensus/ethash/xor.go",
|
||||
"internal/jsre/deps",
|
||||
"cmd/internal/browser",
|
||||
"consensus/ethash/xor.go",
|
||||
"crypto/bn256/",
|
||||
"crypto/ecies/",
|
||||
"crypto/secp256k1/curve.go",
|
||||
"crypto/sha3/",
|
||||
"internal/jsre/deps",
|
||||
"log/",
|
||||
// don't license generated files
|
||||
"contracts/chequebook/contract/",
|
||||
"contracts/ens/contract/",
|
||||
|
@ -68,6 +68,7 @@ func main() {
|
||||
if err = crypto.SaveECDSA(*genKey, nodeKey); err != nil {
|
||||
utils.Fatalf("%v", err)
|
||||
}
|
||||
return
|
||||
case *nodeKeyFile == "" && *nodeKeyHex == "":
|
||||
utils.Fatalf("Use -nodekey or -nodekeyhex to specify a private key")
|
||||
case *nodeKeyFile != "" && *nodeKeyHex != "":
|
||||
|
@ -35,6 +35,18 @@ var (
|
||||
Name: "debug",
|
||||
Usage: "output full trace logs",
|
||||
}
|
||||
MemProfileFlag = cli.StringFlag{
|
||||
Name: "memprofile",
|
||||
Usage: "creates a memory profile at the given path",
|
||||
}
|
||||
CPUProfileFlag = cli.StringFlag{
|
||||
Name: "cpuprofile",
|
||||
Usage: "creates a CPU profile at the given path",
|
||||
}
|
||||
StatDumpFlag = cli.BoolFlag{
|
||||
Name: "statdump",
|
||||
Usage: "displays stack and heap memory information",
|
||||
}
|
||||
CodeFlag = cli.StringFlag{
|
||||
Name: "code",
|
||||
Usage: "EVM code",
|
||||
@ -93,6 +105,9 @@ func init() {
|
||||
DumpFlag,
|
||||
InputFlag,
|
||||
DisableGasMeteringFlag,
|
||||
MemProfileFlag,
|
||||
CPUProfileFlag,
|
||||
StatDumpFlag,
|
||||
}
|
||||
app.Commands = []cli.Command{
|
||||
compileCommand,
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
goruntime "runtime"
|
||||
@ -108,6 +109,19 @@ func runCmd(ctx *cli.Context) error {
|
||||
},
|
||||
}
|
||||
|
||||
if cpuProfilePath := ctx.GlobalString(CPUProfileFlag.Name); cpuProfilePath != "" {
|
||||
f, err := os.Create(cpuProfilePath)
|
||||
if err != nil {
|
||||
fmt.Println("could not create CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.StartCPUProfile(f); err != nil {
|
||||
fmt.Println("could not start CPU profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer pprof.StopCPUProfile()
|
||||
}
|
||||
|
||||
tstart := time.Now()
|
||||
if ctx.GlobalBool(CreateFlag.Name) {
|
||||
input := append(code, common.Hex2Bytes(ctx.GlobalString(InputFlag.Name))...)
|
||||
@ -125,12 +139,27 @@ func runCmd(ctx *cli.Context) error {
|
||||
fmt.Println(string(statedb.Dump()))
|
||||
}
|
||||
|
||||
if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {
|
||||
f, err := os.Create(memProfilePath)
|
||||
if err != nil {
|
||||
fmt.Println("could not create memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := pprof.WriteHeapProfile(f); err != nil {
|
||||
fmt.Println("could not write memory profile: ", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(DebugFlag.Name) {
|
||||
fmt.Fprintln(os.Stderr, "#### TRACE ####")
|
||||
vm.WriteTrace(os.Stderr, logger.StructLogs())
|
||||
fmt.Fprintln(os.Stderr, "#### LOGS ####")
|
||||
vm.WriteLogs(os.Stderr, statedb.Logs())
|
||||
}
|
||||
|
||||
if ctx.GlobalBool(StatDumpFlag.Name) {
|
||||
var mem goruntime.MemStats
|
||||
goruntime.ReadMemStats(&mem)
|
||||
fmt.Fprintf(os.Stderr, `evm execution time: %v
|
||||
|
@ -27,10 +27,13 @@ import (
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@ -60,12 +63,13 @@ var (
|
||||
apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
|
||||
ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
|
||||
bootFlag = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
|
||||
netFlag = flag.Int("network", 0, "Network ID to use for the Ethereum protocol")
|
||||
netFlag = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
|
||||
statsFlag = flag.String("ethstats", "", "Ethstats network monitoring auth string")
|
||||
|
||||
netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
|
||||
payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
|
||||
minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
|
||||
tiersFlag = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")
|
||||
|
||||
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
|
||||
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
|
||||
@ -73,6 +77,9 @@ var (
|
||||
githubUser = flag.String("github.user", "", "GitHub user to authenticate with for Gist access")
|
||||
githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with")
|
||||
|
||||
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
|
||||
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
|
||||
|
||||
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
|
||||
)
|
||||
|
||||
@ -85,21 +92,47 @@ func main() {
|
||||
flag.Parse()
|
||||
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
|
||||
|
||||
// Construct the payout tiers
|
||||
amounts := make([]string, *tiersFlag)
|
||||
periods := make([]string, *tiersFlag)
|
||||
for i := 0; i < *tiersFlag; i++ {
|
||||
// Calculate the amount for the next tier and format it
|
||||
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
|
||||
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
|
||||
if amount == 1 {
|
||||
amounts[i] = strings.TrimSuffix(amounts[i], "s")
|
||||
}
|
||||
// Calculate the period for the next tier and format it
|
||||
period := *minutesFlag * int(math.Pow(3, float64(i)))
|
||||
periods[i] = fmt.Sprintf("%d mins", period)
|
||||
if period%60 == 0 {
|
||||
period /= 60
|
||||
periods[i] = fmt.Sprintf("%d hours", period)
|
||||
|
||||
if period%24 == 0 {
|
||||
period /= 24
|
||||
periods[i] = fmt.Sprintf("%d days", period)
|
||||
}
|
||||
}
|
||||
if period == 1 {
|
||||
periods[i] = strings.TrimSuffix(periods[i], "s")
|
||||
}
|
||||
}
|
||||
// Load up and render the faucet website
|
||||
tmpl, err := Asset("faucet.html")
|
||||
if err != nil {
|
||||
log.Crit("Failed to load the faucet template", "err", err)
|
||||
}
|
||||
period := fmt.Sprintf("%d minute(s)", *minutesFlag)
|
||||
if *minutesFlag%60 == 0 {
|
||||
period = fmt.Sprintf("%d hour(s)", *minutesFlag/60)
|
||||
}
|
||||
website := new(bytes.Buffer)
|
||||
template.Must(template.New("").Parse(string(tmpl))).Execute(website, map[string]interface{}{
|
||||
"Network": *netnameFlag,
|
||||
"Amount": *payoutFlag,
|
||||
"Period": period,
|
||||
err = template.Must(template.New("").Parse(string(tmpl))).Execute(website, map[string]interface{}{
|
||||
"Network": *netnameFlag,
|
||||
"Amounts": amounts,
|
||||
"Periods": periods,
|
||||
"Recaptcha": *captchaToken,
|
||||
})
|
||||
if err != nil {
|
||||
log.Crit("Failed to render the faucet template", "err", err)
|
||||
}
|
||||
// Load and parse the genesis block requested by the user
|
||||
blob, err := ioutil.ReadFile(*genesisFlag)
|
||||
if err != nil {
|
||||
@ -166,15 +199,15 @@ type faucet struct {
|
||||
nonce uint64 // Current pending nonce of the faucet
|
||||
price *big.Int // Current gas price to issue funds with
|
||||
|
||||
conns []*websocket.Conn // Currently live websocket connections
|
||||
history map[string]time.Time // History of users and their funding requests
|
||||
reqs []*request // Currently pending funding requests
|
||||
update chan struct{} // Channel to signal request updates
|
||||
conns []*websocket.Conn // Currently live websocket connections
|
||||
timeouts map[string]time.Time // History of users and their funding timeouts
|
||||
reqs []*request // Currently pending funding requests
|
||||
update chan struct{} // Channel to signal request updates
|
||||
|
||||
lock sync.RWMutex // Lock protecting the faucet's internals
|
||||
}
|
||||
|
||||
func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network int, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
|
||||
func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
|
||||
// Assemble the raw devp2p protocol stack
|
||||
stack, err := node.New(&node.Config{
|
||||
Name: "geth",
|
||||
@ -236,7 +269,7 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network i
|
||||
index: index,
|
||||
keystore: ks,
|
||||
account: ks.Accounts()[0],
|
||||
history: make(map[string]time.Time),
|
||||
timeouts: make(map[string]time.Time),
|
||||
update: make(chan struct{}, 1),
|
||||
}, nil
|
||||
}
|
||||
@ -290,14 +323,23 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
"peers": f.stack.Server().PeerCount(),
|
||||
"requests": f.reqs,
|
||||
})
|
||||
header, _ := f.client.HeaderByNumber(context.Background(), nil)
|
||||
websocket.JSON.Send(conn, header)
|
||||
// Send the initial block to the client
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
header, err := f.client.HeaderByNumber(ctx, nil)
|
||||
cancel()
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to retrieve latest header", "err", err)
|
||||
} else {
|
||||
websocket.JSON.Send(conn, header)
|
||||
}
|
||||
// Keep reading requests from the websocket until the connection breaks
|
||||
for {
|
||||
// Fetch the next funding request and validate against github
|
||||
var msg struct {
|
||||
URL string `json:"url"`
|
||||
URL string `json:"url"`
|
||||
Tier uint `json:"tier"`
|
||||
Captcha string `json:"captcha"`
|
||||
}
|
||||
if err := websocket.JSON.Receive(conn, &msg); err != nil {
|
||||
return
|
||||
@ -306,8 +348,39 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "URL doesn't link to GitHub Gists"})
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "gist", msg.URL)
|
||||
if msg.Tier >= uint(*tiersFlag) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid funding tier requested"})
|
||||
continue
|
||||
}
|
||||
log.Info("Faucet funds requested", "gist", msg.URL, "tier", msg.Tier)
|
||||
|
||||
// If captcha verifications are enabled, make sure we're not dealing with a robot
|
||||
if *captchaToken != "" {
|
||||
form := url.Values{}
|
||||
form.Add("secret", *captchaSecret)
|
||||
form.Add("response", msg.Captcha)
|
||||
|
||||
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
var result struct {
|
||||
Success bool `json:"success"`
|
||||
Errors json.RawMessage `json:"error-codes"`
|
||||
}
|
||||
err = json.NewDecoder(res.Body).Decode(&result)
|
||||
res.Body.Close()
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
if !result.Success {
|
||||
log.Warn("Captcha verification failed", "err", string(result.Errors))
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Beep-bop, you're a robot!"})
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Retrieve the gist from the GitHub Gist APIs
|
||||
parts := strings.Split(msg.URL, "/")
|
||||
req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil)
|
||||
@ -334,7 +407,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
continue
|
||||
}
|
||||
if gist.Owner.Login == "" {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Nice try ;)"})
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Anonymous Gists not allowed"})
|
||||
continue
|
||||
}
|
||||
// Iterate over all the files and look for Ethereum addresses
|
||||
@ -348,15 +421,30 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "No Ethereum address found to fund"})
|
||||
continue
|
||||
}
|
||||
// Validate the user's existence since the API is unhelpful here
|
||||
if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
continue
|
||||
}
|
||||
res.Body.Close()
|
||||
|
||||
if res.StatusCode != 200 {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": "Invalid user... boom!"})
|
||||
continue
|
||||
}
|
||||
// Ensure the user didn't request funds too recently
|
||||
f.lock.Lock()
|
||||
var (
|
||||
fund bool
|
||||
elapsed time.Duration
|
||||
timeout time.Time
|
||||
)
|
||||
if elapsed = time.Since(f.history[gist.Owner.Login]); elapsed > time.Duration(*minutesFlag)*time.Minute {
|
||||
if timeout = f.timeouts[gist.Owner.Login]; time.Now().After(timeout) {
|
||||
// User wasn't funded recently, create the funding transaction
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether), big.NewInt(21000), f.price, nil)
|
||||
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
|
||||
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
|
||||
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
|
||||
|
||||
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, big.NewInt(21000), f.price, nil)
|
||||
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId)
|
||||
if err != nil {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": err.Error()})
|
||||
@ -375,14 +463,14 @@ func (f *faucet) apiHandler(conn *websocket.Conn) {
|
||||
Time: time.Now(),
|
||||
Tx: signed,
|
||||
})
|
||||
f.history[gist.Owner.Login] = time.Now()
|
||||
f.timeouts[gist.Owner.Login] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
|
||||
fund = true
|
||||
}
|
||||
f.lock.Unlock()
|
||||
|
||||
// Send an error if too frequent funding, othewise a success
|
||||
if !fund {
|
||||
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("User already funded %s ago", common.PrettyDuration(elapsed))})
|
||||
websocket.JSON.Send(conn, map[string]string{"error": fmt.Sprintf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))})
|
||||
continue
|
||||
}
|
||||
websocket.JSON.Send(conn, map[string]string{"success": fmt.Sprintf("Funding request accepted for %s into %s", gist.Owner.Login, address.Hex())})
|
||||
|
@ -51,9 +51,13 @@
|
||||
<div class="input-group">
|
||||
<input id="gist" type="text" class="form-control" placeholder="GitHub Gist URL containing your Ethereum address...">
|
||||
<span class="input-group-btn">
|
||||
<button class="btn btn-default" type="button" onclick="submit()">Give me Ether!</button>
|
||||
<button class="btn btn-default dropdown-toggle" type="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">Give me Ether <i class="fa fa-caret-down" aria-hidden="true"></i></button>
|
||||
<ul class="dropdown-menu dropdown-menu-right">{{range $idx, $amount := .Amounts}}
|
||||
<li><a style="text-align: center;" onclick="tier={{$idx}}; {{if $.Recaptcha}}grecaptcha.execute(){{else}}submit({{$idx}}){{end}}">{{$amount}} / {{index $.Periods $idx}}</a></li>{{end}}
|
||||
</ul>
|
||||
</span>
|
||||
</div>
|
||||
</div>{{if .Recaptcha}}
|
||||
<div class="g-recaptcha" data-sitekey="{{.Recaptcha}}" data-callback="submit" data-size="invisible"></div>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
@ -76,8 +80,9 @@
|
||||
<div class="row" style="margin-top: 32px;">
|
||||
<div class="col-lg-12">
|
||||
<h3>How does this work?</h3>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limit of <strong>{{.Amount}} Ether(s) / {{.Period}}</strong>.</p>
|
||||
<p>This Ether faucet is running on the {{.Network}} network. To prevent malicious actors from exhausting all available funds or accumulating enough Ether to mount long running spam attacks, requests are tied to GitHub accounts. Anyone having a GitHub account may request funds within the permitted limits.</p>
|
||||
<p>To request funds, simply create a <a href="https://gist.github.com/" target="_about:blank">GitHub Gist</a> with your Ethereum address pasted into the contents (the file name doesn't matter), copy paste the gists URL into the above input box and fire away! You can track the current pending requests below the input field to see how much you have to wait until your turn comes.</p>
|
||||
{{if .Recaptcha}}<em>The faucet is running invisible reCaptcha protection against bots.</em>{{end}}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@ -87,10 +92,12 @@
|
||||
// Global variables to hold the current status of the faucet
|
||||
var attempt = 0;
|
||||
var server;
|
||||
var tier = 0;
|
||||
|
||||
// Define the function that submits a gist url to the server
|
||||
var submit = function() {
|
||||
server.send(JSON.stringify({url: $("#gist")[0].value}));
|
||||
var submit = function({{if .Recaptcha}}captcha{{end}}) {
|
||||
server.send(JSON.stringify({url: $("#gist")[0].value, tier: tier{{if .Recaptcha}}, captcha: captcha{{end}}}));{{if .Recaptcha}}
|
||||
grecaptcha.reset();{{end}}
|
||||
};
|
||||
// Define a method to reconnect upon server loss
|
||||
var reconnect = function() {
|
||||
@ -134,10 +141,10 @@
|
||||
}
|
||||
}
|
||||
server.onclose = function() { setTimeout(reconnect, 3000); };
|
||||
server.onerror = function() { setTimeout(reconnect, 3000); };
|
||||
}
|
||||
// Establish a websocket connection to the API server
|
||||
reconnect();
|
||||
</script>
|
||||
</script>{{if .Recaptcha}}
|
||||
<script src="https://www.google.com/recaptcha/api.js" async defer></script>{{end}}
|
||||
</body>
|
||||
</html>
|
||||
|
File diff suppressed because one or more lines are too long
@ -40,32 +40,39 @@ var (
|
||||
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.
|
||||
|
||||
`,
|
||||
passwordfile as argument containing the wallet password in plaintext.`,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: importWallet,
|
||||
|
||||
Name: "import",
|
||||
Usage: "Import Ethereum presale wallet",
|
||||
ArgsUsage: "<keyFile>",
|
||||
Action: utils.MigrateFlags(importWallet),
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
TODO: Please write this
|
||||
`,
|
||||
geth wallet [options] /path/to/my/presale.wallet
|
||||
|
||||
will prompt for your password and imports your ether presale account.
|
||||
It can be used non-interactively with the --password option taking a
|
||||
passwordfile as argument containing the wallet password in plaintext.`,
|
||||
},
|
||||
},
|
||||
}
|
||||
accountCommand = cli.Command{
|
||||
Action: accountList,
|
||||
Name: "account",
|
||||
Usage: "Manage accounts",
|
||||
ArgsUsage: "",
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Description: `
|
||||
Manage accounts lets you create new accounts, list all existing accounts,
|
||||
import a private key into a new account.
|
||||
|
||||
' help' shows a list of subcommands or help for one subcommand.
|
||||
accountCommand = cli.Command{
|
||||
Name: "account",
|
||||
Usage: "Manage accounts",
|
||||
Category: "ACCOUNT COMMANDS",
|
||||
Description: `
|
||||
|
||||
Manage accounts, list all existing accounts, import a private key into a new
|
||||
account, create a new account or update an existing account.
|
||||
|
||||
It supports interactive mode, when you are prompted for password as well as
|
||||
non-interactive mode where passwords are supplied via a given password file.
|
||||
@ -80,36 +87,34 @@ Note that exporting your key in unencrypted format is NOT supported.
|
||||
Keys are stored under <DATADIR>/keystore.
|
||||
It is safe to transfer the entire directory or the individual keys therein
|
||||
between ethereum nodes by simply copying.
|
||||
Make sure you backup your keys regularly.
|
||||
|
||||
In order to use your account to send transactions, you need to unlock them using
|
||||
the '--unlock' option. The argument is a space separated list of addresses or
|
||||
indexes. If used non-interactively with a passwordfile, the file should contain
|
||||
the respective passwords one per line. If you unlock n accounts and the password
|
||||
file contains less than n entries, then the last password is meant to apply to
|
||||
all remaining accounts.
|
||||
|
||||
And finally. DO NOT FORGET YOUR PASSWORD.
|
||||
`,
|
||||
Make sure you backup your keys regularly.`,
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Action: accountList,
|
||||
Name: "list",
|
||||
Usage: "Print account addresses",
|
||||
ArgsUsage: " ",
|
||||
Name: "list",
|
||||
Usage: "Print summary of existing accounts",
|
||||
Action: utils.MigrateFlags(accountList),
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
},
|
||||
Description: `
|
||||
TODO: Please write this
|
||||
`,
|
||||
Print a short summary of all accounts`,
|
||||
},
|
||||
{
|
||||
Action: accountCreate,
|
||||
Name: "new",
|
||||
Usage: "Create a new account",
|
||||
ArgsUsage: " ",
|
||||
Name: "new",
|
||||
Usage: "Create a new account",
|
||||
Action: utils.MigrateFlags(accountCreate),
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
geth account new
|
||||
|
||||
Creates a new account. Prints the address.
|
||||
Creates a new account and prints the address.
|
||||
|
||||
The account is saved in encrypted format, you are prompted for a passphrase.
|
||||
|
||||
@ -117,17 +122,20 @@ You must remember this passphrase to unlock your account in the future.
|
||||
|
||||
For non-interactive use the passphrase can be specified with the --password flag:
|
||||
|
||||
geth --password <passwordfile> account new
|
||||
|
||||
Note, this is meant to be used for testing only, it is a bad idea to save your
|
||||
password to file or expose in any other way.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: accountUpdate,
|
||||
Name: "update",
|
||||
Usage: "Update an existing account",
|
||||
Action: utils.MigrateFlags(accountUpdate),
|
||||
ArgsUsage: "<address>",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
Description: `
|
||||
geth account update <address>
|
||||
|
||||
@ -141,16 +149,22 @@ format to the newest format or change the password for an account.
|
||||
|
||||
For non-interactive use the passphrase can be specified with the --password flag:
|
||||
|
||||
geth --password <passwordfile> account update <address>
|
||||
geth account update [options] <address>
|
||||
|
||||
Since only one password can be given, only format update can be performed,
|
||||
changing your password is only possible interactively.
|
||||
`,
|
||||
},
|
||||
{
|
||||
Action: accountImport,
|
||||
Name: "import",
|
||||
Usage: "Import a private key into a new account",
|
||||
Name: "import",
|
||||
Usage: "Import a private key into a new account",
|
||||
Action: utils.MigrateFlags(accountImport),
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.LightKDFFlag,
|
||||
},
|
||||
ArgsUsage: "<keyFile>",
|
||||
Description: `
|
||||
geth account import <keyfile>
|
||||
@ -166,7 +180,7 @@ You must remember this passphrase to unlock your account in the future.
|
||||
|
||||
For non-interactive use the passphrase can be specified with the -password flag:
|
||||
|
||||
geth --password <passwordfile> account import <keyfile>
|
||||
geth account import [options] <keyfile>
|
||||
|
||||
Note:
|
||||
As you can directly copy your encrypted accounts to another ethereum instance,
|
||||
@ -298,10 +312,12 @@ func accountUpdate(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
|
||||
account, oldPassword := unlockAccount(ctx, ks, ctx.Args().First(), 0, nil)
|
||||
newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
|
||||
if err := ks.Update(account, oldPassword, newPassword); err != nil {
|
||||
utils.Fatalf("Could not update the account: %v", err)
|
||||
for _, addr := range ctx.Args() {
|
||||
account, oldPassword := unlockAccount(ctx, ks, addr, 0, nil)
|
||||
newPassword := getPassPhrase("Please give a new password. Do not forget this password.", true, 0, nil)
|
||||
if err := ks.Update(account, oldPassword, newPassword); err != nil {
|
||||
utils.Fatalf("Could not update the account: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -43,13 +43,13 @@ func tmpDatadirWithKeystore(t *testing.T) string {
|
||||
}
|
||||
|
||||
func TestAccountListEmpty(t *testing.T) {
|
||||
geth := runGeth(t, "account")
|
||||
geth := runGeth(t, "account", "list")
|
||||
geth.expectExit()
|
||||
}
|
||||
|
||||
func TestAccountList(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t, "--datadir", datadir, "account")
|
||||
geth := runGeth(t, "account", "list", "--datadir", datadir)
|
||||
defer geth.expectExit()
|
||||
if runtime.GOOS == "windows" {
|
||||
geth.expect(`
|
||||
@ -67,7 +67,7 @@ Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/k
|
||||
}
|
||||
|
||||
func TestAccountNew(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "account", "new")
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
@ -79,7 +79,7 @@ Repeat passphrase: {{.InputLine "foobar"}}
|
||||
}
|
||||
|
||||
func TestAccountNewBadRepeat(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "account", "new")
|
||||
geth := runGeth(t, "account", "new", "--lightkdf")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
Your new account is locked with a password. Please give a password. Do not forget this password.
|
||||
@ -92,9 +92,9 @@ Fatal: Passphrases do not match
|
||||
|
||||
func TestAccountUpdate(t *testing.T) {
|
||||
datadir := tmpDatadirWithKeystore(t)
|
||||
geth := runGeth(t,
|
||||
geth := runGeth(t, "account", "update",
|
||||
"--datadir", datadir, "--lightkdf",
|
||||
"account", "update", "f466859ead1932d743d622cb74fc058882e8648a")
|
||||
"f466859ead1932d743d622cb74fc058882e8648a")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3
|
||||
@ -107,7 +107,7 @@ Repeat passphrase: {{.InputLine "foobar2"}}
|
||||
}
|
||||
|
||||
func TestWalletImport(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "wallet", "import", "testdata/guswallet.json")
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
@ -122,7 +122,7 @@ Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f}
|
||||
}
|
||||
|
||||
func TestWalletImportBadPassword(t *testing.T) {
|
||||
geth := runGeth(t, "--lightkdf", "wallet", "import", "testdata/guswallet.json")
|
||||
geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json")
|
||||
defer geth.expectExit()
|
||||
geth.expect(`
|
||||
!! Unsupported terminal, password will be echoed.
|
||||
|
@ -29,11 +29,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/cmd/internal/browser"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var bugCommand = cli.Command{
|
||||
Action: reportBug,
|
||||
Action: utils.MigrateFlags(reportBug),
|
||||
Name: "bug",
|
||||
Usage: "opens a window to report a bug on the geth repo",
|
||||
ArgsUsage: " ",
|
||||
|
@ -40,80 +40,98 @@ import (
|
||||
|
||||
var (
|
||||
initCommand = cli.Command{
|
||||
Action: initGenesis,
|
||||
Action: utils.MigrateFlags(initGenesis),
|
||||
Name: "init",
|
||||
Usage: "Bootstrap and initialize a new genesis block",
|
||||
ArgsUsage: "<genesisPath>",
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The init command initializes a new genesis block and definition for the network.
|
||||
This is a destructive action and changes the network in which you will be
|
||||
participating.
|
||||
`,
|
||||
|
||||
It expects the genesis file as argument.`,
|
||||
}
|
||||
importCommand = cli.Command{
|
||||
Action: importChain,
|
||||
Action: utils.MigrateFlags(importChain),
|
||||
Name: "import",
|
||||
Usage: "Import a blockchain file",
|
||||
ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The import command imports blocks from an RLP-encoded form. The form can be one file
|
||||
with several RLP-encoded blocks, or several files can be used.
|
||||
The import command imports blocks from an RLP-encoded form. The form can be one file
|
||||
with several RLP-encoded blocks, or several files can be used.
|
||||
|
||||
If only one file is used, import error will result in failure. If several files are used,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.
|
||||
`,
|
||||
processing will proceed even if an individual RLP-file import failure occurs.`,
|
||||
}
|
||||
exportCommand = cli.Command{
|
||||
Action: exportChain,
|
||||
Action: utils.MigrateFlags(exportChain),
|
||||
Name: "export",
|
||||
Usage: "Export blockchain into file",
|
||||
ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
Requires a first argument of the file to write to.
|
||||
Optional second and third arguments control the first and
|
||||
last block to write. In this mode, the file will be appended
|
||||
if already existing.
|
||||
`,
|
||||
if already existing.`,
|
||||
}
|
||||
removedbCommand = cli.Command{
|
||||
Action: removeDB,
|
||||
Action: utils.MigrateFlags(removeDB),
|
||||
Name: "removedb",
|
||||
Usage: "Remove blockchain and state databases",
|
||||
ArgsUsage: " ",
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
TODO: Please write this
|
||||
`,
|
||||
Remove blockchain and state databases`,
|
||||
}
|
||||
dumpCommand = cli.Command{
|
||||
Action: dump,
|
||||
Action: utils.MigrateFlags(dump),
|
||||
Name: "dump",
|
||||
Usage: "Dump a specific block from storage",
|
||||
ArgsUsage: "[<blockHash> | <blockNum>]...",
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Flags: []cli.Flag{
|
||||
utils.DataDirFlag,
|
||||
utils.CacheFlag,
|
||||
utils.LightModeFlag,
|
||||
},
|
||||
Category: "BLOCKCHAIN COMMANDS",
|
||||
Description: `
|
||||
The arguments are interpreted as block numbers or hashes.
|
||||
Use "ethereum dump 0" to dump the genesis block.
|
||||
`,
|
||||
Use "ethereum dump 0" to dump the genesis block.`,
|
||||
}
|
||||
)
|
||||
|
||||
// initGenesis will initialise the given JSON format genesis file and writes it as
|
||||
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
|
||||
func initGenesis(ctx *cli.Context) error {
|
||||
// Make sure we have a valid genesis JSON
|
||||
genesisPath := ctx.Args().First()
|
||||
if len(genesisPath) == 0 {
|
||||
utils.Fatalf("must supply path to genesis JSON file")
|
||||
utils.Fatalf("Must supply path to genesis JSON file")
|
||||
}
|
||||
|
||||
stack := makeFullNode(ctx)
|
||||
chaindb := utils.MakeChainDatabase(ctx, stack)
|
||||
|
||||
file, err := os.Open(genesisPath)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to read genesis file: %v", err)
|
||||
utils.Fatalf("Failed to read genesis file: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
@ -121,12 +139,19 @@ func initGenesis(ctx *cli.Context) error {
|
||||
if err := json.NewDecoder(file).Decode(genesis); err != nil {
|
||||
utils.Fatalf("invalid genesis file: %v", err)
|
||||
}
|
||||
|
||||
_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to write genesis block: %v", err)
|
||||
// Open an initialise both full and light databases
|
||||
stack := makeFullNode(ctx)
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
chaindb, err := stack.OpenDatabase(name, 0, 0)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to open database: %v", err)
|
||||
}
|
||||
_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to write genesis block: %v", err)
|
||||
}
|
||||
log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
|
||||
}
|
||||
log.Info("Successfully wrote genesis state", "hash", hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -245,24 +270,29 @@ func exportChain(ctx *cli.Context) error {
|
||||
|
||||
func removeDB(ctx *cli.Context) error {
|
||||
stack, _ := makeConfigNode(ctx)
|
||||
dbdir := stack.ResolvePath(utils.ChainDbName(ctx))
|
||||
if !common.FileExist(dbdir) {
|
||||
fmt.Println(dbdir, "does not exist")
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Println(dbdir)
|
||||
confirm, err := console.Stdin.PromptConfirm("Remove this database?")
|
||||
switch {
|
||||
case err != nil:
|
||||
utils.Fatalf("%v", err)
|
||||
case !confirm:
|
||||
fmt.Println("Operation aborted")
|
||||
default:
|
||||
fmt.Println("Removing...")
|
||||
start := time.Now()
|
||||
os.RemoveAll(dbdir)
|
||||
fmt.Printf("Removed in %v\n", time.Since(start))
|
||||
for _, name := range []string{"chaindata", "lightchaindata"} {
|
||||
// Ensure the database exists in the first place
|
||||
logger := log.New("database", name)
|
||||
|
||||
dbdir := stack.ResolvePath(name)
|
||||
if !common.FileExist(dbdir) {
|
||||
logger.Info("Database doesn't exist, skipping", "path", dbdir)
|
||||
continue
|
||||
}
|
||||
// Confirm removal and execute
|
||||
fmt.Println(dbdir)
|
||||
confirm, err := console.Stdin.PromptConfirm("Remove this database?")
|
||||
switch {
|
||||
case err != nil:
|
||||
utils.Fatalf("%v", err)
|
||||
case !confirm:
|
||||
logger.Warn("Database deletion aborted")
|
||||
default:
|
||||
start := time.Now()
|
||||
os.RemoveAll(dbdir)
|
||||
logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -38,10 +38,11 @@ import (
|
||||
|
||||
var (
|
||||
dumpConfigCommand = cli.Command{
|
||||
Action: dumpConfig,
|
||||
Action: utils.MigrateFlags(dumpConfig),
|
||||
Name: "dumpconfig",
|
||||
Usage: "Show configuration values",
|
||||
ArgsUsage: "",
|
||||
Flags: append(nodeFlags, rpcFlags...),
|
||||
Category: "MISCELLANEOUS COMMANDS",
|
||||
Description: `The dumpconfig command shows configuration values.`,
|
||||
}
|
||||
|
@ -29,41 +29,44 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
consoleFlags = []cli.Flag{utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag}
|
||||
|
||||
consoleCommand = cli.Command{
|
||||
Action: localConsole,
|
||||
Name: "console",
|
||||
Usage: "Start an interactive JavaScript environment",
|
||||
ArgsUsage: "", // TODO: Write this!
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Action: utils.MigrateFlags(localConsole),
|
||||
Name: "console",
|
||||
Usage: "Start an interactive JavaScript environment",
|
||||
Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.`,
|
||||
}
|
||||
|
||||
attachCommand = cli.Command{
|
||||
Action: remoteConsole,
|
||||
Action: utils.MigrateFlags(remoteConsole),
|
||||
Name: "attach",
|
||||
Usage: "Start an interactive JavaScript environment (connect to node)",
|
||||
ArgsUsage: "", // TODO: Write this!
|
||||
ArgsUsage: "[endpoint]",
|
||||
Flags: append(consoleFlags, utils.DataDirFlag),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The Geth console is an interactive shell for the JavaScript runtime environment
|
||||
which exposes a node admin interface as well as the Ðapp JavaScript API.
|
||||
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
|
||||
This command allows to open a console on a running geth node.
|
||||
`,
|
||||
This command allows to open a console on a running geth node.`,
|
||||
}
|
||||
|
||||
javascriptCommand = cli.Command{
|
||||
Action: ephemeralConsole,
|
||||
Action: utils.MigrateFlags(ephemeralConsole),
|
||||
Name: "js",
|
||||
Usage: "Execute the specified JavaScript files",
|
||||
ArgsUsage: "", // TODO: Write this!
|
||||
ArgsUsage: "<jsfile> [jsfile...]",
|
||||
Flags: append(nodeFlags, consoleFlags...),
|
||||
Category: "CONSOLE COMMANDS",
|
||||
Description: `
|
||||
The JavaScript VM exposes a node admin interface as well as the Ðapp
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console
|
||||
`,
|
||||
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console`,
|
||||
}
|
||||
)
|
||||
|
||||
@ -81,11 +84,12 @@ func localConsole(ctx *cli.Context) error {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DataDir: utils.MakeDataDir(ctx),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
@ -118,17 +122,18 @@ func remoteConsole(ctx *cli.Context) error {
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
}
|
||||
defer console.Stop(false)
|
||||
|
||||
// If only a short execution was requested, evaluate and return
|
||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||
console.Evaluate(script)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise print the welcome screen and enter interactive mode
|
||||
console.Welcome()
|
||||
console.Interactive()
|
||||
@ -151,7 +156,7 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
|
||||
}
|
||||
|
||||
// ephemeralConsole starts a new geth node, attaches an ephemeral JavaScript
|
||||
// console to it, and each of the files specified as arguments and tears the
|
||||
// console to it, executes each of the files specified as arguments and tears
|
||||
// everything down.
|
||||
func ephemeralConsole(ctx *cli.Context) error {
|
||||
// Create and start the node based on the CLI flags
|
||||
@ -165,11 +170,12 @@ func ephemeralConsole(ctx *cli.Context) error {
|
||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
||||
}
|
||||
config := console.Config{
|
||||
DataDir: node.DataDir(),
|
||||
DataDir: utils.MakeDataDir(ctx),
|
||||
DocRoot: ctx.GlobalString(utils.JSpathFlag.Name),
|
||||
Client: client,
|
||||
Preload: utils.MakeConsolePreloads(ctx),
|
||||
}
|
||||
|
||||
console, err := console.New(config)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||
|
143
cmd/geth/main.go
143
cmd/geth/main.go
@ -49,6 +49,82 @@ var (
|
||||
relOracle = common.HexToAddress("0xfa7b9770ca4cb04296cac84f37736d4041251cdf")
|
||||
// The app that holds all commands and flags.
|
||||
app = utils.NewApp(gitCommit, "the go-ethereum command line interface")
|
||||
// flags that configure the node
|
||||
nodeFlags = []cli.Flag{
|
||||
utils.IdentityFlag,
|
||||
utils.UnlockedAccountFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.BootnodesFlag,
|
||||
utils.BootnodesV4Flag,
|
||||
utils.BootnodesV5Flag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
utils.TxPoolGlobalSlotsFlag,
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.CacheFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.GasPriceFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningEnabledFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.NATFlag,
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.NoCompactionFlag,
|
||||
utils.GpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.ExtraDataFlag,
|
||||
configFileFlag,
|
||||
}
|
||||
|
||||
rpcFlags = []cli.Flag{
|
||||
utils.RPCEnabledFlag,
|
||||
utils.RPCListenAddrFlag,
|
||||
utils.RPCPortFlag,
|
||||
utils.RPCApiFlag,
|
||||
utils.WSEnabledFlag,
|
||||
utils.WSListenAddrFlag,
|
||||
utils.WSPortFlag,
|
||||
utils.WSApiFlag,
|
||||
utils.WSAllowedOriginsFlag,
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -81,70 +157,9 @@ func init() {
|
||||
dumpConfigCommand,
|
||||
}
|
||||
|
||||
app.Flags = []cli.Flag{
|
||||
utils.IdentityFlag,
|
||||
utils.UnlockedAccountFlag,
|
||||
utils.PasswordFileFlag,
|
||||
utils.BootnodesFlag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
utils.EthashDatasetDirFlag,
|
||||
utils.EthashDatasetsInMemoryFlag,
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
utils.FastSyncFlag,
|
||||
utils.LightModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.LightServFlag,
|
||||
utils.LightPeersFlag,
|
||||
utils.LightKDFFlag,
|
||||
utils.CacheFlag,
|
||||
utils.TrieCacheGenFlag,
|
||||
utils.JSpathFlag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
utils.EtherbaseFlag,
|
||||
utils.GasPriceFlag,
|
||||
utils.MinerThreadsFlag,
|
||||
utils.MiningEnabledFlag,
|
||||
utils.TargetGasLimitFlag,
|
||||
utils.NATFlag,
|
||||
utils.NoDiscoverFlag,
|
||||
utils.DiscoveryV5Flag,
|
||||
utils.NetrestrictFlag,
|
||||
utils.NodeKeyFileFlag,
|
||||
utils.NodeKeyHexFlag,
|
||||
utils.RPCEnabledFlag,
|
||||
utils.RPCListenAddrFlag,
|
||||
utils.RPCPortFlag,
|
||||
utils.RPCApiFlag,
|
||||
utils.WSEnabledFlag,
|
||||
utils.WSListenAddrFlag,
|
||||
utils.WSPortFlag,
|
||||
utils.WSApiFlag,
|
||||
utils.WSAllowedOriginsFlag,
|
||||
utils.IPCDisabledFlag,
|
||||
utils.IPCPathFlag,
|
||||
utils.ExecFlag,
|
||||
utils.PreloadJSFlag,
|
||||
utils.WhisperEnabledFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.TestNetFlag,
|
||||
utils.VMEnableDebugFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.RPCCORSDomainFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
utils.MetricsEnabledFlag,
|
||||
utils.FakePoWFlag,
|
||||
utils.NoCompactionFlag,
|
||||
utils.GpoBlocksFlag,
|
||||
utils.GpoPercentileFlag,
|
||||
utils.ExtraDataFlag,
|
||||
configFileFlag,
|
||||
}
|
||||
app.Flags = append(app.Flags, nodeFlags...)
|
||||
app.Flags = append(app.Flags, rpcFlags...)
|
||||
app.Flags = append(app.Flags, consoleFlags...)
|
||||
app.Flags = append(app.Flags, debug.Flags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
|
@ -34,7 +34,7 @@ import (
|
||||
|
||||
var (
|
||||
makedagCommand = cli.Command{
|
||||
Action: makedag,
|
||||
Action: utils.MigrateFlags(makedag),
|
||||
Name: "makedag",
|
||||
Usage: "Generate ethash DAG (for testing)",
|
||||
ArgsUsage: "<blockNum> <outputDir>",
|
||||
@ -47,7 +47,7 @@ Regular users do not need to execute it.
|
||||
`,
|
||||
}
|
||||
versionCommand = cli.Command{
|
||||
Action: version,
|
||||
Action: utils.MigrateFlags(version),
|
||||
Name: "version",
|
||||
Usage: "Print version numbers",
|
||||
ArgsUsage: " ",
|
||||
@ -57,7 +57,7 @@ The output of this command is supposed to be machine-readable.
|
||||
`,
|
||||
}
|
||||
licenseCommand = cli.Command{
|
||||
Action: license,
|
||||
Action: utils.MigrateFlags(license),
|
||||
Name: "license",
|
||||
Usage: "Display license information",
|
||||
ArgsUsage: " ",
|
||||
@ -103,7 +103,7 @@ func version(ctx *cli.Context) error {
|
||||
}
|
||||
fmt.Println("Architecture:", runtime.GOARCH)
|
||||
fmt.Println("Protocol Versions:", eth.ProtocolVersions)
|
||||
fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name))
|
||||
fmt.Println("Network Id:", eth.DefaultConfig.NetworkId)
|
||||
fmt.Println("Go Version:", runtime.Version())
|
||||
fmt.Println("Operating System:", runtime.GOOS)
|
||||
fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
|
||||
|
@ -49,7 +49,7 @@ var (
|
||||
Usage: "Refresh interval in seconds",
|
||||
}
|
||||
monitorCommand = cli.Command{
|
||||
Action: monitor,
|
||||
Action: utils.MigrateFlags(monitor), // keep track of migration progress
|
||||
Name: "monitor",
|
||||
Usage: "Monitor and visualize node metrics",
|
||||
ArgsUsage: " ",
|
||||
|
@ -20,6 +20,7 @@ package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
@ -67,8 +68,10 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
configFileFlag,
|
||||
utils.DataDirFlag,
|
||||
utils.KeyStoreDirFlag,
|
||||
utils.NoUSBFlag,
|
||||
utils.NetworkIdFlag,
|
||||
utils.TestNetFlag,
|
||||
utils.TestnetFlag,
|
||||
utils.RinkebyFlag,
|
||||
utils.DevModeFlag,
|
||||
utils.SyncModeFlag,
|
||||
utils.EthStatsURLFlag,
|
||||
@ -89,6 +92,18 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
utils.TxPoolPriceLimitFlag,
|
||||
utils.TxPoolPriceBumpFlag,
|
||||
utils.TxPoolAccountSlotsFlag,
|
||||
utils.TxPoolGlobalSlotsFlag,
|
||||
utils.TxPoolAccountQueueFlag,
|
||||
utils.TxPoolGlobalQueueFlag,
|
||||
utils.TxPoolLifetimeFlag,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "PERFORMANCE TUNING",
|
||||
Flags: []cli.Flag{
|
||||
@ -127,6 +142,8 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
Name: "NETWORKING",
|
||||
Flags: []cli.Flag{
|
||||
utils.BootnodesFlag,
|
||||
utils.BootnodesV4Flag,
|
||||
utils.BootnodesV5Flag,
|
||||
utils.ListenPortFlag,
|
||||
utils.MaxPeersFlag,
|
||||
utils.MaxPendingPeersFlag,
|
||||
@ -185,6 +202,39 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
},
|
||||
}
|
||||
|
||||
// byCategory sorts an array of flagGroup by Name in the order
|
||||
// defined in AppHelpFlagGroups.
|
||||
type byCategory []flagGroup
|
||||
|
||||
func (a byCategory) Len() int { return len(a) }
|
||||
func (a byCategory) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byCategory) Less(i, j int) bool {
|
||||
iCat, jCat := a[i].Name, a[j].Name
|
||||
iIdx, jIdx := len(AppHelpFlagGroups), len(AppHelpFlagGroups) // ensure non categorized flags come last
|
||||
|
||||
for i, group := range AppHelpFlagGroups {
|
||||
if iCat == group.Name {
|
||||
iIdx = i
|
||||
}
|
||||
if jCat == group.Name {
|
||||
jIdx = i
|
||||
}
|
||||
}
|
||||
|
||||
return iIdx < jIdx
|
||||
}
|
||||
|
||||
func flagCategory(flag cli.Flag) string {
|
||||
for _, category := range AppHelpFlagGroups {
|
||||
for _, flg := range category.Flags {
|
||||
if flg.GetName() == flag.GetName() {
|
||||
return category.Name
|
||||
}
|
||||
}
|
||||
}
|
||||
return "MISC"
|
||||
}
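// Illustrative sketch (not part of this diff): command flags get bucketed by the category
// they appear under in AppHelpFlagGroups, anything unknown lands in "MISC", and byCategory
// keeps the buckets in AppHelpFlagGroups order when the help screen is rendered. The flag
// choices below are arbitrary.
//
//	groups := []flagGroup{
//		{Name: "MISC", Flags: []cli.Flag{configFileFlag}},
//		{Name: "NETWORKING", Flags: []cli.Flag{utils.ListenPortFlag}},
//	}
//	sort.Sort(byCategory(groups)) // "NETWORKING" now precedes "MISC" in the rendered help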
|
||||
|
||||
func init() {
|
||||
// Override the default app help template
|
||||
cli.AppHelpTemplate = AppHelpTemplate
|
||||
@ -194,6 +244,7 @@ func init() {
|
||||
App interface{}
|
||||
FlagGroups []flagGroup
|
||||
}
|
||||
|
||||
// Override the default app help printer, but only for the global app help
|
||||
originalHelpPrinter := cli.HelpPrinter
|
||||
cli.HelpPrinter = func(w io.Writer, tmpl string, data interface{}) {
|
||||
@ -223,6 +274,27 @@ func init() {
|
||||
}
|
||||
// Render out custom usage screen
|
||||
originalHelpPrinter(w, tmpl, helpData{data, AppHelpFlagGroups})
|
||||
} else if tmpl == utils.CommandHelpTemplate {
|
||||
// Iterate over all command specific flags and categorize them
|
||||
categorized := make(map[string][]cli.Flag)
|
||||
for _, flag := range data.(cli.Command).Flags {
|
||||
if _, ok := categorized[flag.String()]; !ok {
|
||||
categorized[flagCategory(flag)] = append(categorized[flagCategory(flag)], flag)
|
||||
}
|
||||
}
|
||||
|
||||
// sort to get a stable ordering
|
||||
sorted := make([]flagGroup, 0, len(categorized))
|
||||
for cat, flgs := range categorized {
|
||||
sorted = append(sorted, flagGroup{cat, flgs})
|
||||
}
|
||||
sort.Sort(byCategory(sorted))
|
||||
|
||||
// add sorted array to data and render with default printer
|
||||
originalHelpPrinter(w, tmpl, map[string]interface{}{
|
||||
"cmd": data,
|
||||
"categorizedFlags": sorted,
|
||||
})
|
||||
} else {
|
||||
originalHelpPrinter(w, tmpl, data)
|
||||
}
|
||||
|
@ -51,9 +51,10 @@ ADD account.pass /account.pass
|
||||
EXPOSE 8080
|
||||
|
||||
CMD [ \
|
||||
"/faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", \
|
||||
"--ethport", "{{.EthPort}}", "--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", \
|
||||
"--github.user", "{{.GitHubUser}}", "--github.token", "{{.GitHubToken}}", "--account.json", "/account.json", "--account.pass", "/account.pass" \
|
||||
"/faucet", "--genesis", "/genesis.json", "--network", "{{.NetworkID}}", "--bootnodes", "{{.Bootnodes}}", "--ethstats", "{{.Ethstats}}", "--ethport", "{{.EthPort}}", \
|
||||
"--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", "--faucet.tiers", "{{.FaucetTiers}}", \
|
||||
"--github.user", "{{.GitHubUser}}", "--github.token", "{{.GitHubToken}}", "--account.json", "/account.json", "--account.pass", "/account.pass" \
|
||||
{{if .CaptchaToken}}, "--captcha.token", "{{.CaptchaToken}}", "--captcha.secret", "{{.CaptchaSecret}}"{{end}} \
|
||||
]`
|
||||
|
||||
// faucetComposefile is the docker-compose.yml file required to deploy and maintain
|
||||
@ -74,8 +75,11 @@ services:
|
||||
- ETH_NAME={{.EthName}}
|
||||
- FAUCET_AMOUNT={{.FaucetAmount}}
|
||||
- FAUCET_MINUTES={{.FaucetMinutes}}
|
||||
- FAUCET_TIERS={{.FaucetTiers}}
|
||||
- GITHUB_USER={{.GitHubUser}}
|
||||
- GITHUB_TOKEN={{.GitHubToken}}{{if .VHost}}
|
||||
- GITHUB_TOKEN={{.GitHubToken}}
|
||||
- CAPTCHA_TOKEN={{.CaptchaToken}}
|
||||
- CAPTCHA_SECRET={{.CaptchaSecret}}{{if .VHost}}
|
||||
- VIRTUAL_HOST={{.VHost}}
|
||||
- VIRTUAL_PORT=8080{{end}}
|
||||
restart: always
|
||||
@ -97,9 +101,12 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
"EthPort": config.node.portFull,
|
||||
"GitHubUser": config.githubUser,
|
||||
"GitHubToken": config.githubToken,
|
||||
"CaptchaToken": config.captchaToken,
|
||||
"CaptchaSecret": config.captchaSecret,
|
||||
"FaucetName": strings.Title(network),
|
||||
"FaucetAmount": config.amount,
|
||||
"FaucetMinutes": config.minutes,
|
||||
"FaucetTiers": config.tiers,
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
|
||||
@ -113,8 +120,11 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
"EthName": config.node.ethstats[:strings.Index(config.node.ethstats, ":")],
|
||||
"GitHubUser": config.githubUser,
|
||||
"GitHubToken": config.githubToken,
|
||||
"CaptchaToken": config.captchaToken,
|
||||
"CaptchaSecret": config.captchaSecret,
|
||||
"FaucetAmount": config.amount,
|
||||
"FaucetMinutes": config.minutes,
|
||||
"FaucetTiers": config.tiers,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -135,18 +145,21 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config
|
||||
// faucetInfos is returned from a faucet status check to allow reporting various
// configuration parameters.
|
||||
type faucetInfos struct {
|
||||
node *nodeInfos
|
||||
host string
|
||||
port int
|
||||
amount int
|
||||
minutes int
|
||||
githubUser string
|
||||
githubToken string
|
||||
node *nodeInfos
|
||||
host string
|
||||
port int
|
||||
amount int
|
||||
minutes int
|
||||
tiers int
|
||||
githubUser string
|
||||
githubToken string
|
||||
captchaToken string
|
||||
captchaSecret string
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
func (info *faucetInfos) String() string {
|
||||
return fmt.Sprintf("host=%s, api=%d, eth=%d, amount=%d, minutes=%d, github=%s, ethstats=%s", info.host, info.port, info.node.portFull, info.amount, info.minutes, info.githubUser, info.node.ethstats)
|
||||
return fmt.Sprintf("host=%s, api=%d, eth=%d, amount=%d, minutes=%d, tiers=%d, github=%s, captcha=%v, ethstats=%s", info.host, info.port, info.node.portFull, info.amount, info.minutes, info.tiers, info.githubUser, info.captchaToken != "", info.node.ethstats)
|
||||
}
|
||||
|
||||
// checkFaucet does a health-check against a faucet server to verify whether
|
||||
@ -177,6 +190,7 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
|
||||
}
|
||||
amount, _ := strconv.Atoi(infos.envvars["FAUCET_AMOUNT"])
|
||||
minutes, _ := strconv.Atoi(infos.envvars["FAUCET_MINUTES"])
|
||||
tiers, _ := strconv.Atoi(infos.envvars["FAUCET_TIERS"])
|
||||
|
||||
// Retrieve the funding account information
|
||||
var out []byte
|
||||
@ -200,11 +214,14 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) {
|
||||
keyJSON: keyJSON,
|
||||
keyPass: keyPass,
|
||||
},
|
||||
host: host,
|
||||
port: port,
|
||||
amount: amount,
|
||||
minutes: minutes,
|
||||
githubUser: infos.envvars["GITHUB_USER"],
|
||||
githubToken: infos.envvars["GITHUB_TOKEN"],
|
||||
host: host,
|
||||
port: port,
|
||||
amount: amount,
|
||||
minutes: minutes,
|
||||
tiers: tiers,
|
||||
githubUser: infos.envvars["GITHUB_USER"],
|
||||
githubToken: infos.envvars["GITHUB_TOKEN"],
|
||||
captchaToken: infos.envvars["CAPTCHA_TOKEN"],
|
||||
captchaSecret: infos.envvars["CAPTCHA_SECRET"],
|
||||
}, nil
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ ADD genesis.json /genesis.json
|
||||
RUN \
|
||||
echo '/geth init /genesis.json' > geth.sh && \{{if .Unlock}}
|
||||
echo 'mkdir -p /root/.ethereum/keystore/ && cp /signer.json /root/.ethereum/keystore/' >> geth.sh && \{{end}}
|
||||
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .Bootnodes}}--bootnodes {{.Bootnodes}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}}' >> geth.sh
|
||||
echo $'/geth --networkid {{.NetworkID}} --cache 512 --port {{.Port}} --maxpeers {{.Peers}} {{.LightFlag}} --ethstats \'{{.Ethstats}}\' {{if .BootV4}}--bootnodesv4 {{.BootV4}}{{end}} {{if .BootV5}}--bootnodesv5 {{.BootV5}}{{end}} {{if .Etherbase}}--etherbase {{.Etherbase}} --mine{{end}}{{if .Unlock}}--unlock 0 --password /signer.pass --mine{{end}} --targetgaslimit {{.GasTarget}} --gasprice {{.GasPrice}}' >> geth.sh
|
||||
|
||||
ENTRYPOINT ["/bin/sh", "geth.sh"]
|
||||
`
|
||||
@ -66,17 +66,20 @@ services:
|
||||
- LIGHT_PEERS={{.LightPeers}}
|
||||
- STATS_NAME={{.Ethstats}}
|
||||
- MINER_NAME={{.Etherbase}}
|
||||
- GAS_TARGET={{.GasTarget}}
|
||||
- GAS_PRICE={{.GasPrice}}
|
||||
restart: always
|
||||
`
|
||||
|
||||
// deployNode deploys a new Ethereum node container to a remote machine via SSH,
|
||||
// docker and docker-compose. If an instance with the specified network name
|
||||
// already exists there, it will be overwritten!
|
||||
func deployNode(client *sshClient, network string, bootnodes []string, config *nodeInfos) ([]byte, error) {
|
||||
func deployNode(client *sshClient, network string, bootv4, bootv5 []string, config *nodeInfos) ([]byte, error) {
|
||||
kind := "sealnode"
|
||||
if config.keyJSON == "" && config.etherbase == "" {
|
||||
kind = "bootnode"
|
||||
bootnodes = make([]string, 0)
|
||||
bootv4 = make([]string, 0)
|
||||
bootv5 = make([]string, 0)
|
||||
}
|
||||
// Generate the content to upload to the server
|
||||
workdir := fmt.Sprintf("%d", rand.Int63())
|
||||
@ -92,9 +95,12 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"Port": config.portFull,
|
||||
"Peers": config.peersTotal,
|
||||
"LightFlag": lightFlag,
|
||||
"Bootnodes": strings.Join(bootnodes, ","),
|
||||
"BootV4": strings.Join(bootv4, ","),
|
||||
"BootV5": strings.Join(bootv5, ","),
|
||||
"Ethstats": config.ethstats,
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": uint64(1000000 * config.gasTarget),
|
||||
"GasPrice": uint64(1000000000 * config.gasPrice),
|
||||
"Unlock": config.keyJSON != "",
|
||||
})
|
||||
files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes()
|
||||
@ -111,6 +117,8 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
"LightPeers": config.peersLight,
|
||||
"Ethstats": config.ethstats[:strings.Index(config.ethstats, ":")],
|
||||
"Etherbase": config.etherbase,
|
||||
"GasTarget": config.gasTarget,
|
||||
"GasPrice": config.gasPrice,
|
||||
})
|
||||
files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes()
|
||||
|
||||
@ -127,7 +135,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n
|
||||
}
|
||||
defer client.Run("rm -rf " + workdir)
|
||||
|
||||
// Build and deploy the bootnode service
|
||||
// Build and deploy the boot or seal node service
|
||||
return nil, client.Stream(fmt.Sprintf("cd %s && docker-compose -p %s up -d --build", workdir, network))
|
||||
}
|
||||
|
||||
@ -147,6 +155,8 @@ type nodeInfos struct {
|
||||
etherbase string
|
||||
keyJSON string
|
||||
keyPass string
|
||||
gasTarget float64
|
||||
gasPrice float64
|
||||
}
|
||||
|
||||
// String implements the stringer interface.
|
||||
@ -155,7 +165,8 @@ func (info *nodeInfos) String() string {
|
||||
if info.peersLight > 0 {
|
||||
discv5 = fmt.Sprintf(", portv5=%d", info.portLight)
|
||||
}
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s", info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats)
|
||||
return fmt.Sprintf("port=%d%s, datadir=%s, peers=%d, lights=%d, ethstats=%s, gastarget=%0.3f MGas, gasprice=%0.3f GWei",
|
||||
info.portFull, discv5, info.datadir, info.peersTotal, info.peersLight, info.ethstats, info.gasTarget, info.gasPrice)
|
||||
}
|
||||
|
||||
// checkNode does a health-check against a boot or seal node server to verify
|
||||
@ -176,6 +187,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
// Resolve a few types from the environmental variables
|
||||
totalPeers, _ := strconv.Atoi(infos.envvars["TOTAL_PEERS"])
|
||||
lightPeers, _ := strconv.Atoi(infos.envvars["LIGHT_PEERS"])
|
||||
gasTarget, _ := strconv.ParseFloat(infos.envvars["GAS_TARGET"], 64)
|
||||
gasPrice, _ := strconv.ParseFloat(infos.envvars["GAS_PRICE"], 64)
|
||||
|
||||
// Container available, retrieve its node ID and its genesis json
|
||||
var out []byte
|
||||
@ -213,6 +226,8 @@ func checkNode(client *sshClient, network string, boot bool) (*nodeInfos, error)
|
||||
etherbase: infos.envvars["MINER_NAME"],
|
||||
keyJSON: keyJSON,
|
||||
keyPass: keyPass,
|
||||
gasTarget: gasTarget,
|
||||
gasPrice: gasPrice,
|
||||
}
|
||||
stats.enodeFull = fmt.Sprintf("enode://%s@%s:%d", id, client.address, stats.portFull)
|
||||
if stats.portLight != 0 {
|
||||
|
@ -17,6 +17,8 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@ -37,18 +39,26 @@ import (
|
||||
type sshClient struct {
|
||||
server string // Server name or IP without port number
|
||||
address string // IP address of the remote server
|
||||
pubkey []byte // RSA public key to authenticate the server
|
||||
client *ssh.Client
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// dial establishes an SSH connection to a remote node using the current user and
|
||||
// the user's configured private RSA key.
|
||||
func dial(server string) (*sshClient, error) {
|
||||
// the user's configured private RSA key. If that fails, it falls back to password
// authentication. The caller may override the login user via user@server:port.
|
||||
func dial(server string, pubkey []byte) (*sshClient, error) {
|
||||
// Figure out a label for the server and a logger
|
||||
label := server
|
||||
if strings.Contains(label, ":") {
|
||||
label = label[:strings.Index(label, ":")]
|
||||
}
|
||||
login := ""
|
||||
if strings.Contains(server, "@") {
|
||||
login = label[:strings.Index(label, "@")]
|
||||
label = label[strings.Index(label, "@")+1:]
|
||||
server = server[strings.Index(server, "@")+1:]
|
||||
}
|
||||
logger := log.New("server", label)
|
||||
logger.Debug("Attempting to establish SSH connection")
|
||||
|
||||
@ -56,6 +66,9 @@ func dial(server string) (*sshClient, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if login == "" {
|
||||
login = user.Username
|
||||
}
|
||||
// Configure the supported authentication methods (private key and password)
|
||||
var auths []ssh.AuthMethod
|
||||
|
||||
@ -71,7 +84,7 @@ func dial(server string) (*sshClient, error) {
|
||||
}
|
||||
}
|
||||
auths = append(auths, ssh.PasswordCallback(func() (string, error) {
|
||||
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", user.Username, server)
|
||||
fmt.Printf("What's the login password for %s at %s? (won't be echoed)\n> ", login, server)
|
||||
blob, err := terminal.ReadPassword(int(syscall.Stdin))
|
||||
|
||||
fmt.Println()
|
||||
@ -86,11 +99,36 @@ func dial(server string) (*sshClient, error) {
|
||||
return nil, errors.New("no IPs associated with domain")
|
||||
}
|
||||
// Try to dial in to the remote server
|
||||
logger.Trace("Dialing remote SSH server", "user", user.Username, "key", path)
|
||||
logger.Trace("Dialing remote SSH server", "user", login)
|
||||
if !strings.Contains(server, ":") {
|
||||
server += ":22"
|
||||
}
|
||||
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: user.Username, Auth: auths})
|
||||
keycheck := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||
// If no public key is known for SSH, ask the user to confirm
|
||||
if pubkey == nil {
|
||||
fmt.Printf("The authenticity of host '%s (%s)' can't be established.\n", hostname, remote)
|
||||
fmt.Printf("SSH key fingerprint is %s [MD5]\n", ssh.FingerprintLegacyMD5(key))
|
||||
fmt.Printf("Are you sure you want to continue connecting (yes/no)? ")
|
||||
|
||||
text, err := bufio.NewReader(os.Stdin).ReadString('\n')
|
||||
switch {
|
||||
case err != nil:
|
||||
return err
|
||||
case strings.TrimSpace(text) == "yes":
|
||||
pubkey = key.Marshal()
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unknown auth choice: %v", text)
|
||||
}
|
||||
}
|
||||
// If a public key exists for this SSH server, check that it matches
|
||||
if bytes.Compare(pubkey, key.Marshal()) == 0 {
|
||||
return nil
|
||||
}
|
||||
// We have a mismatch, forbid connecting
|
||||
return errors.New("ssh key mismatch, readd the machine to update")
|
||||
}
|
||||
client, err := ssh.Dial("tcp", server, &ssh.ClientConfig{User: login, Auth: auths, HostKeyCallback: keycheck})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -98,6 +136,7 @@ func dial(server string) (*sshClient, error) {
|
||||
c := &sshClient{
|
||||
server: label,
|
||||
address: addr[0],
|
||||
pubkey: pubkey,
|
||||
client: client,
|
||||
logger: logger,
|
||||
}
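// Illustrative call (a sketch, not part of the diff): dialing a machine with no cached host
// key; the address is a placeholder. On first contact the user confirms the SSH fingerprint,
// and the accepted key is carried back in the returned client for the wizard to persist.
//
//	if client, err := dial("ubuntu@203.0.113.7:22", nil); err != nil {
//		log.Error("Server not reachable", "err", err)
//	} else {
//		defer client.Close()
//	}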
|
||||
|
@ -44,14 +44,24 @@ type config struct {
|
||||
bootLight []string // Bootnodes to always connect to by light nodes
|
||||
ethstats string // Ethstats settings to cache for node deploys
|
||||
|
||||
Servers []string `json:"servers,omitempty"`
|
||||
Servers map[string][]byte `json:"servers,omitempty"`
|
||||
}
|
||||
|
||||
// servers retrieves an alphabetically sorted list of servers.
|
||||
func (c config) servers() []string {
|
||||
servers := make([]string, 0, len(c.Servers))
|
||||
for server := range c.Servers {
|
||||
servers = append(servers, server)
|
||||
}
|
||||
sort.Strings(servers)
|
||||
|
||||
return servers
|
||||
}
|
||||
|
||||
// flush dumps the contents of config to disk.
|
||||
func (c config) flush() {
|
||||
os.MkdirAll(filepath.Dir(c.path), 0755)
|
||||
|
||||
sort.Strings(c.Servers)
|
||||
out, _ := json.MarshalIndent(c, "", " ")
|
||||
if err := ioutil.WriteFile(c.path, out, 0644); err != nil {
|
||||
log.Warn("Failed to save puppeth configs", "file", c.path, "err", err)
|
||||
@ -152,6 +162,48 @@ func (w *wizard) readDefaultInt(def int) int {
|
||||
}
|
||||
}
|
||||
|
||||
// readFloat reads a single line from stdin, trimming it from spaces, enforcing it
// to parse into a float.
|
||||
func (w *wizard) readFloat() float64 {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
continue
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
||||
if err != nil {
|
||||
log.Error("Invalid input, expected float", "err", err)
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
// readDefaultFloat reads a single line from stdin, trimming it from spaces, enforcing
// it to parse into a float. If an empty line is entered, the default value is returned.
|
||||
func (w *wizard) readDefaultFloat(def float64) float64 {
|
||||
for {
|
||||
fmt.Printf("> ")
|
||||
text, err := w.in.ReadString('\n')
|
||||
if err != nil {
|
||||
log.Crit("Failed to read user input", "err", err)
|
||||
}
|
||||
if text = strings.TrimSpace(text); text == "" {
|
||||
return def
|
||||
}
|
||||
val, err := strconv.ParseFloat(strings.TrimSpace(text), 64)
|
||||
if err != nil {
|
||||
log.Error("Invalid input, expected float", "err", err)
|
||||
continue
|
||||
}
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
// readPassword reads a single line from stdin, trimming it from the trailing new
|
||||
// line and returns it. The input will not be echoed.
|
||||
func (w *wizard) readPassword() string {
|
||||
|
@ -44,6 +44,7 @@ func (w *wizard) deployFaucet() {
|
||||
host: client.server,
|
||||
amount: 1,
|
||||
minutes: 1440,
|
||||
tiers: 3,
|
||||
}
|
||||
}
|
||||
infos.node.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
@ -68,10 +69,17 @@ func (w *wizard) deployFaucet() {
|
||||
fmt.Printf("How many minutes to enforce between requests? (default = %d)\n", infos.minutes)
|
||||
infos.minutes = w.readDefaultInt(infos.minutes)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("How many funding tiers to feature (x2.5 amounts, x3 timeout)? (default = %d)\n", infos.tiers)
|
||||
infos.tiers = w.readDefaultInt(infos.tiers)
|
||||
if infos.tiers == 0 {
|
||||
log.Error("At least one funding tier must be set")
|
||||
return
|
||||
}
|
||||
// Accessing GitHub gists requires API authorization, retrieve it
|
||||
if infos.githubUser != "" {
|
||||
fmt.Println()
|
||||
fmt.Printf("Reused previous (%s) GitHub API authorization (y/n)? (default = yes)\n", infos.githubUser)
|
||||
fmt.Printf("Reuse previous (%s) GitHub API authorization (y/n)? (default = yes)\n", infos.githubUser)
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.githubUser, infos.githubToken = "", ""
|
||||
}
|
||||
@ -109,6 +117,29 @@ func (w *wizard) deployFaucet() {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Accessing the reCaptcha service requires API authorization, request it
|
||||
if infos.captchaToken != "" {
|
||||
fmt.Println()
|
||||
fmt.Println("Reuse previous reCaptcha API authorization (y/n)? (default = yes)")
|
||||
if w.readDefaultString("y") != "y" {
|
||||
infos.captchaToken, infos.captchaSecret = "", ""
|
||||
}
|
||||
}
|
||||
if infos.captchaToken == "" {
|
||||
// No previous authorization (or old one discarded)
|
||||
fmt.Println()
|
||||
fmt.Println("Enable reCaptcha protection against robots (y/n)? (default = no)")
|
||||
if w.readDefaultString("n") == "y" {
|
||||
// Captcha protection explicitly requested, read the site and secret keys
|
||||
fmt.Println()
|
||||
fmt.Printf("What is the reCaptcha site key to authenticate human users?\n")
|
||||
infos.captchaToken = w.readString()
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("What is the reCaptcha secret key to verify authentications? (won't be echoed)\n")
|
||||
infos.captchaSecret = w.readPassword()
|
||||
}
|
||||
}
|
||||
// Figure out where the user wants to store the persistent data
|
||||
fmt.Println()
|
||||
if infos.node.datadir == "" {
|
||||
|
@ -120,7 +120,7 @@ func (w *wizard) makeGenesis() {
|
||||
// Query the user for some custom extras
|
||||
fmt.Println()
|
||||
fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)")
|
||||
genesis.Config.ChainId = big.NewInt(int64(w.readDefaultInt(rand.Intn(65536))))
|
||||
genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536))))
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Anything fun to embed into the genesis block? (max 32 bytes)")
|
||||
|
@ -31,7 +31,10 @@ import (
|
||||
// makeWizard creates and returns a new puppeth wizard.
|
||||
func makeWizard(network string) *wizard {
|
||||
return &wizard{
|
||||
network: network,
|
||||
network: network,
|
||||
conf: config{
|
||||
Servers: make(map[string][]byte),
|
||||
},
|
||||
servers: make(map[string]*sshClient),
|
||||
services: make(map[string][]string),
|
||||
in: bufio.NewReader(os.Stdin),
|
||||
@ -77,9 +80,9 @@ func (w *wizard) run() {
|
||||
} else if err := json.Unmarshal(blob, &w.conf); err != nil {
|
||||
log.Crit("Previous configuration corrupted", "path", w.conf.path, "err", err)
|
||||
} else {
|
||||
for _, server := range w.conf.Servers {
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
log.Info("Dialing previously configured server", "server", server)
|
||||
client, err := dial(server)
|
||||
client, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
log.Error("Previous server unreachable", "server", server, "err", err)
|
||||
}
|
||||
|
@ -39,16 +39,16 @@ func (w *wizard) networkStats(tips bool) {
|
||||
// Iterate over all the specified hosts and check their status
|
||||
stats := tablewriter.NewWriter(os.Stdout)
|
||||
stats.SetHeader([]string{"Server", "IP", "Status", "Service", "Details"})
|
||||
stats.SetColWidth(128)
|
||||
stats.SetColWidth(100)
|
||||
|
||||
for _, server := range w.conf.Servers {
|
||||
for server, pubkey := range w.conf.Servers {
|
||||
client := w.servers[server]
|
||||
logger := log.New("server", server)
|
||||
logger.Info("Starting remote server health-check")
|
||||
|
||||
// If the server is not connected, try to connect again
|
||||
if client == nil {
|
||||
conn, err := dial(server)
|
||||
conn, err := dial(server, pubkey)
|
||||
if err != nil {
|
||||
logger.Error("Failed to establish remote connection", "err", err)
|
||||
stats.Append([]string{server, "", err.Error(), "", ""})
|
||||
|
@ -28,7 +28,9 @@ import (
|
||||
func (w *wizard) manageServers() {
|
||||
// List all the servers we can disconnect, along with an entry to connect a new one
|
||||
fmt.Println()
|
||||
for i, server := range w.conf.Servers {
|
||||
|
||||
servers := w.conf.servers()
|
||||
for i, server := range servers {
|
||||
fmt.Printf(" %d. Disconnect %s\n", i+1, server)
|
||||
}
|
||||
fmt.Printf(" %d. Connect another server\n", len(w.conf.Servers)+1)
|
||||
@ -40,14 +42,14 @@ func (w *wizard) manageServers() {
|
||||
}
|
||||
// If the user selected an existing server, drop it
|
||||
if choice <= len(w.conf.Servers) {
|
||||
server := w.conf.Servers[choice-1]
|
||||
server := servers[choice-1]
|
||||
client := w.servers[server]
|
||||
|
||||
delete(w.servers, server)
|
||||
if client != nil {
|
||||
client.Close()
|
||||
}
|
||||
w.conf.Servers = append(w.conf.Servers[:choice-1], w.conf.Servers[choice:]...)
|
||||
delete(w.conf.Servers, server)
|
||||
w.conf.flush()
|
||||
|
||||
log.Info("Disconnected existing server", "server", server)
|
||||
@ -73,14 +75,14 @@ func (w *wizard) makeServer() string {
|
||||
// Read and dial the server to ensure docker is present
|
||||
input := w.readString()
|
||||
|
||||
client, err := dial(input)
|
||||
client, err := dial(input, nil)
|
||||
if err != nil {
|
||||
log.Error("Server not ready for puppeth", "err", err)
|
||||
return ""
|
||||
}
|
||||
// All checks passed, start tracking the server
|
||||
w.servers[input] = client
|
||||
w.conf.Servers = append(w.conf.Servers, input)
|
||||
w.conf.Servers[input] = client.pubkey
|
||||
w.conf.flush()
|
||||
|
||||
return input
|
||||
@ -93,7 +95,9 @@ func (w *wizard) selectServer() string {
|
||||
// List the available server to the user and wait for a choice
|
||||
fmt.Println()
|
||||
fmt.Println("Which server do you want to interact with?")
|
||||
for i, server := range w.conf.Servers {
|
||||
|
||||
servers := w.conf.servers()
|
||||
for i, server := range servers {
|
||||
fmt.Printf(" %d. %s\n", i+1, server)
|
||||
}
|
||||
fmt.Printf(" %d. Connect another server\n", len(w.conf.Servers)+1)
|
||||
@ -105,7 +109,7 @@ func (w *wizard) selectServer() string {
|
||||
}
|
||||
// If the user requested connecting to a new server, go for it
|
||||
if choice <= len(w.conf.Servers) {
|
||||
return w.conf.Servers[choice-1]
|
||||
return servers[choice-1]
|
||||
}
|
||||
return w.makeServer()
|
||||
}
|
||||
|
@ -50,7 +50,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
if boot {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 512, peersLight: 256}
|
||||
} else {
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0}
|
||||
infos = &nodeInfos{portFull: 30303, peersTotal: 50, peersLight: 0, gasTarget: 4.7, gasPrice: 18}
|
||||
}
|
||||
}
|
||||
infos.genesis, _ = json.MarshalIndent(w.conf.genesis, "", " ")
|
||||
@ -109,8 +109,7 @@ func (w *wizard) deployNode(boot bool) {
|
||||
} else if w.conf.genesis.Config.Clique != nil {
|
||||
// If a previous signer was already set, offer to reuse it
|
||||
if infos.keyJSON != "" {
|
||||
var key keystore.Key
|
||||
if err := json.Unmarshal([]byte(infos.keyJSON), &key); err != nil {
|
||||
if key, err := keystore.DecryptKey([]byte(infos.keyJSON), infos.keyPass); err != nil {
|
||||
infos.keyJSON, infos.keyPass = "", ""
|
||||
} else {
|
||||
fmt.Println()
|
||||
@ -136,9 +135,17 @@ func (w *wizard) deployNode(boot bool) {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Establish the gas dynamics to be enforced by the signer
|
||||
fmt.Println()
|
||||
fmt.Printf("What gas limit should empty blocks target (MGas)? (default = %0.3f)\n", infos.gasTarget)
|
||||
infos.gasTarget = w.readDefaultFloat(infos.gasTarget)
|
||||
|
||||
fmt.Println()
|
||||
fmt.Printf("What gas price should the signer require (GWei)? (default = %0.3f)\n", infos.gasPrice)
|
||||
infos.gasPrice = w.readDefaultFloat(infos.gasPrice)
|
||||
}
|
||||
// Try to deploy the full node on the host
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, infos); err != nil {
|
||||
if out, err := deployNode(client, w.network, w.conf.bootFull, w.conf.bootLight, infos); err != nil {
|
||||
log.Error("Failed to deploy Ethereum node container", "err", err)
|
||||
if len(out) > 0 {
|
||||
fmt.Printf("%s\n", out)
|
||||
|
@ -67,6 +67,10 @@ var (
|
||||
Name: "bzzaccount",
|
||||
Usage: "Swarm account key file",
|
||||
}
|
||||
SwarmListenAddrFlag = cli.StringFlag{
|
||||
Name: "httpaddr",
|
||||
Usage: "Swarm HTTP API listening interface",
|
||||
}
|
||||
SwarmPortFlag = cli.StringFlag{
|
||||
Name: "bzzport",
|
||||
Usage: "Swarm local http api port",
|
||||
@ -249,6 +253,7 @@ Cleans database of corrupted entries.
|
||||
SwarmConfigPathFlag,
|
||||
SwarmSwapEnabledFlag,
|
||||
SwarmSyncEnabledFlag,
|
||||
SwarmListenAddrFlag,
|
||||
SwarmPortFlag,
|
||||
SwarmAccountFlag,
|
||||
SwarmNetworkIdFlag,
|
||||
@ -345,6 +350,9 @@ func registerBzzService(ctx *cli.Context, stack *node.Node) {
|
||||
if len(bzzport) > 0 {
|
||||
bzzconfig.Port = bzzport
|
||||
}
|
||||
if bzzaddr := ctx.GlobalString(SwarmListenAddrFlag.Name); bzzaddr != "" {
|
||||
bzzconfig.ListenAddr = bzzaddr
|
||||
}
|
||||
swapEnabled := ctx.GlobalBool(SwarmSwapEnabledFlag.Name)
|
||||
syncEnabled := ctx.GlobalBoolT(SwarmSyncEnabledFlag.Name)
|
||||
|
||||
|
@ -52,10 +52,23 @@ import (
|
||||
"github.com/ethereum/go-ethereum/p2p/nat"
|
||||
"github.com/ethereum/go-ethereum/p2p/netutil"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv2"
|
||||
whisper "github.com/ethereum/go-ethereum/whisper/whisperv5"
|
||||
"gopkg.in/urfave/cli.v1"
|
||||
)
|
||||
|
||||
var (
|
||||
CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .cmd.Description}}{{.cmd.Description}}
|
||||
{{end}}{{if .cmd.Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .cmd.Subcommands}}{{.cmd.Name}}{{with .cmd.ShortName}}, {{.cmd}}{{end}}{{ "\t" }}{{.cmd.Usage}}
|
||||
{{end}}{{end}}{{if .categorizedFlags}}
|
||||
{{range $idx, $categorized := .categorizedFlags}}{{$categorized.Name}} OPTIONS:
|
||||
{{range $categorized.Flags}}{{"\t"}}{{.}}
|
||||
{{end}}
|
||||
{{end}}{{end}}`
|
||||
)
|
||||
|
||||
func init() {
|
||||
cli.AppHelpTemplate = `{{.Name}} {{if .Flags}}[global options] {{end}}command{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
|
||||
@ -70,16 +83,7 @@ GLOBAL OPTIONS:
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
cli.CommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
|
||||
{{if .Description}}{{.Description}}
|
||||
{{end}}{{if .Subcommands}}
|
||||
SUBCOMMANDS:
|
||||
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
`
|
||||
cli.CommandHelpTemplate = CommandHelpTemplate
|
||||
}
|
||||
|
||||
// NewApp creates an app with sane defaults.
|
||||
@ -115,44 +119,23 @@ var (
|
||||
Name: "keystore",
|
||||
Usage: "Directory for the keystore (default = inside the datadir)",
|
||||
}
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
|
||||
NoUSBFlag = cli.BoolFlag{
|
||||
Name: "nousb",
|
||||
Usage: "Disables monitoring for and managine USB hardware wallets",
|
||||
}
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
}
|
||||
NetworkIdFlag = cli.IntFlag{
|
||||
NetworkIdFlag = cli.Uint64Flag{
|
||||
Name: "networkid",
|
||||
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten)",
|
||||
Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)",
|
||||
Value: eth.DefaultConfig.NetworkId,
|
||||
}
|
||||
TestNetFlag = cli.BoolFlag{
|
||||
TestnetFlag = cli.BoolFlag{
|
||||
Name: "testnet",
|
||||
Usage: "Ropsten network: pre-configured proof-of-work test network",
|
||||
}
|
||||
RinkebyFlag = cli.BoolFlag{
|
||||
Name: "rinkeby",
|
||||
Usage: "Rinkeby network: pre-configured proof-of-authority test network",
|
||||
}
|
||||
DevModeFlag = cli.BoolFlag{
|
||||
Name: "dev",
|
||||
Usage: "Developer mode: pre-configured private network with several debugging flags",
|
||||
@ -195,6 +178,72 @@ var (
|
||||
Name: "lightkdf",
|
||||
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
|
||||
}
|
||||
// Ethash settings
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
Usage: "Directory to store the ethash verification caches (default = inside the datadir)",
|
||||
}
|
||||
EthashCachesInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesinmem",
|
||||
Usage: "Number of recent ethash caches to keep in memory (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesInMem,
|
||||
}
|
||||
EthashCachesOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.cachesondisk",
|
||||
Usage: "Number of recent ethash caches to keep on disk (16MB each)",
|
||||
Value: eth.DefaultConfig.EthashCachesOnDisk,
|
||||
}
|
||||
EthashDatasetDirFlag = DirectoryFlag{
|
||||
Name: "ethash.dagdir",
|
||||
Usage: "Directory to store the ethash mining DAGs (default = inside home folder)",
|
||||
Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir},
|
||||
}
|
||||
EthashDatasetsInMemoryFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsinmem",
|
||||
Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsInMem,
|
||||
}
|
||||
EthashDatasetsOnDiskFlag = cli.IntFlag{
|
||||
Name: "ethash.dagsondisk",
|
||||
Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)",
|
||||
Value: eth.DefaultConfig.EthashDatasetsOnDisk,
|
||||
}
|
||||
// Transaction pool settings
|
||||
TxPoolPriceLimitFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricelimit",
|
||||
Usage: "Minimum gas price limit to enforce for acceptance into the pool",
|
||||
Value: eth.DefaultConfig.TxPool.PriceLimit,
|
||||
}
|
||||
TxPoolPriceBumpFlag = cli.Uint64Flag{
|
||||
Name: "txpool.pricebump",
|
||||
Usage: "Price bump percentage to replace an already existing transaction",
|
||||
Value: eth.DefaultConfig.TxPool.PriceBump,
|
||||
}
|
||||
TxPoolAccountSlotsFlag = cli.Uint64Flag{
|
||||
Name: "txpool.accountslots",
|
||||
Usage: "Minimum number of executable transaction slots guaranteed per account",
|
||||
Value: eth.DefaultConfig.TxPool.AccountSlots,
|
||||
}
|
||||
TxPoolGlobalSlotsFlag = cli.Uint64Flag{
|
||||
Name: "txpool.globalslots",
|
||||
Usage: "Maximum number of executable transaction slots for all accounts",
|
||||
Value: eth.DefaultConfig.TxPool.GlobalSlots,
|
||||
}
|
||||
TxPoolAccountQueueFlag = cli.Uint64Flag{
|
||||
Name: "txpool.accountqueue",
|
||||
Usage: "Maximum number of non-executable transaction slots permitted per account",
|
||||
Value: eth.DefaultConfig.TxPool.AccountQueue,
|
||||
}
|
||||
TxPoolGlobalQueueFlag = cli.Uint64Flag{
|
||||
Name: "txpool.globalqueue",
|
||||
Usage: "Maximum number of non-executable transaction slots for all accounts",
|
||||
Value: eth.DefaultConfig.TxPool.GlobalQueue,
|
||||
}
|
||||
TxPoolLifetimeFlag = cli.DurationFlag{
|
||||
Name: "txpool.lifetime",
|
||||
Usage: "Maximum amount of time non-executable transaction are queued",
|
||||
Value: eth.DefaultConfig.TxPool.Lifetime,
|
||||
}
|
||||
// Performance tuning settings
|
||||
CacheFlag = cli.IntFlag{
|
||||
Name: "cache",
|
||||
@ -229,7 +278,7 @@ var (
|
||||
GasPriceFlag = BigFlag{
|
||||
Name: "gasprice",
|
||||
Usage: "Minimal gas price to accept for mining a transactions",
|
||||
Value: big.NewInt(20 * params.Shannon),
|
||||
Value: eth.DefaultConfig.GasPrice,
|
||||
}
|
||||
ExtraDataFlag = cli.StringFlag{
|
||||
Name: "extradata",
|
||||
@ -327,7 +376,7 @@ var (
|
||||
}
|
||||
ExecFlag = cli.StringFlag{
|
||||
Name: "exec",
|
||||
Usage: "Execute JavaScript statement (only in combination with console/attach)",
|
||||
Usage: "Execute JavaScript statement",
|
||||
}
|
||||
PreloadJSFlag = cli.StringFlag{
|
||||
Name: "preload",
|
||||
@ -352,7 +401,17 @@ var (
|
||||
}
|
||||
BootnodesFlag = cli.StringFlag{
|
||||
Name: "bootnodes",
|
||||
Usage: "Comma separated enode URLs for P2P discovery bootstrap",
|
||||
Usage: "Comma separated enode URLs for P2P discovery bootstrap (set v4+v5 instead for light servers)",
|
||||
Value: "",
|
||||
}
|
||||
BootnodesV4Flag = cli.StringFlag{
|
||||
Name: "bootnodesv4",
|
||||
Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes)",
|
||||
Value: "",
|
||||
}
|
||||
BootnodesV5Flag = cli.StringFlag{
|
||||
Name: "bootnodesv5",
|
||||
Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes)",
|
||||
Value: "",
|
||||
}
|
||||
NodeKeyFileFlag = cli.StringFlag{
|
||||
@ -411,10 +470,12 @@ var (
|
||||
// then a subdirectory of the specified datadir will be used.
|
||||
func MakeDataDir(ctx *cli.Context) string {
|
||||
if path := ctx.GlobalString(DataDirFlag.Name); path != "" {
|
||||
// TODO: choose a different location outside of the regular datadir.
|
||||
if ctx.GlobalBool(TestNetFlag.Name) {
|
||||
if ctx.GlobalBool(TestnetFlag.Name) {
|
||||
return filepath.Join(path, "testnet")
|
||||
}
|
||||
if ctx.GlobalBool(RinkebyFlag.Name) {
|
||||
return filepath.Join(path, "rinkeby")
|
||||
}
|
||||
return path
|
||||
}
|
||||
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
|
||||
@ -458,10 +519,17 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) {
|
||||
// flags, reverting to pre-configured ones if none have been specified.
|
||||
func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.MainnetBootnodes
|
||||
if ctx.GlobalIsSet(BootnodesFlag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
} else if ctx.GlobalBool(TestNetFlag.Name) {
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
|
||||
if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
|
||||
} else {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
}
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
urls = params.TestnetBootnodes
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyBootnodes
|
||||
}
|
||||
|
||||
cfg.BootstrapNodes = make([]*discover.Node, 0, len(urls))
|
||||
@ -479,9 +547,16 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
|
||||
// flags, reverting to pre-configured ones if none have been specified.
|
||||
func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
|
||||
urls := params.DiscoveryV5Bootnodes
|
||||
if ctx.GlobalIsSet(BootnodesFlag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
} else if cfg.BootstrapNodesV5 == nil {
|
||||
switch {
|
||||
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
|
||||
if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
|
||||
} else {
|
||||
urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
|
||||
}
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
urls = params.RinkebyV5Bootnodes
|
||||
case cfg.BootstrapNodesV5 != nil:
|
||||
return // already set, don't apply defaults.
|
||||
}
|
||||
|
||||
@ -643,7 +718,7 @@ func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// MakePasswordList reads password lines from the file specified by --password.
|
||||
// MakePasswordList reads password lines from the file specified by the global --password flag.
|
||||
func MakePasswordList(ctx *cli.Context) []string {
|
||||
path := ctx.GlobalString(PasswordFileFlag.Name)
|
||||
if path == "" {
|
||||
@ -701,6 +776,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
|
||||
// --dev mode can't use p2p networking.
|
||||
cfg.MaxPeers = 0
|
||||
cfg.ListenAddr = ":0"
|
||||
cfg.DiscoveryV5Addr = ":0"
|
||||
cfg.NoDiscovery = true
|
||||
cfg.DiscoveryV5 = false
|
||||
}
|
||||
@ -719,8 +795,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
||||
cfg.DataDir = ctx.GlobalString(DataDirFlag.Name)
|
||||
case ctx.GlobalBool(DevModeFlag.Name):
|
||||
cfg.DataDir = filepath.Join(os.TempDir(), "ethereum_dev_mode")
|
||||
case ctx.GlobalBool(TestNetFlag.Name):
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "testnet")
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby")
|
||||
}
|
||||
|
||||
if ctx.GlobalIsSet(KeyStoreDirFlag.Name) {
|
||||
@ -729,6 +807,9 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
|
||||
if ctx.GlobalIsSet(LightKDFFlag.Name) {
|
||||
cfg.UseLightweightKDF = ctx.GlobalBool(LightKDFFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(NoUSBFlag.Name) {
|
||||
cfg.NoUSB = ctx.GlobalBool(NoUSBFlag.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||
@ -740,6 +821,30 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) {
|
||||
if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) {
|
||||
cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) {
|
||||
cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) {
|
||||
cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) {
|
||||
cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) {
|
||||
cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) {
|
||||
cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) {
|
||||
cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name)
|
||||
}
|
||||
}
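// Example (illustrative): the new txpool flags let the pool limits be tuned straight from
// the command line; the values below are arbitrary.
//
//	geth --txpool.pricelimit 2000000000 --txpool.globalslots 8192 --txpool.lifetime 1h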
|
||||
|
||||
func setEthash(ctx *cli.Context, cfg *eth.Config) {
|
||||
if ctx.GlobalIsSet(EthashCacheDirFlag.Name) {
|
||||
cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name)
|
||||
@ -776,12 +881,13 @@ func checkExclusive(ctx *cli.Context, flags ...cli.Flag) {
|
||||
// SetEthConfig applies eth-related command line flags to the config.
|
||||
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
// Avoid conflicting network flags
|
||||
checkExclusive(ctx, DevModeFlag, TestNetFlag)
|
||||
checkExclusive(ctx, DevModeFlag, TestnetFlag, RinkebyFlag)
|
||||
checkExclusive(ctx, FastSyncFlag, LightModeFlag, SyncModeFlag)
|
||||
|
||||
ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore)
|
||||
setEtherbase(ctx, ks, cfg)
|
||||
setGPO(ctx, &cfg.GPO)
|
||||
setTxPool(ctx, &cfg.TxPool)
|
||||
setEthash(ctx, cfg)
|
||||
|
||||
switch {
|
||||
@ -799,7 +905,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.LightPeers = ctx.GlobalInt(LightPeersFlag.Name)
|
||||
}
|
||||
if ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = ctx.GlobalInt(NetworkIdFlag.Name)
|
||||
cfg.NetworkId = ctx.GlobalUint64(NetworkIdFlag.Name)
|
||||
}
|
||||
|
||||
// Ethereum needs to know maxPeers to calculate the light server peer ratio.
|
||||
@ -828,13 +934,18 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
cfg.EnablePreimageRecording = ctx.GlobalBool(VMEnableDebugFlag.Name)
|
||||
}
|
||||
|
||||
// Override any default configs for --dev and --testnet.
|
||||
// Override any default configs for hard coded networks.
|
||||
switch {
|
||||
case ctx.GlobalBool(TestNetFlag.Name):
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 3
|
||||
}
|
||||
cfg.Genesis = core.DefaultTestnetGenesisBlock()
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
|
||||
cfg.NetworkId = 4
|
||||
}
|
||||
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
|
||||
case ctx.GlobalBool(DevModeFlag.Name):
|
||||
cfg.Genesis = core.DevGenesisBlock()
|
||||
if !ctx.GlobalIsSet(GasPriceFlag.Name) {
|
||||
@ -901,22 +1012,16 @@ func SetupNetwork(ctx *cli.Context) {
|
||||
params.TargetGasLimit = new(big.Int).SetUint64(ctx.GlobalUint64(TargetGasLimitFlag.Name))
|
||||
}
|
||||
|
||||
func ChainDbName(ctx *cli.Context) string {
|
||||
if ctx.GlobalBool(LightModeFlag.Name) {
|
||||
return "lightchaindata"
|
||||
} else {
|
||||
return "chaindata"
|
||||
}
|
||||
}
|
||||
|
||||
// MakeChainDatabase opens a LevelDB database using the flags passed to the client and will hard crash if it fails.
|
||||
func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
||||
var (
|
||||
cache = ctx.GlobalInt(CacheFlag.Name)
|
||||
handles = makeDatabaseHandles()
|
||||
name = ChainDbName(ctx)
|
||||
)
|
||||
|
||||
name := "chaindata"
|
||||
if ctx.GlobalBool(LightModeFlag.Name) {
|
||||
name = "lightchaindata"
|
||||
}
|
||||
chainDb, err := stack.OpenDatabase(name, cache, handles)
|
||||
if err != nil {
|
||||
Fatalf("Could not open database: %v", err)
|
||||
@ -927,8 +1032,10 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database {
|
||||
func MakeGenesis(ctx *cli.Context) *core.Genesis {
|
||||
var genesis *core.Genesis
|
||||
switch {
|
||||
case ctx.GlobalBool(TestNetFlag.Name):
|
||||
case ctx.GlobalBool(TestnetFlag.Name):
|
||||
genesis = core.DefaultTestnetGenesisBlock()
|
||||
case ctx.GlobalBool(RinkebyFlag.Name):
|
||||
genesis = core.DefaultRinkebyGenesisBlock()
|
||||
case ctx.GlobalBool(DevModeFlag.Name):
|
||||
genesis = core.DevGenesisBlock()
|
||||
}
|
||||
@ -972,3 +1079,27 @@ func MakeConsolePreloads(ctx *cli.Context) []string {
|
||||
}
|
||||
return preloads
|
||||
}
|
||||
|
||||
// MigrateFlags sets the global flag from a local flag when it's set.
|
||||
// This is a temporary function used for migrating old command/flags to the
|
||||
// new format.
|
||||
//
|
||||
// e.g. geth account new --keystore /tmp/mykeystore --lightkdf
|
||||
//
|
||||
// is equivalent after calling this method with:
|
||||
//
|
||||
// geth --keystore /tmp/mykeystore --lightkdf account new
|
||||
//
|
||||
// This allows the use of the existing configuration functionality.
|
||||
// When all flags are migrated this function can be removed and the existing
// configuration functionality must be changed so that it uses local flags.
|
||||
func MigrateFlags(action func(ctx *cli.Context) error) func(*cli.Context) error {
|
||||
return func(ctx *cli.Context) error {
|
||||
for _, name := range ctx.FlagNames() {
|
||||
if ctx.IsSet(name) {
|
||||
ctx.GlobalSet(name, ctx.String(name))
|
||||
}
|
||||
}
|
||||
return action(ctx)
|
||||
}
|
||||
}
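// Illustrative usage (a sketch, not part of this diff): a geth subcommand wraps its action
// with MigrateFlags so that flags given after the subcommand name are promoted to the global
// context before the shared Set*Config helpers read them. The command and action names below
// are hypothetical.
var exampleCommand = cli.Command{
	Action:    MigrateFlags(runExample), // runExample is a placeholder action func
	Name:      "example",
	Usage:     "Run a hypothetical subcommand",
	ArgsUsage: " ",
	Flags:     []cli.Flag{DataDirFlag, LightKDFFlag},
}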
|
||||
|
@ -65,7 +65,7 @@ var (
|
||||
pub *ecdsa.PublicKey
|
||||
asymKey *ecdsa.PrivateKey
|
||||
nodeid *ecdsa.PrivateKey
|
||||
topic []byte
|
||||
topic whisper.TopicType
|
||||
asymKeyID string
|
||||
filterID string
|
||||
symPass string
|
||||
@ -84,7 +84,7 @@ var (
|
||||
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics")
|
||||
echoMode = flag.Bool("echo", false, "echo mode: prints some arguments for diagnostics")
|
||||
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlWarn), "log verbosity level")
|
||||
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
|
||||
argTTL = flag.Uint("ttl", 30, "time-to-live for messages in seconds")
|
||||
argWorkTime = flag.Uint("work", 5, "work time in seconds")
|
||||
argMaxSize = flag.Int("maxsize", whisper.DefaultMaxMessageLength, "max size of message")
|
||||
@ -129,7 +129,7 @@ func processArgs() {
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to parse the topic: %s", err)
|
||||
}
|
||||
topic = x
|
||||
topic = whisper.BytesToTopic(x)
|
||||
}
|
||||
|
||||
if *asymmetricMode && len(*argPub) > 0 {
|
||||
@ -183,7 +183,7 @@ func initialize() {
|
||||
|
||||
if *testMode {
|
||||
symPass = "wwww" // ascii code: 0x77777777
|
||||
msPassword = "mail server test password"
|
||||
msPassword = "wwww"
|
||||
}
|
||||
|
||||
if *bootstrapMode {
|
||||
@ -307,7 +307,11 @@ func configureNode() {
|
||||
if *asymmetricMode {
|
||||
if len(*argPub) == 0 {
|
||||
s := scanLine("Please enter the peer's public key: ")
|
||||
pub = crypto.ToECDSAPub(common.FromHex(s))
|
||||
b := common.FromHex(s)
|
||||
if b == nil {
|
||||
utils.Fatalf("Error: can not convert hexadecimal string")
|
||||
}
|
||||
pub = crypto.ToECDSAPub(b)
|
||||
if !isKeyValid(pub) {
|
||||
utils.Fatalf("Error: invalid public key")
|
||||
}
|
||||
@ -326,7 +330,7 @@ func configureNode() {
|
||||
|
||||
if !*asymmetricMode && !*forwarderMode {
|
||||
if len(symPass) == 0 {
|
||||
symPass, err = console.Stdin.PromptPassword("Please enter the password: ")
|
||||
symPass, err = console.Stdin.PromptPassword("Please enter the password for symmetric encryption: ")
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to read passphrase: %v", err)
|
||||
}
|
||||
@ -343,6 +347,8 @@ func configureNode() {
|
||||
if len(*argTopic) == 0 {
|
||||
generateTopic([]byte(symPass))
|
||||
}
|
||||
|
||||
fmt.Printf("Filter is configured for the topic: %x \n", topic)
|
||||
}
|
||||
|
||||
if *mailServerMode {
|
||||
@ -354,18 +360,17 @@ func configureNode() {
|
||||
filter := whisper.Filter{
|
||||
KeySym: symKey,
|
||||
KeyAsym: asymKey,
|
||||
Topics: [][]byte{topic},
|
||||
Topics: [][]byte{topic[:]},
|
||||
AllowP2P: p2pAccept,
|
||||
}
|
||||
filterID, err = shh.Subscribe(&filter)
|
||||
if err != nil {
|
||||
utils.Fatalf("Failed to install filter: %s", err)
|
||||
}
|
||||
fmt.Printf("Filter is configured for the topic: %x \n", topic)
|
||||
}
|
||||
|
||||
func generateTopic(password []byte) {
|
||||
x := pbkdf2.Key(password, password, 8196, 128, sha512.New)
|
||||
x := pbkdf2.Key(password, password, 4096, 128, sha512.New)
|
||||
for i := 0; i < len(x); i++ {
|
||||
topic[i%whisper.TopicLength] ^= x[i]
|
||||
}
|
||||
@ -485,16 +490,15 @@ func sendMsg(payload []byte) common.Hash {
|
||||
Dst: pub,
|
||||
KeySym: symKey,
|
||||
Payload: payload,
|
||||
Topic: whisper.BytesToTopic(topic),
|
||||
Topic: topic,
|
||||
TTL: uint32(*argTTL),
|
||||
PoW: *argPoW,
|
||||
WorkTime: uint32(*argWorkTime),
|
||||
}
|
||||
|
||||
msg := whisper.NewSentMessage(&params)
|
||||
if msg == nil {
|
||||
fmt.Printf("failed to create new message (OS level error)")
|
||||
os.Exit(0)
|
||||
msg, err := whisper.NewSentMessage(&params)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to create new message: %s", err)
|
||||
}
|
||||
envelope, err := msg.Wrap(&params)
|
||||
if err != nil {
|
||||
@ -624,9 +628,9 @@ func requestExpiredMessagesLoop() {
|
||||
params.Src = nodeid
|
||||
params.WorkTime = 5
|
||||
|
||||
msg := whisper.NewSentMessage(&params)
|
||||
if msg == nil {
|
||||
utils.Fatalf("failed to create new message (OS level error)")
|
||||
msg, err := whisper.NewSentMessage(&params)
|
||||
if err != nil {
|
||||
utils.Fatalf("failed to create new message: %s", err)
|
||||
}
|
||||
env, err := msg.Wrap(&params)
|
||||
if err != nil {
|
||||
|
188
common/bitutil/bitutil.go
Normal file
188
common/bitutil/bitutil.go
Normal file
@ -0,0 +1,188 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from: https://golang.org/src/crypto/cipher/xor.go
|
||||
|
||||
// Package bitutil implements fast bitwise operations.
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
|
||||
|
||||
// XORBytes xors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes xor'd.
|
||||
func XORBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastXORBytes(dst, a, b)
|
||||
}
|
||||
return safeXORBytes(dst, a, b)
|
||||
}
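A minimal usage sketch of the exported helper, with byte values chosen purely for illustration:

a := []byte{0x0f, 0xf0, 0xaa}
b := []byte{0xff, 0x0f, 0x55}
dst := make([]byte, len(a))
n := bitutil.XORBytes(dst, a, b) // n == 3, dst == []byte{0xf0, 0xff, 0xff}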
|
||||
|
||||
// fastXORBytes xors in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeXORBytes xors one byte at a time. It works on all architectures,
|
||||
// regardless of whether they support unaligned reads/writes.
|
||||
func safeXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ANDBytes ands the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes and'd.
|
||||
func ANDBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastANDBytes(dst, a, b)
|
||||
}
|
||||
return safeANDBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastANDBytes ands in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastANDBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] & bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeANDBytes ands one byte at a time. It works on all architectures,
|
||||
// regardless of whether they support unaligned reads/writes.
|
||||
func safeANDBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] & b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// ORBytes ors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes or'd.
|
||||
func ORBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastORBytes(dst, a, b)
|
||||
}
|
||||
return safeORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastORBytes ors in bulk. It only works on architectures that support
|
||||
// unaligned read/writes.
|
||||
func fastORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] | bw[i]
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// safeORBytes ors one byte at a time. It works on all architectures,
|
||||
// regardless of whether they support unaligned reads/writes.
|
||||
func safeORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] | b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// TestBytes tests whether any bit is set in the input byte slice.
|
||||
func TestBytes(p []byte) bool {
|
||||
if supportsUnaligned {
|
||||
return fastTestBytes(p)
|
||||
}
|
||||
return safeTestBytes(p)
|
||||
}
|
||||
|
||||
// fastTestBytes tests for set bits in bulk. It only works on architectures that
|
||||
// support unaligned read/writes.
|
||||
func fastTestBytes(p []byte) bool {
|
||||
n := len(p)
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
pw := *(*[]uintptr)(unsafe.Pointer(&p))
|
||||
for i := 0; i < w; i++ {
|
||||
if pw[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// safeTestBytes tests for set bits one byte at a time. It works on all
|
||||
// architectures, regardless of whether they support unaligned reads/writes.
|
||||
func safeTestBytes(p []byte) bool {
|
||||
for i := 0; i < len(p); i++ {
|
||||
if p[i] != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
215
common/bitutil/bitutil_test.go
Normal file
215
common/bitutil/bitutil_test.go
Normal file
@ -0,0 +1,215 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Adapted from: https://golang.org/src/crypto/cipher/xor_test.go
|
||||
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Tests that bitwise XOR works for various alignments.
|
||||
func TestXOR(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
XORBytes(d1, p, q)
|
||||
safeXORBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal", d1, d2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bitwise AND works for various alignments.
|
||||
func TestAND(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
ANDBytes(d1, p, q)
|
||||
safeANDBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bitwise OR works for various alignments.
|
||||
func TestOR(t *testing.T) {
|
||||
for alignP := 0; alignP < 2; alignP++ {
|
||||
for alignQ := 0; alignQ < 2; alignQ++ {
|
||||
for alignD := 0; alignD < 2; alignD++ {
|
||||
p := make([]byte, 1023)[alignP:]
|
||||
q := make([]byte, 1023)[alignQ:]
|
||||
|
||||
for i := 0; i < len(p); i++ {
|
||||
p[i] = byte(i)
|
||||
}
|
||||
for i := 0; i < len(q); i++ {
|
||||
q[i] = byte(len(q) - i)
|
||||
}
|
||||
d1 := make([]byte, 1023+alignD)[alignD:]
|
||||
d2 := make([]byte, 1023+alignD)[alignD:]
|
||||
|
||||
ORBytes(d1, p, q)
|
||||
safeORBytes(d2, p, q)
|
||||
if !bytes.Equal(d1, d2) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that bit testing works for various alignments.
|
||||
func TestTest(t *testing.T) {
|
||||
for align := 0; align < 2; align++ {
|
||||
// Test for bits set in the bulk part
|
||||
p := make([]byte, 1023)[align:]
|
||||
p[100] = 1
|
||||
|
||||
if TestBytes(p) != safeTestBytes(p) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
// Test for bits set in the tail part
|
||||
q := make([]byte, 1023)[align:]
|
||||
q[len(q)-1] = 1
|
||||
|
||||
if TestBytes(q) != safeTestBytes(q) {
|
||||
t.Error("not equal")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized XOR performance.
|
||||
func BenchmarkFastXOR1KB(b *testing.B) { benchmarkFastXOR(b, 1024) }
|
||||
func BenchmarkFastXOR2KB(b *testing.B) { benchmarkFastXOR(b, 2048) }
|
||||
func BenchmarkFastXOR4KB(b *testing.B) { benchmarkFastXOR(b, 4096) }
|
||||
|
||||
func benchmarkFastXOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
XORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline XOR performance.
|
||||
func BenchmarkBaseXOR1KB(b *testing.B) { benchmarkBaseXOR(b, 1024) }
|
||||
func BenchmarkBaseXOR2KB(b *testing.B) { benchmarkBaseXOR(b, 2048) }
|
||||
func BenchmarkBaseXOR4KB(b *testing.B) { benchmarkBaseXOR(b, 4096) }
|
||||
|
||||
func benchmarkBaseXOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeXORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized AND performance.
|
||||
func BenchmarkFastAND1KB(b *testing.B) { benchmarkFastAND(b, 1024) }
|
||||
func BenchmarkFastAND2KB(b *testing.B) { benchmarkFastAND(b, 2048) }
|
||||
func BenchmarkFastAND4KB(b *testing.B) { benchmarkFastAND(b, 4096) }
|
||||
|
||||
func benchmarkFastAND(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ANDBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline AND performance.
|
||||
func BenchmarkBaseAND1KB(b *testing.B) { benchmarkBaseAND(b, 1024) }
|
||||
func BenchmarkBaseAND2KB(b *testing.B) { benchmarkBaseAND(b, 2048) }
|
||||
func BenchmarkBaseAND4KB(b *testing.B) { benchmarkBaseAND(b, 4096) }
|
||||
|
||||
func benchmarkBaseAND(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeANDBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized OR performance.
|
||||
func BenchmarkFastOR1KB(b *testing.B) { benchmarkFastOR(b, 1024) }
|
||||
func BenchmarkFastOR2KB(b *testing.B) { benchmarkFastOR(b, 2048) }
|
||||
func BenchmarkFastOR4KB(b *testing.B) { benchmarkFastOR(b, 4096) }
|
||||
|
||||
func benchmarkFastOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
ORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline OR performance.
|
||||
func BenchmarkBaseOR1KB(b *testing.B) { benchmarkBaseOR(b, 1024) }
|
||||
func BenchmarkBaseOR2KB(b *testing.B) { benchmarkBaseOR(b, 2048) }
|
||||
func BenchmarkBaseOR4KB(b *testing.B) { benchmarkBaseOR(b, 4096) }
|
||||
|
||||
func benchmarkBaseOR(b *testing.B, size int) {
|
||||
p, q := make([]byte, size), make([]byte, size)
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeORBytes(p, p, q)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the potentially optimized bit testing performance.
|
||||
func BenchmarkFastTest1KB(b *testing.B) { benchmarkFastTest(b, 1024) }
|
||||
func BenchmarkFastTest2KB(b *testing.B) { benchmarkFastTest(b, 2048) }
|
||||
func BenchmarkFastTest4KB(b *testing.B) { benchmarkFastTest(b, 4096) }
|
||||
|
||||
func benchmarkFastTest(b *testing.B, size int) {
|
||||
p := make([]byte, size)
|
||||
for i := 0; i < b.N; i++ {
|
||||
TestBytes(p)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the baseline bit testing performance.
|
||||
func BenchmarkBaseTest1KB(b *testing.B) { benchmarkBaseTest(b, 1024) }
|
||||
func BenchmarkBaseTest2KB(b *testing.B) { benchmarkBaseTest(b, 2048) }
|
||||
func BenchmarkBaseTest4KB(b *testing.B) { benchmarkBaseTest(b, 4096) }
|
||||
|
||||
func benchmarkBaseTest(b *testing.B, size int) {
|
||||
p := make([]byte, size)
|
||||
for i := 0; i < b.N; i++ {
|
||||
safeTestBytes(p)
|
||||
}
|
||||
}
|
170
common/bitutil/compress.go
Normal file
170
common/bitutil/compress.go
Normal file
@ -0,0 +1,170 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitutil
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// errMissingData is returned from decompression if the byte referenced by
|
||||
// the bitset header overflows the input data.
|
||||
errMissingData = errors.New("missing bytes on input")
|
||||
|
||||
// errUnreferencedData is returned from decompression if not all bytes were used
|
||||
// up from the input data after decompressing it.
|
||||
errUnreferencedData = errors.New("extra bytes on input")
|
||||
|
||||
// errExceededTarget is returned from decompression if the bitset header has
|
||||
// more bits set than there is space available in the target buffer.
|
||||
errExceededTarget = errors.New("target data size exceeded")
|
||||
|
||||
// errZeroContent is returned from decompression if a data byte referenced in
|
||||
// the bitset header is actually a zero byte.
|
||||
errZeroContent = errors.New("zero byte in input content")
|
||||
)
|
||||
|
||||
// The compression algorithm implemented by CompressBytes and DecompressBytes is
|
||||
// optimized for sparse input data which contains a lot of zero bytes. Decompression
|
||||
// requires knowledge of the decompressed data length.
|
||||
//
|
||||
// Compression works as follows:
|
||||
//
|
||||
// if data only contains zeroes,
|
||||
// CompressBytes(data) == nil
|
||||
// otherwise if len(data) <= 1,
|
||||
// CompressBytes(data) == data
|
||||
// otherwise:
|
||||
// CompressBytes(data) == append(CompressBytes(nonZeroBitset(data)), nonZeroBytes(data)...)
|
||||
// where
|
||||
// nonZeroBitset(data) is a bit vector with len(data) bits (MSB first):
|
||||
// nonZeroBitset(data)[i/8] && (1 << (7-i%8)) != 0 if data[i] != 0
|
||||
// len(nonZeroBitset(data)) == (len(data)+7)/8
|
||||
// nonZeroBytes(data) contains the non-zero bytes of data in the same order
|
||||
|
||||
// CompressBytes compresses the input byte slice according to the sparse bitset
|
||||
// representation algorithm. If the result is bigger than the original input, no
|
||||
// compression is done.
|
||||
func CompressBytes(data []byte) []byte {
|
||||
if out := bitsetEncodeBytes(data); len(out) < len(data) {
|
||||
return out
|
||||
}
|
||||
cpy := make([]byte, len(data))
|
||||
copy(cpy, data)
|
||||
return cpy
|
||||
}
|
||||
|
||||
// bitsetEncodeBytes compresses the input byte slice according to the sparse
|
||||
// bitset representation algorithm.
|
||||
func bitsetEncodeBytes(data []byte) []byte {
|
||||
// Empty slices get compressed to nil
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
// One byte slices compress to nil or retain the single byte
|
||||
if len(data) == 1 {
|
||||
if data[0] == 0 {
|
||||
return nil
|
||||
}
|
||||
return data
|
||||
}
|
||||
// Calculate the bitset of set bytes, and gather the non-zero bytes
|
||||
nonZeroBitset := make([]byte, (len(data)+7)/8)
|
||||
nonZeroBytes := make([]byte, 0, len(data))
|
||||
|
||||
for i, b := range data {
|
||||
if b != 0 {
|
||||
nonZeroBytes = append(nonZeroBytes, b)
|
||||
nonZeroBitset[i/8] |= 1 << byte(7-i%8)
|
||||
}
|
||||
}
|
||||
if len(nonZeroBytes) == 0 {
|
||||
return nil
|
||||
}
|
||||
return append(bitsetEncodeBytes(nonZeroBitset), nonZeroBytes...)
|
||||
}
|
||||
|
||||
// DecompressBytes decompresses data with a known target size. If the input data
|
||||
// matches the size of the target, it means no compression was done in the first
|
||||
// place.
|
||||
func DecompressBytes(data []byte, target int) ([]byte, error) {
|
||||
if len(data) > target {
|
||||
return nil, errExceededTarget
|
||||
}
|
||||
if len(data) == target {
|
||||
cpy := make([]byte, len(data))
|
||||
copy(cpy, data)
|
||||
return cpy, nil
|
||||
}
|
||||
return bitsetDecodeBytes(data, target)
|
||||
}
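Hedged sketch of the size contract, continuing the example values used above: the caller must pass the exact original length, and oversized input is rejected up front.

orig, err := bitutil.DecompressBytes(comp, len(data)) // err == nil, bytes.Equal(orig, data)
// passing a target smaller than len(comp) instead fails with errExceededTarget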
|
||||
|
||||
// bitsetDecodeBytes decompresses data with a known target size.
|
||||
func bitsetDecodeBytes(data []byte, target int) ([]byte, error) {
|
||||
out, size, err := bitsetDecodePartialBytes(data, target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if size != len(data) {
|
||||
return nil, errUnreferencedData
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// bitsetDecodePartialBytes decompresses data with a known target size, but does
|
||||
// not enforce consuming all the input bytes. In addition to the decompressed
|
||||
// output, the function returns the length of compressed input data corresponding
|
||||
// to the output as the input slice may be longer.
|
||||
func bitsetDecodePartialBytes(data []byte, target int) ([]byte, int, error) {
|
||||
// Sanity check 0 targets to avoid infinite recursion
|
||||
if target == 0 {
|
||||
return nil, 0, nil
|
||||
}
|
||||
// Handle the zero and single byte corner cases
|
||||
decomp := make([]byte, target)
|
||||
if len(data) == 0 {
|
||||
return decomp, 0, nil
|
||||
}
|
||||
if target == 1 {
|
||||
decomp[0] = data[0] // copy to avoid referencing the input slice
|
||||
if data[0] != 0 {
|
||||
return decomp, 1, nil
|
||||
}
|
||||
return decomp, 0, nil
|
||||
}
|
||||
// Decompress the bitset of set bytes and distribute the non zero bytes
|
||||
nonZeroBitset, ptr, err := bitsetDecodePartialBytes(data, (target+7)/8)
|
||||
if err != nil {
|
||||
return nil, ptr, err
|
||||
}
|
||||
for i := 0; i < 8*len(nonZeroBitset); i++ {
|
||||
if nonZeroBitset[i/8]&(1<<byte(7-i%8)) != 0 {
|
||||
// Make sure we have enough data to push into the correct slot
|
||||
if ptr >= len(data) {
|
||||
return nil, 0, errMissingData
|
||||
}
|
||||
if i >= len(decomp) {
|
||||
return nil, 0, errExceededTarget
|
||||
}
|
||||
// Make sure the data is valid and push into the slot
|
||||
if data[ptr] == 0 {
|
||||
return nil, 0, errZeroContent
|
||||
}
|
||||
decomp[i] = data[ptr]
|
||||
ptr++
|
||||
}
|
||||
}
|
||||
return decomp, ptr, nil
|
||||
}
|
56
common/bitutil/compress_fuzz.go
Normal file
56
common/bitutil/compress_fuzz.go
Normal file
@ -0,0 +1,56 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build gofuzz
|
||||
|
||||
package bitutil
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Fuzz implements a go-fuzz fuzzer method to test various encoding method
|
||||
// invocations.
|
||||
func Fuzz(data []byte) int {
|
||||
if len(data) == 0 {
|
||||
return -1
|
||||
}
|
||||
if data[0]%2 == 0 {
|
||||
return fuzzEncode(data[1:])
|
||||
}
|
||||
return fuzzDecode(data[1:])
|
||||
}
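A hedged note on how the selector byte above is meant to be driven; payload is an assumed placeholder, only the first-byte dispatch comes from the code shown:

payload := []byte("some input")
Fuzz(append([]byte{0x00}, payload...)) // even selector: encode then decode round trip
Fuzz(append([]byte{0x01}, payload...)) // odd selector: decode then re-encode round trip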
|
||||
|
||||
// fuzzEncode implements a go-fuzz fuzzer method to test the bitset encoding and
|
||||
// decoding algorithm.
|
||||
func fuzzEncode(data []byte) int {
|
||||
proc, _ := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
if !bytes.Equal(data, proc) {
|
||||
panic("content mismatch")
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// fuzzDecode implements a go-fuzz fuzzer method to test the bit decoding and
|
||||
// reencoding algorithm.
|
||||
func fuzzDecode(data []byte) int {
|
||||
blob, err := bitsetDecodeBytes(data, 1024)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
if comp := bitsetEncodeBytes(blob); !bytes.Equal(comp, data) {
|
||||
panic("content mismatch")
|
||||
}
|
||||
return 0
|
||||
}
|
181
common/bitutil/compress_test.go
Normal file
181
common/bitutil/compress_test.go
Normal file
@ -0,0 +1,181 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bitutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
)
|
||||
|
||||
// Tests that data bitset encoding and decoding works and is bijective.
|
||||
func TestEncodingCycle(t *testing.T) {
|
||||
tests := []string{
|
||||
// Tests generated by go-fuzz to maximize code coverage
|
||||
"0x000000000000000000",
|
||||
"0xef0400",
|
||||
"0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb",
|
||||
"0x7b64000000",
|
||||
"0x000034000000000000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f0000000000000000000",
|
||||
"0x4912385c0e7b64000000",
|
||||
"0x000034000000000000000000000000000000",
|
||||
"0x00",
|
||||
"0x000003e834ff7f0000",
|
||||
"0x0000",
|
||||
"0x0000000000000000000000000000000000000000000000000000000000ff00",
|
||||
"0x895f0c6a020f850c6a020f85f88df88d",
|
||||
"0xdf7070533534333636313639343638373432313536346c1bc3315aac2f65fefb",
|
||||
"0x0000000000",
|
||||
"0xdf70706336346c65fefb",
|
||||
"0x00006d643634000000",
|
||||
"0xdf7070533534333636313639343638373532313536346c1bc333393438373130707063363430353639343638373532313536346c1bc333393438336336346c65fe",
|
||||
}
|
||||
for i, tt := range tests {
|
||||
data := hexutil.MustDecode(tt)
|
||||
|
||||
proc, err := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
if err != nil {
|
||||
t.Errorf("test %d: failed to decompress compressed data: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(data, proc) {
|
||||
t.Errorf("test %d: compress/decompress mismatch: have %x, want %x", i, proc, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that data bitset decoding and re-encoding works and is bijective.
|
||||
func TestDecodingCycle(t *testing.T) {
|
||||
tests := []struct {
|
||||
size int
|
||||
input string
|
||||
fail error
|
||||
}{
|
||||
{size: 0, input: "0x"},
|
||||
|
||||
// Crashers generated by go-fuzz
|
||||
{size: 0, input: "0x0020", fail: errUnreferencedData},
|
||||
{size: 0, input: "0x30", fail: errUnreferencedData},
|
||||
{size: 1, input: "0x00", fail: errUnreferencedData},
|
||||
{size: 2, input: "0x07", fail: errMissingData},
|
||||
{size: 1024, input: "0x8000", fail: errZeroContent},
|
||||
|
||||
// Tests generated by go-fuzz to maximize code coverage
|
||||
{size: 29490, input: "0x343137343733323134333839373334323073333930783e3078333930783e70706336346c65303e", fail: errMissingData},
|
||||
{size: 59395, input: "0x00", fail: errUnreferencedData},
|
||||
{size: 52574, input: "0x70706336346c65c0de", fail: errExceededTarget},
|
||||
{size: 42264, input: "0x07", fail: errMissingData},
|
||||
{size: 52, input: "0xa5045bad48f4", fail: errExceededTarget},
|
||||
{size: 52574, input: "0xc0de", fail: errMissingData},
|
||||
{size: 52574, input: "0x"},
|
||||
{size: 29490, input: "0x34313734373332313433383937333432307333393078073034333839373334323073333930783e3078333937333432307333393078073061333930783e70706336346c65303e", fail: errMissingData},
|
||||
{size: 29491, input: "0x3973333930783e30783e", fail: errMissingData},
|
||||
|
||||
{size: 1024, input: "0x808080608080"},
|
||||
{size: 1024, input: "0x808470705e3632383337363033313434303137393130306c6580ef46806380635a80"},
|
||||
{size: 1024, input: "0x8080808070"},
|
||||
{size: 1024, input: "0x808070705e36346c6580ef46806380635a80"},
|
||||
{size: 1024, input: "0x80808046802680"},
|
||||
{size: 1024, input: "0x4040404035"},
|
||||
{size: 1024, input: "0x4040bf3ba2b3f684402d353234373438373934409fe5b1e7ada94ebfd7d0505e27be4035"},
|
||||
{size: 1024, input: "0x404040bf3ba2b3f6844035"},
|
||||
{size: 1024, input: "0x40402d35323437343837393440bfd7d0505e27be4035"},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
data := hexutil.MustDecode(tt.input)
|
||||
|
||||
orig, err := bitsetDecodeBytes(data, tt.size)
|
||||
if err != tt.fail {
|
||||
t.Errorf("test %d: failure mismatch: have %v, want %v", i, err, tt.fail)
|
||||
}
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if comp := bitsetEncodeBytes(orig); !bytes.Equal(comp, data) {
|
||||
t.Errorf("test %d: decompress/compress mismatch: have %x, want %x", i, comp, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestCompression tests that compression works by returning either the bitset
|
||||
// encoded input, or the actual input if the bitset version is longer.
|
||||
func TestCompression(t *testing.T) {
|
||||
// Check that the compression returns the bitset encoding if it is shorter
|
||||
in := hexutil.MustDecode("0x4912385c0e7b64000000")
|
||||
out := hexutil.MustDecode("0x80fe4912385c0e7b64")
|
||||
|
||||
if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
|
||||
t.Errorf("encoding mismatch for sparse data: have %x, want %x", data, out)
|
||||
}
|
||||
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
|
||||
t.Errorf("decoding mismatch for sparse data: have %x, want %x, error %v", data, in, err)
|
||||
}
|
||||
// Check that the compression returns the input if the bitset encoding is longer
|
||||
in = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
|
||||
out = hexutil.MustDecode("0xdf7070533534333636313639343638373532313536346c1bc33339343837313070706336343035336336346c65fefb3930393233383838ac2f65fefb")
|
||||
|
||||
if data := CompressBytes(in); bytes.Compare(data, out) != 0 {
|
||||
t.Errorf("encoding mismatch for dense data: have %x, want %x", data, out)
|
||||
}
|
||||
if data, err := DecompressBytes(out, len(in)); err != nil || bytes.Compare(data, in) != 0 {
|
||||
t.Errorf("decoding mismatch for dense data: have %x, want %x, error %v", data, in, err)
|
||||
}
|
||||
// Check that decompressing a longer input than the target fails
|
||||
if _, err := DecompressBytes([]byte{0xc0, 0x01, 0x01}, 2); err != errExceededTarget {
|
||||
t.Errorf("decoding error mismatch for long data: have %v, want %v", err, errExceededTarget)
|
||||
}
|
||||
}
|
||||
|
||||
// Crude benchmark for compressing random slices of bytes.
|
||||
func BenchmarkEncoding1KBVerySparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.0001) }
|
||||
func BenchmarkEncoding2KBVerySparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.0001) }
|
||||
func BenchmarkEncoding4KBVerySparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.0001) }
|
||||
|
||||
func BenchmarkEncoding1KBSparse(b *testing.B) { benchmarkEncoding(b, 1024, 0.001) }
|
||||
func BenchmarkEncoding2KBSparse(b *testing.B) { benchmarkEncoding(b, 2048, 0.001) }
|
||||
func BenchmarkEncoding4KBSparse(b *testing.B) { benchmarkEncoding(b, 4096, 0.001) }
|
||||
|
||||
func BenchmarkEncoding1KBDense(b *testing.B) { benchmarkEncoding(b, 1024, 0.1) }
|
||||
func BenchmarkEncoding2KBDense(b *testing.B) { benchmarkEncoding(b, 2048, 0.1) }
|
||||
func BenchmarkEncoding4KBDense(b *testing.B) { benchmarkEncoding(b, 4096, 0.1) }
|
||||
|
||||
func BenchmarkEncoding1KBSaturated(b *testing.B) { benchmarkEncoding(b, 1024, 0.5) }
|
||||
func BenchmarkEncoding2KBSaturated(b *testing.B) { benchmarkEncoding(b, 2048, 0.5) }
|
||||
func BenchmarkEncoding4KBSaturated(b *testing.B) { benchmarkEncoding(b, 4096, 0.5) }
|
||||
|
||||
func benchmarkEncoding(b *testing.B, bytes int, fill float64) {
|
||||
// Generate a random slice of bytes to compress
|
||||
random := rand.NewSource(0) // reproducible and comparable
|
||||
|
||||
data := make([]byte, bytes)
|
||||
bits := int(float64(bytes) * 8 * fill)
|
||||
|
||||
for i := 0; i < bits; i++ {
|
||||
idx := random.Int63() % int64(len(data))
|
||||
bit := uint(random.Int63() % 8)
|
||||
data[idx] |= 1 << bit
|
||||
}
|
||||
// Reset the benchmark and measure encoding/decoding
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bitsetDecodeBytes(bitsetEncodeBytes(data), len(data))
|
||||
}
|
||||
}
|
@ -89,18 +89,18 @@ func Hex2BytesFixed(str string, flen int) []byte {
|
||||
}
|
||||
|
||||
func RightPadBytes(slice []byte, l int) []byte {
|
||||
if l < len(slice) {
|
||||
if l <= len(slice) {
|
||||
return slice
|
||||
}
|
||||
|
||||
padded := make([]byte, l)
|
||||
copy(padded[0:len(slice)], slice)
|
||||
copy(padded, slice)
|
||||
|
||||
return padded
|
||||
}
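Illustrative values (assumed) for the padding helpers; note the change from < to <= means a slice already at the requested length is now returned as-is rather than copied:

RightPadBytes([]byte{1, 2, 3}, 5) // []byte{1, 2, 3, 0, 0}
LeftPadBytes([]byte{1, 2, 3}, 5)  // []byte{0, 0, 1, 2, 3}
RightPadBytes([]byte{1, 2, 3}, 3) // returned unchanged: l <= len(slice)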
|
||||
|
||||
func LeftPadBytes(slice []byte, l int) []byte {
|
||||
if l < len(slice) {
|
||||
if l <= len(slice) {
|
||||
return slice
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,8 @@ var (
|
||||
tt256 = BigPow(2, 256)
|
||||
tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
|
||||
MaxBig256 = new(big.Int).Set(tt256m1)
|
||||
tt63 = BigPow(2, 63)
|
||||
MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -44,7 +44,7 @@ import (
|
||||
const (
|
||||
checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database
|
||||
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
|
||||
inmemorySignatures = 1024 // Number of recent blocks to keep in memory
|
||||
inmemorySignatures = 4096 // Number of recent block signatures to keep in memory
|
||||
|
||||
wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers
|
||||
)
|
||||
@ -162,7 +162,12 @@ func sigHash(header *types.Header) (hash common.Hash) {
|
||||
}
|
||||
|
||||
// ecrecover extracts the Ethereum account address from a signed header.
|
||||
func ecrecover(header *types.Header) (common.Address, error) {
|
||||
func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
|
||||
// If the signature's already cached, return that
|
||||
hash := header.Hash()
|
||||
if address, known := sigcache.Get(hash); known {
|
||||
return address.(common.Address), nil
|
||||
}
|
||||
// Retrieve the signature from the header extra-data
|
||||
if len(header.Extra) < extraSeal {
|
||||
return common.Address{}, errMissingSignature
|
||||
@ -177,6 +182,7 @@ func ecrecover(header *types.Header) (common.Address, error) {
|
||||
var signer common.Address
|
||||
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
|
||||
|
||||
sigcache.Add(hash, signer)
|
||||
return signer, nil
|
||||
}
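A hedged sketch of how the new sigcache argument is produced and threaded through; the constructor call is assumed to live in New and uses the golang-lru ARC cache:

signatures, _ := lru.NewARC(inmemorySignatures) // cache of recovered signers, keyed by header hash
signer, err := ecrecover(header, signatures)    // a cache hit skips the ECDSA recovery entirely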
|
||||
|
||||
@ -223,7 +229,7 @@ func New(config *params.CliqueConfig, db ethdb.Database) *Clique {
|
||||
// Author implements consensus.Engine, returning the Ethereum address recovered
|
||||
// from the signature in the header's extra-data section.
|
||||
func (c *Clique) Author(header *types.Header) (common.Address, error) {
|
||||
return ecrecover(header)
|
||||
return ecrecover(header, c.signatures)
|
||||
}
|
||||
|
||||
// VerifyHeader checks whether a header conforms to the consensus rules.
|
||||
@ -369,7 +375,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
|
||||
}
|
||||
// If an on-disk checkpoint snapshot can be found, use that
|
||||
if number%checkpointInterval == 0 {
|
||||
if s, err := loadSnapshot(c.config, c.db, hash); err == nil {
|
||||
if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
|
||||
log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash)
|
||||
snap = s
|
||||
break
|
||||
@ -385,7 +391,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo
|
||||
for i := 0; i < len(signers); i++ {
|
||||
copy(signers[i][:], genesis.Extra[extraVanity+i*common.AddressLength:])
|
||||
}
|
||||
snap = newSnapshot(c.config, 0, genesis.Hash(), signers)
|
||||
snap = newSnapshot(c.config, c.signatures, 0, genesis.Hash(), signers)
|
||||
if err := snap.store(c.db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -464,7 +470,7 @@ func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, p
|
||||
c.recents.Add(snap.Hash, snap)
|
||||
|
||||
// Resolve the authorization key and check against signers
|
||||
signer, err := ecrecover(header)
|
||||
signer, err := ecrecover(header, c.signatures)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -599,7 +605,7 @@ func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, stop <-ch
|
||||
for seen, recent := range snap.Recents {
|
||||
if recent == signer {
|
||||
// Signer is among recents, only wait if the current block doesn't shift it out
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit {
|
||||
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
|
||||
log.Info("Signed recently, must wait for others")
|
||||
<-stop
|
||||
return nil, nil
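Why the added number < limit guard matters, with assumed numbers: both values are uint64, so the old comparison could underflow for early blocks.

var number, limit uint64 = 2, 4
_ = number - limit // wraps to 18446744073709551614, so "seen > number-limit" misbehaved for early blocks
_ = number < limit // the new short-circuit avoids the wrapped subtraction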
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
)
|
||||
|
||||
// Vote represents a single vote that an authorized signer made to modify the
|
||||
@ -44,7 +45,8 @@ type Tally struct {
|
||||
|
||||
// Snapshot is the state of the authorization voting at a given point in time.
|
||||
type Snapshot struct {
|
||||
config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
|
||||
config *params.CliqueConfig // Consensus engine parameters to fine tune behavior
|
||||
sigcache *lru.ARCCache // Cache of recent block signatures to speed up ecrecover
|
||||
|
||||
Number uint64 `json:"number"` // Block number where the snapshot was created
|
||||
Hash common.Hash `json:"hash"` // Block hash where the snapshot was created
|
||||
@ -57,14 +59,15 @@ type Snapshot struct {
|
||||
// newSnapshot create a new snapshot with the specified startup parameters. This
|
||||
// method does not initialize the set of recent signers, so only ever use it for
|
||||
// the genesis block.
|
||||
func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
|
||||
func newSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, number uint64, hash common.Hash, signers []common.Address) *Snapshot {
|
||||
snap := &Snapshot{
|
||||
config: config,
|
||||
Number: number,
|
||||
Hash: hash,
|
||||
Signers: make(map[common.Address]struct{}),
|
||||
Recents: make(map[uint64]common.Address),
|
||||
Tally: make(map[common.Address]Tally),
|
||||
config: config,
|
||||
sigcache: sigcache,
|
||||
Number: number,
|
||||
Hash: hash,
|
||||
Signers: make(map[common.Address]struct{}),
|
||||
Recents: make(map[uint64]common.Address),
|
||||
Tally: make(map[common.Address]Tally),
|
||||
}
|
||||
for _, signer := range signers {
|
||||
snap.Signers[signer] = struct{}{}
|
||||
@ -73,7 +76,7 @@ func newSnapshot(config *params.CliqueConfig, number uint64, hash common.Hash, s
|
||||
}
|
||||
|
||||
// loadSnapshot loads an existing snapshot from the database.
|
||||
func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
|
||||
func loadSnapshot(config *params.CliqueConfig, sigcache *lru.ARCCache, db ethdb.Database, hash common.Hash) (*Snapshot, error) {
|
||||
blob, err := db.Get(append([]byte("clique-"), hash[:]...))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -83,6 +86,7 @@ func loadSnapshot(config *params.CliqueConfig, db ethdb.Database, hash common.Ha
|
||||
return nil, err
|
||||
}
|
||||
snap.config = config
|
||||
snap.sigcache = sigcache
|
||||
|
||||
return snap, nil
|
||||
}
|
||||
@ -99,13 +103,14 @@ func (s *Snapshot) store(db ethdb.Database) error {
|
||||
// copy creates a deep copy of the snapshot, though not the individual votes.
|
||||
func (s *Snapshot) copy() *Snapshot {
|
||||
cpy := &Snapshot{
|
||||
config: s.config,
|
||||
Number: s.Number,
|
||||
Hash: s.Hash,
|
||||
Signers: make(map[common.Address]struct{}),
|
||||
Recents: make(map[uint64]common.Address),
|
||||
Votes: make([]*Vote, len(s.Votes)),
|
||||
Tally: make(map[common.Address]Tally),
|
||||
config: s.config,
|
||||
sigcache: s.sigcache,
|
||||
Number: s.Number,
|
||||
Hash: s.Hash,
|
||||
Signers: make(map[common.Address]struct{}),
|
||||
Recents: make(map[uint64]common.Address),
|
||||
Votes: make([]*Vote, len(s.Votes)),
|
||||
Tally: make(map[common.Address]Tally),
|
||||
}
|
||||
for signer := range s.Signers {
|
||||
cpy.Signers[signer] = struct{}{}
|
||||
@ -190,7 +195,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) {
|
||||
delete(snap.Recents, number-limit)
|
||||
}
|
||||
// Resolve the authorization key and check against signers
|
||||
signer, err := ecrecover(header)
|
||||
signer, err := ecrecover(header, s.sigcache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"unsafe"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/bitutil"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/crypto/sha3"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -142,7 +143,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
|
||||
dstOff = j * hashBytes
|
||||
xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes
|
||||
)
|
||||
xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
|
||||
bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
|
||||
keccak512(cache[dstOff:], temp)
|
||||
|
||||
atomic.AddUint32(&progress, 1)
|
||||
|
@ -239,10 +239,19 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
return errZeroBlockTime
|
||||
}
|
||||
// Verify the block's difficulty based on its timestamp and parent's difficulty
|
||||
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
|
||||
expected := CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
||||
if expected.Cmp(header.Difficulty) != 0 {
|
||||
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
||||
}
|
||||
// Verify that the gas limit is <= 2^63-1
|
||||
if header.GasLimit.Cmp(math.MaxBig63) > 0 {
|
||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, math.MaxBig63)
|
||||
}
|
||||
// Verify that the gasUsed is <= gasLimit
|
||||
if header.GasUsed.Cmp(header.GasLimit) > 0 {
|
||||
return fmt.Errorf("invalid gasUsed: have %v, gasLimit %v", header.GasUsed, header.GasLimit)
|
||||
}
|
||||
|
||||
// Verify that the gas limit remains within allowed bounds
|
||||
diff := new(big.Int).Set(parent.GasLimit)
|
||||
diff = diff.Sub(diff, header.GasLimit)
|
||||
@ -274,16 +283,19 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
|
||||
return nil
|
||||
}
|
||||
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
|
||||
// that a new block should have when created at time given the parent block's time
|
||||
// and difficulty.
|
||||
// CalcDifficulty is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time
|
||||
// given the parent block's time and difficulty.
|
||||
//
|
||||
// TODO (karalabe): Move the chain maker into this package and make this private!
|
||||
func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
|
||||
if config.IsHomestead(new(big.Int).Add(parentNumber, common.Big1)) {
|
||||
return calcDifficultyHomestead(time, parentTime, parentNumber, parentDiff)
|
||||
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
|
||||
next := new(big.Int).Add(parent.Number, common.Big1)
|
||||
switch {
|
||||
case config.IsHomestead(next):
|
||||
return calcDifficultyHomestead(time, parent)
|
||||
default:
|
||||
return calcDifficultyFrontier(time, parent)
|
||||
}
|
||||
return calcDifficultyFrontier(time, parentTime, parentNumber, parentDiff)
|
||||
}
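Call-site shape before and after the refactor, as it appears in Prepare further down; callers now hand over the whole parent header instead of unpacking its fields:

// before: CalcDifficulty(chain.Config(), header.Time.Uint64(), parent.Time.Uint64(), parent.Number, parent.Difficulty)
// after:
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)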
|
||||
|
||||
// Some weird constants to avoid constant memory allocs for them.
|
||||
@ -296,7 +308,7 @@ var (
|
||||
// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
|
||||
// the difficulty that a new block should have when created at time given the
|
||||
// parent block's time and difficulty. The calculation uses the Homestead rules.
|
||||
func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
|
||||
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
|
||||
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki
|
||||
// algorithm:
|
||||
// diff = (parent_diff +
|
||||
@ -304,7 +316,7 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
// ) + 2^(periodCount - 2)
|
||||
|
||||
bigTime := new(big.Int).SetUint64(time)
|
||||
bigParentTime := new(big.Int).SetUint64(parentTime)
|
||||
bigParentTime := new(big.Int).Set(parent.Time)
|
||||
|
||||
// holds intermediate values to make the algo easier to read & audit
|
||||
x := new(big.Int)
|
||||
@ -320,16 +332,16 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
x.Set(bigMinus99)
|
||||
}
|
||||
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
|
||||
y.Div(parentDiff, params.DifficultyBoundDivisor)
|
||||
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
||||
x.Mul(y, x)
|
||||
x.Add(parentDiff, x)
|
||||
x.Add(parent.Difficulty, x)
|
||||
|
||||
// minimum difficulty can ever be (before exponential factor)
|
||||
if x.Cmp(params.MinimumDifficulty) < 0 {
|
||||
x.Set(params.MinimumDifficulty)
|
||||
}
|
||||
// for the exponential factor
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
periodCount := new(big.Int).Add(parent.Number, common.Big1)
|
||||
periodCount.Div(periodCount, expDiffPeriod)
|
||||
|
||||
// the exponential factor, commonly referred to as "the bomb"
|
||||
@ -345,25 +357,25 @@ func calcDifficultyHomestead(time, parentTime uint64, parentNumber, parentDiff *
|
||||
// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
|
||||
// difficulty that a new block should have when created at time given the parent
|
||||
// block's time and difficulty. The calculation uses the Frontier rules.
|
||||
func calcDifficultyFrontier(time, parentTime uint64, parentNumber, parentDiff *big.Int) *big.Int {
|
||||
func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
|
||||
diff := new(big.Int)
|
||||
adjust := new(big.Int).Div(parentDiff, params.DifficultyBoundDivisor)
|
||||
adjust := new(big.Int).Div(parent.Difficulty, params.DifficultyBoundDivisor)
|
||||
bigTime := new(big.Int)
|
||||
bigParentTime := new(big.Int)
|
||||
|
||||
bigTime.SetUint64(time)
|
||||
bigParentTime.SetUint64(parentTime)
|
||||
bigParentTime.Set(parent.Time)
|
||||
|
||||
if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
|
||||
diff.Add(parentDiff, adjust)
|
||||
diff.Add(parent.Difficulty, adjust)
|
||||
} else {
|
||||
diff.Sub(parentDiff, adjust)
|
||||
diff.Sub(parent.Difficulty, adjust)
|
||||
}
|
||||
if diff.Cmp(params.MinimumDifficulty) < 0 {
|
||||
diff.Set(params.MinimumDifficulty)
|
||||
}
|
||||
|
||||
periodCount := new(big.Int).Add(parentNumber, common.Big1)
|
||||
periodCount := new(big.Int).Add(parent.Number, common.Big1)
|
||||
periodCount.Div(periodCount, expDiffPeriod)
|
||||
if periodCount.Cmp(common.Big1) > 0 {
|
||||
// diff = diff + 2^(periodCount - 2)
|
||||
@ -425,8 +437,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainReader, header *types.Header)
|
||||
if parent == nil {
|
||||
return consensus.ErrUnknownAncestor
|
||||
}
|
||||
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(),
|
||||
parent.Time.Uint64(), parent.Number, parent.Difficulty)
|
||||
header.Difficulty = CalcDifficulty(chain.Config(), header.Time.Uint64(), parent)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common/math"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
@ -71,7 +72,11 @@ func TestCalcDifficulty(t *testing.T) {
|
||||
config := &params.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
|
||||
for name, test := range tests {
|
||||
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
|
||||
diff := CalcDifficulty(config, test.CurrentTimestamp, test.ParentTimestamp, number, test.ParentDifficulty)
|
||||
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
|
||||
Number: number,
|
||||
Time: new(big.Int).SetUint64(test.ParentTimestamp),
|
||||
Difficulty: test.ParentDifficulty,
|
||||
})
|
||||
if diff.Cmp(test.CurrentDifficulty) != 0 {
|
||||
t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
|
||||
}
|
||||
|
@ -467,8 +467,9 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
|
||||
future = &cache{epoch: epoch + 1}
|
||||
ethash.fcache = future
|
||||
}
|
||||
// New current cache, set its initial timestamp
|
||||
current.used = time.Now()
|
||||
}
|
||||
current.used = time.Now()
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
@ -529,8 +530,9 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
|
||||
future = &dataset{epoch: epoch + 1}
|
||||
ethash.fdataset = future
|
||||
}
|
||||
// New current dataset, set its initial timestamp
|
||||
current.used = time.Now()
|
||||
}
|
||||
current.used = time.Now()
|
||||
ethash.lock.Unlock()
|
||||
|
||||
// Wait for generation finish, bump the timestamp and finalize the cache
|
||||
|
@ -1,85 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Source: https://golang.org/src/crypto/cipher/xor.go
|
||||
|
||||
package ethash
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
|
||||
|
||||
// fastXORBytes xors in bulk. It only works on architectures that
|
||||
// support unaligned read/writes.
|
||||
func fastXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
|
||||
w := n / wordSize
|
||||
if w > 0 {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
for i := 0; i < w; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
|
||||
for i := (n - n%wordSize); i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func safeXORBytes(dst, a, b []byte) int {
|
||||
n := len(a)
|
||||
if len(b) < n {
|
||||
n = len(b)
|
||||
}
|
||||
for i := 0; i < n; i++ {
|
||||
dst[i] = a[i] ^ b[i]
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
|
||||
// space. Returns the number of bytes xor'd.
|
||||
func xorBytes(dst, a, b []byte) int {
|
||||
if supportsUnaligned {
|
||||
return fastXORBytes(dst, a, b)
|
||||
}
|
||||
// TODO(hanwen): if (dst, a, b) have common alignment
|
||||
// we could still try fastXORBytes. It is not clear
|
||||
// how often this happens, and it's only worth it if
|
||||
// the block encryption itself is hardware
|
||||
// accelerated.
|
||||
return safeXORBytes(dst, a, b)
|
||||
}
|
||||
|
||||
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
|
||||
// The arguments are assumed to be of equal length.
|
||||
func fastXORWords(dst, a, b []byte) {
|
||||
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
|
||||
aw := *(*[]uintptr)(unsafe.Pointer(&a))
|
||||
bw := *(*[]uintptr)(unsafe.Pointer(&b))
|
||||
n := len(b) / wordSize
|
||||
for i := 0; i < n; i++ {
|
||||
dw[i] = aw[i] ^ bw[i]
|
||||
}
|
||||
}
|
||||
|
||||
func xorWords(dst, a, b []byte) {
|
||||
if supportsUnaligned {
|
||||
fastXORWords(dst, a, b)
|
||||
} else {
|
||||
safeXORBytes(dst, a, b)
|
||||
}
|
||||
}
|
@ -20,6 +20,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@ -240,17 +241,19 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
|
||||
throwJSException(err.Error())
|
||||
}
|
||||
var (
|
||||
rawReq = []byte(reqVal.String())
|
||||
rawReq = reqVal.String()
|
||||
dec = json.NewDecoder(strings.NewReader(rawReq))
|
||||
reqs []jsonrpcCall
|
||||
batch bool
|
||||
)
|
||||
dec.UseNumber() // avoid float64s
|
||||
if rawReq[0] == '[' {
|
||||
batch = true
|
||||
json.Unmarshal(rawReq, &reqs)
|
||||
dec.Decode(&reqs)
|
||||
} else {
|
||||
batch = false
|
||||
reqs = make([]jsonrpcCall, 1)
|
||||
json.Unmarshal(rawReq, &reqs[0])
|
||||
dec.Decode(&reqs[0])
|
||||
}
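A hedged, self-contained illustration of why a decoder with UseNumber replaced the plain Unmarshal: JSON-RPC ids and quantities can exceed float64's integer precision.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"id":9007199254740993}`))
	dec.UseNumber()
	var v map[string]interface{}
	dec.Decode(&v)
	fmt.Println(v["id"]) // json.Number("9007199254740993"); a plain Unmarshal would round it as float64
}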
|
||||
|
||||
// Execute the requests.
|
||||
|
@ -2,7 +2,7 @@ FROM alpine:3.5
|
||||
|
||||
RUN \
|
||||
apk add --update go git make gcc musl-dev linux-headers ca-certificates && \
|
||||
git clone --depth 1 --branch release/1.5 https://github.com/ethereum/go-ethereum && \
|
||||
git clone --depth 1 --branch release/1.6 https://github.com/ethereum/go-ethereum && \
|
||||
(cd go-ethereum && make geth) && \
|
||||
cp go-ethereum/build/bin/geth /geth && \
|
||||
apk del go git make gcc musl-dev linux-headers && \
|
||||
|
1
containers/vagrant/.gitignore
vendored
Normal file
1
containers/vagrant/.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
.vagrant
|
51
containers/vagrant/Vagrantfile
vendored
51
containers/vagrant/Vagrantfile
vendored
@ -1,29 +1,38 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
Vagrant.configure(2) do |config|
|
||||
config.vm.box = "ubuntu/trusty64"
|
||||
require 'yaml'
|
||||
|
||||
config.vm.provider "virtualbox" do |vb|
|
||||
vb.memory = "2048"
|
||||
VAGRANTFILE_API_VERSION = 2
|
||||
VM_RAM = 2048
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
|
||||
config.vm.define "ubuntu", :primary => true do |ubuntu|
|
||||
ubuntu.vm.box = "ubuntu/trusty64"
|
||||
ubuntu.vm.provision "shell", :path => "provisioners/shell/ubuntu.sh"
|
||||
end
|
||||
|
||||
config.vm.define "debian", :primary => true do |debian|
|
||||
debian.vm.box = "debian/jessie64"
|
||||
debian.vm.provision "shell", :path => "provisioners/shell/debian.sh"
|
||||
end
|
||||
|
||||
config.vm.define "centos", :autostart => false do |centos|
|
||||
centos.vm.box = "centos/7"
|
||||
centos.vm.provision "shell", :path => "provisioners/shell/centos.sh"
|
||||
end
|
||||
|
||||
config.vm.provider "virtualbox" do |vb|
|
||||
vb.memory = VM_RAM
|
||||
end
|
||||
|
||||
config.vm.provider "libvirt" do |lv|
|
||||
lv.memory = VM_RAM
|
||||
|
||||
config.vm.synced_folder ".", "/home/vagrant/sync", :disabled => true
|
||||
end
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant", :disabled => true
|
||||
config.vm.synced_folder "../../", "/home/vagrant/go/src/github.com/ethereum/go-ethereum"
|
||||
config.vm.synced_folder ".", "/vagrant", disabled: true
|
||||
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
sudo apt-get install software-properties-common
|
||||
sudo add-apt-repository -y ppa:ethereum/ethereum
|
||||
sudo add-apt-repository -y ppa:ethereum/ethereum-dev
|
||||
sudo apt-get update
|
||||
|
||||
sudo apt-get install -y build-essential golang git-all
|
||||
|
||||
GOPATH=/home/vagrant/go go get github.com/tools/godep
|
||||
|
||||
sudo chown -R vagrant:vagrant ~vagrant/go
|
||||
|
||||
echo "export GOPATH=/home/vagrant/go" >> ~vagrant/.bashrc
|
||||
echo "export PATH=\\\$PATH:\\\$GOPATH/bin:/usr/local/go/bin" >> ~vagrant/.bashrc
|
||||
SHELL
|
||||
end
|
||||
|
11
containers/vagrant/provisioners/shell/centos.sh
Executable file
11
containers/vagrant/provisioners/shell/centos.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo yum install -y git wget
|
||||
sudo yum update -y
|
||||
|
||||
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
|
||||
|
||||
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
|
||||
|
||||
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc
|
11
containers/vagrant/provisioners/shell/debian.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo apt-get install -y build-essential git-all wget
|
||||
sudo apt-get update
|
||||
|
||||
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
|
||||
|
||||
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
|
||||
|
||||
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc
|
11
containers/vagrant/provisioners/shell/ubuntu.sh
Executable file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
|
||||
|
||||
sudo apt-get install -y build-essential git-all wget
|
||||
sudo apt-get update
|
||||
|
||||
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
|
||||
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
|
||||
|
||||
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
|
||||
|
||||
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc
|
File diff suppressed because it is too large
@ -18,7 +18,7 @@ package core
|
||||
|
||||
import "github.com/ethereum/go-ethereum/common"
|
||||
|
||||
// Set of manually tracked bad hashes (usually hard forks)
|
||||
// BadHashes represent a set of manually tracked bad hashes (usually hard forks)
|
||||
var BadHashes = map[common.Hash]bool{
|
||||
common.HexToHash("05bef30ef572270f654746da22639a7a0c97dd97a7050b9e252391996aaeb689"): true,
|
||||
common.HexToHash("7d05d08cbc596a2e5e4f13b80a743e53e09221b5323c3a61946b20873e58583f"): true,
|
||||
|
@ -84,7 +84,7 @@ func (b *BlockGen) AddTx(tx *types.Transaction) {
|
||||
if b.gasPool == nil {
|
||||
b.SetCoinbase(common.Address{})
|
||||
}
|
||||
b.statedb.StartRecord(tx.Hash(), common.Hash{}, len(b.txs))
|
||||
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
|
||||
receipt, _, err := ApplyTransaction(b.config, nil, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, b.header.GasUsed, vm.Config{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@ -98,10 +98,10 @@ func (b *BlockGen) Number() *big.Int {
|
||||
return new(big.Int).Set(b.header.Number)
|
||||
}
|
||||
|
||||
// AddUncheckedReceipts forcefully adds a receipts to the block without a
|
||||
// AddUncheckedReceipt forcefully adds a receipt to the block without a
|
||||
// backing transaction.
|
||||
//
|
||||
// AddUncheckedReceipts will cause consensus failures when used during real
|
||||
// AddUncheckedReceipt will cause consensus failures when used during real
|
||||
// chain processing. This is best used in conjunction with raw block insertion.
|
||||
func (b *BlockGen) AddUncheckedReceipt(receipt *types.Receipt) {
|
||||
b.receipts = append(b.receipts, receipt)
|
||||
@ -142,7 +142,7 @@ func (b *BlockGen) OffsetTime(seconds int64) {
|
||||
if b.header.Time.Cmp(b.parent.Header().Time) <= 0 {
|
||||
panic("block time out of range")
|
||||
}
|
||||
b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Time().Uint64(), b.parent.Number(), b.parent.Difficulty())
|
||||
b.header.Difficulty = ethash.CalcDifficulty(b.config, b.header.Time.Uint64(), b.parent.Header())
|
||||
}
|
||||
|
||||
// GenerateChain creates a chain of n blocks. The first block's
|
||||
@ -209,15 +209,20 @@ func makeHeader(config *params.ChainConfig, parent *types.Block, state *state.St
|
||||
} else {
|
||||
time = new(big.Int).Add(parent.Time(), big.NewInt(10)) // block time is fixed at 10 seconds
|
||||
}
|
||||
|
||||
return &types.Header{
|
||||
Root: state.IntermediateRoot(config.IsEIP158(parent.Number())),
|
||||
ParentHash: parent.Hash(),
|
||||
Coinbase: parent.Coinbase(),
|
||||
Difficulty: ethash.CalcDifficulty(config, time.Uint64(), new(big.Int).Sub(time, big.NewInt(10)).Uint64(), parent.Number(), parent.Difficulty()),
|
||||
GasLimit: CalcGasLimit(parent),
|
||||
GasUsed: new(big.Int),
|
||||
Number: new(big.Int).Add(parent.Number(), common.Big1),
|
||||
Time: time,
|
||||
Difficulty: ethash.CalcDifficulty(config, time.Uint64(), &types.Header{
|
||||
Number: parent.Number(),
|
||||
Time: new(big.Int).Sub(time, big.NewInt(10)),
|
||||
Difficulty: parent.Difficulty(),
|
||||
}),
|
||||
GasLimit: CalcGasLimit(parent),
|
||||
GasUsed: new(big.Int),
|
||||
Number: new(big.Int).Add(parent.Number(), common.Big1),
|
||||
Time: time,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -64,7 +64,7 @@ var (
|
||||
oldBlockReceiptsPrefix = []byte("receipts-block-")
|
||||
oldBlockHashPrefix = []byte("block-hash-") // [deprecated by the header/block split, remove eventually]
|
||||
|
||||
ChainConfigNotFoundErr = errors.New("ChainConfig not found") // general config not found error
|
||||
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
|
||||
|
||||
mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
|
||||
|
||||
@ -546,7 +546,7 @@ func mipmapKey(num, level uint64) []byte {
|
||||
return append(mipmapPre, append(lkey, key.Bytes()...)...)
|
||||
}
|
||||
|
||||
// WriteMapmapBloom writes each address included in the receipts' logs to the
|
||||
// WriteMipmapBloom writes each address included in the receipts' logs to the
|
||||
// MIP bloom bin.
|
||||
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
|
||||
mipmapBloomMu.Lock()
|
||||
@ -638,7 +638,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
|
||||
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) {
|
||||
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
|
||||
if len(jsonChainConfig) == 0 {
|
||||
return nil, ChainConfigNotFoundErr
|
||||
return nil, ErrChainConfigNotFound
|
||||
}
|
||||
|
||||
var config params.ChainConfig
|
||||
|
@ -17,8 +17,6 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
@ -43,15 +41,9 @@ type NewMinedBlockEvent struct{ Block *types.Block }
|
||||
// RemovedTransactionEvent is posted when a reorg happens
|
||||
type RemovedTransactionEvent struct{ Txs types.Transactions }
|
||||
|
||||
// RemovedLogEvent is posted when a reorg happens
|
||||
// RemovedLogsEvent is posted when a reorg happens
|
||||
type RemovedLogsEvent struct{ Logs []*types.Log }
|
||||
|
||||
// ChainSplit is posted when a new head is detected
|
||||
type ChainSplitEvent struct {
|
||||
Block *types.Block
|
||||
Logs []*types.Log
|
||||
}
|
||||
|
||||
type ChainEvent struct {
|
||||
Block *types.Block
|
||||
Hash common.Hash
|
||||
@ -73,8 +65,6 @@ type ChainUncleEvent struct {
|
||||
|
||||
type ChainHeadEvent struct{ Block *types.Block }
|
||||
|
||||
type GasPriceChanged struct{ Price *big.Int }
|
||||
|
||||
// Mining operation events
|
||||
type StartMining struct{}
|
||||
type TopMining struct{}
|
||||
|
@ -20,4 +20,4 @@ import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
var BlockReward *big.Int = big.NewInt(5e+18)
|
||||
var BlockReward = big.NewInt(5e+18)
|
||||
|
@ -86,7 +86,7 @@ type GenesisMismatchError struct {
|
||||
}
|
||||
|
||||
func (e *GenesisMismatchError) Error() string {
|
||||
return fmt.Sprintf("wrong genesis block in database (have %x, new %x)", e.Stored[:8], e.New[:8])
|
||||
return fmt.Sprintf("database already contains an incompatible genesis block (have %x, new %x)", e.Stored[:8], e.New[:8])
|
||||
}
|
||||
|
||||
// SetupGenesisBlock writes or updates the genesis block in db.
|
||||
@ -133,7 +133,7 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig
|
||||
newcfg := genesis.configOrDefault(stored)
|
||||
storedcfg, err := GetChainConfig(db, stored)
|
||||
if err != nil {
|
||||
if err == ChainConfigNotFoundErr {
|
||||
if err == ErrChainConfigNotFound {
|
||||
// This case happens if a genesis write was interrupted.
|
||||
log.Warn("Found genesis block without chain config")
|
||||
err = WriteChainConfig(db, stored, newcfg)
|
||||
@ -278,6 +278,18 @@ func DefaultTestnetGenesisBlock() *Genesis {
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultRinkebyGenesisBlock returns the Rinkeby network genesis block.
|
||||
func DefaultRinkebyGenesisBlock() *Genesis {
|
||||
return &Genesis{
|
||||
Config: params.RinkebyChainConfig,
|
||||
Timestamp: 1492009146,
|
||||
ExtraData: hexutil.MustDecode("0x52657370656374206d7920617574686f7269746168207e452e436172746d616e42eb768f2244c8811c63729a21a3569731535f067ffc57839b00206d1ad20c69a1981b489f772031b279182d99e65703f0076e4812653aab85fca0f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
|
||||
GasLimit: 4700000,
|
||||
Difficulty: big.NewInt(1),
|
||||
Alloc: decodePrealloc(rinkebyAllocData),
|
||||
}
|
||||
}
|
||||
|
||||
// DevGenesisBlock returns the 'geth --dev' genesis block.
|
||||
func DevGenesisBlock() *Genesis {
|
||||
return &Genesis{
|
||||
|
File diff suppressed because one or more lines are too long
@ -201,15 +201,6 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
|
||||
// header writes should be protected by the parent chain mutex individually.
|
||||
type WhCallback func(*types.Header) error
|
||||
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
//
|
||||
// The verify parameter can be used to fine tune whether nonce verification
|
||||
// should be done or not. The reason behind the optional check is because some
|
||||
// of the header retrieval mechanisms already need to verify nonces, as well as
|
||||
// because nonces can be verified sparsely, not needing to check each.
|
||||
|
||||
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
|
||||
// Do a sanity check that the provided chain is actually ordered and linked
|
||||
for i := 1; i < len(chain); i++ {
|
||||
@ -257,6 +248,14 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
//
|
||||
// The verify parameter can be used to fine tune whether nonce verification
|
||||
// should be done or not. The reason behind the optional check is because some
|
||||
// of the header retrieval mechanisms already need to verify nonces, as well as
|
||||
// because nonces can be verified sparsely, not needing to check each.
|
||||
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCallback, start time.Time) (int, error) {
|
||||
// Collect some import statistics to report on
|
||||
stats := struct{ processed, ignored int }{}
|
||||
|
@ -21,8 +21,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
// "github.com/ethereum/go-ethereum/crypto"
|
||||
|
||||
"github.com/ethereum/go-ethereum/ethdb"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
)
|
||||
@ -38,24 +36,24 @@ type TestManager struct {
|
||||
Blocks []*types.Block
|
||||
}
|
||||
|
||||
func (s *TestManager) IsListening() bool {
|
||||
func (tm *TestManager) IsListening() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *TestManager) IsMining() bool {
|
||||
func (tm *TestManager) IsMining() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *TestManager) PeerCount() int {
|
||||
func (tm *TestManager) PeerCount() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s *TestManager) Peers() *list.List {
|
||||
func (tm *TestManager) Peers() *list.List {
|
||||
return list.New()
|
||||
}
|
||||
|
||||
func (s *TestManager) BlockChain() *BlockChain {
|
||||
return s.blockChain
|
||||
func (tm *TestManager) BlockChain() *BlockChain {
|
||||
return tm.blockChain
|
||||
}
|
||||
|
||||
func (tm *TestManager) TxPool() *TxPool {
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/trie"
|
||||
)
|
||||
|
||||
type DumpAccount struct {
|
||||
@ -44,7 +45,7 @@ func (self *StateDB) RawDump() Dump {
|
||||
Accounts: make(map[string]DumpAccount),
|
||||
}
|
||||
|
||||
it := self.trie.Iterator()
|
||||
it := trie.NewIterator(self.trie.NodeIterator(nil))
|
||||
for it.Next() {
|
||||
addr := self.trie.GetKey(it.Key)
|
||||
var data Account
|
||||
@ -61,7 +62,7 @@ func (self *StateDB) RawDump() Dump {
|
||||
Code: common.Bytes2Hex(obj.Code(self.db)),
|
||||
Storage: make(map[string]string),
|
||||
}
|
||||
storageIt := obj.getTrie(self.db).Iterator()
|
||||
storageIt := trie.NewIterator(obj.getTrie(self.db).NodeIterator(nil))
|
||||
for storageIt.Next() {
|
||||
account.Storage[common.Bytes2Hex(self.trie.GetKey(storageIt.Key))] = common.Bytes2Hex(storageIt.Value)
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ func (it *NodeIterator) step() error {
|
||||
}
|
||||
// Initialize the iterator if we've just started
|
||||
if it.stateIt == nil {
|
||||
it.stateIt = it.state.trie.NodeIterator()
|
||||
it.stateIt = it.state.trie.NodeIterator(nil)
|
||||
}
|
||||
// If we had data nodes previously, we surely have at least state nodes
|
||||
if it.dataIt != nil {
|
||||
@ -118,7 +118,7 @@ func (it *NodeIterator) step() error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
it.dataIt = trie.NewNodeIterator(dataTrie)
|
||||
it.dataIt = dataTrie.NodeIterator(nil)
|
||||
if !it.dataIt.Next(true) {
|
||||
it.dataIt = nil
|
||||
}
|
||||
|
@ -71,8 +71,8 @@ type (
|
||||
hash common.Hash
|
||||
}
|
||||
touchChange struct {
|
||||
account *common.Address
|
||||
prev bool
|
||||
account *common.Address
|
||||
prev bool
|
||||
prevDirty bool
|
||||
}
|
||||
)
|
||||
@ -91,6 +91,11 @@ func (ch suicideChange) undo(s *StateDB) {
|
||||
if obj != nil {
|
||||
obj.suicided = ch.prev
|
||||
obj.setBalance(ch.prevbalance)
|
||||
// if the object wasn't suicided before, remove
|
||||
// it from the list of destructed objects as well.
|
||||
if !obj.suicided {
|
||||
delete(s.stateObjectsDestructed, *ch.account)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -201,7 +201,7 @@ func (self *stateObject) setState(key, value common.Hash) {
|
||||
}
|
||||
|
||||
// updateTrie writes cached storage modifications into the object's storage trie.
|
||||
func (self *stateObject) updateTrie(db trie.Database) {
|
||||
func (self *stateObject) updateTrie(db trie.Database) *trie.SecureTrie {
|
||||
tr := self.getTrie(db)
|
||||
for key, value := range self.dirtyStorage {
|
||||
delete(self.dirtyStorage, key)
|
||||
@ -213,6 +213,7 @@ func (self *stateObject) updateTrie(db trie.Database) {
|
||||
v, _ := rlp.EncodeToBytes(bytes.TrimLeft(value[:], "\x00"))
|
||||
tr.Update(key[:], v)
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
// UpdateRoot sets the trie root to the current root hash of
|
||||
@ -280,7 +281,11 @@ func (c *stateObject) ReturnGas(gas *big.Int) {}
|
||||
|
||||
func (self *stateObject) deepCopy(db *StateDB, onDirty func(addr common.Address)) *stateObject {
|
||||
stateObject := newObject(db, self.address, self.data, onDirty)
|
||||
stateObject.trie = self.trie
|
||||
if self.trie != nil {
|
||||
// A shallow copy makes the two tries independent.
|
||||
cpy := *self.trie
|
||||
stateObject.trie = &cpy
|
||||
}
|
||||
stateObject.code = self.code
|
||||
stateObject.dirtyStorage = self.dirtyStorage.Copy()
|
||||
stateObject.cachedStorage = self.dirtyStorage.Copy()
|
||||
|
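A self-contained sketch of the shallow-copy idiom used in the deepCopy hunk above: copying the struct a pointer refers to ("cpy := *p; q = &cpy") gives the copy its own top-level fields, whereas copying only the pointer would alias the original trie (the cursor type below is a made-up stand-in for the trie's mutable state).

package main

import "fmt"

type cursor struct{ pos int } // stand-in for the trie's mutable iteration state

func main() {
	orig := &cursor{pos: 1}

	aliased := orig // pointer copy: both names mutate the same struct
	cpy := *orig    // value copy: independent top-level state
	detached := &cpy

	aliased.pos = 99
	detached.pos = 7

	fmt.Println(orig.pos, detached.pos) // 99 7 — the detached copy is unaffected
}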
@ -62,8 +62,9 @@ type StateDB struct {
|
||||
codeSizeCache *lru.Cache
|
||||
|
||||
// This map holds 'live' objects, which will get modified while processing a state transition.
|
||||
stateObjects map[common.Address]*stateObject
|
||||
stateObjectsDirty map[common.Address]struct{}
|
||||
stateObjects map[common.Address]*stateObject
|
||||
stateObjectsDirty map[common.Address]struct{}
|
||||
stateObjectsDestructed map[common.Address]struct{}
|
||||
|
||||
// The refund counter, also used by state transitioning.
|
||||
refund *big.Int
|
||||
@ -92,14 +93,15 @@ func New(root common.Hash, db ethdb.Database) (*StateDB, error) {
|
||||
}
|
||||
csc, _ := lru.New(codeSizeCacheSize)
|
||||
return &StateDB{
|
||||
db: db,
|
||||
trie: tr,
|
||||
codeSizeCache: csc,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
db: db,
|
||||
trie: tr,
|
||||
codeSizeCache: csc,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -114,14 +116,15 @@ func (self *StateDB) New(root common.Hash) (*StateDB, error) {
|
||||
return nil, err
|
||||
}
|
||||
return &StateDB{
|
||||
db: self.db,
|
||||
trie: tr,
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
db: self.db,
|
||||
trie: tr,
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}),
|
||||
refund: new(big.Int),
|
||||
logs: make(map[common.Hash][]*types.Log),
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
}, nil
|
||||
}
|
||||
|
||||
@ -138,6 +141,7 @@ func (self *StateDB) Reset(root common.Hash) error {
|
||||
self.trie = tr
|
||||
self.stateObjects = make(map[common.Address]*stateObject)
|
||||
self.stateObjectsDirty = make(map[common.Address]struct{})
|
||||
self.stateObjectsDestructed = make(map[common.Address]struct{})
|
||||
self.thash = common.Hash{}
|
||||
self.bhash = common.Hash{}
|
||||
self.txIndex = 0
|
||||
@ -173,12 +177,6 @@ func (self *StateDB) pushTrie(t *trie.SecureTrie) {
|
||||
}
|
||||
}
|
||||
|
||||
func (self *StateDB) StartRecord(thash, bhash common.Hash, ti int) {
|
||||
self.thash = thash
|
||||
self.bhash = bhash
|
||||
self.txIndex = ti
|
||||
}
|
||||
|
||||
func (self *StateDB) AddLog(log *types.Log) {
|
||||
self.journal = append(self.journal, addLogChange{txhash: self.thash})
|
||||
|
||||
@ -296,6 +294,17 @@ func (self *StateDB) GetState(a common.Address, b common.Hash) common.Hash {
|
||||
return common.Hash{}
|
||||
}
|
||||
|
||||
// StorageTrie returns the storage trie of an account.
|
||||
// The return value is a copy and is nil for non-existent accounts.
|
||||
func (self *StateDB) StorageTrie(a common.Address) *trie.SecureTrie {
|
||||
stateObject := self.getStateObject(a)
|
||||
if stateObject == nil {
|
||||
return nil
|
||||
}
|
||||
cpy := stateObject.deepCopy(self, nil)
|
||||
return cpy.updateTrie(self.db)
|
||||
}
|
||||
|
||||
func (self *StateDB) HasSuicided(addr common.Address) bool {
|
||||
stateObject := self.getStateObject(addr)
|
||||
if stateObject != nil {
|
||||
@ -369,6 +378,8 @@ func (self *StateDB) Suicide(addr common.Address) bool {
|
||||
})
|
||||
stateObject.markSuicided()
|
||||
stateObject.data.Balance = new(big.Int)
|
||||
self.stateObjectsDestructed[addr] = struct{}{}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@ -481,7 +492,7 @@ func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common
|
||||
cb(h, value)
|
||||
}
|
||||
|
||||
it := so.getTrie(db.db).Iterator()
|
||||
it := trie.NewIterator(so.getTrie(db.db).NodeIterator(nil))
|
||||
for it.Next() {
|
||||
// ignore cached values
|
||||
key := common.BytesToHash(db.trie.GetKey(it.Key))
|
||||
@ -499,21 +510,25 @@ func (self *StateDB) Copy() *StateDB {
|
||||
|
||||
// Copy all the basic fields, initialize the memory ones
|
||||
state := &StateDB{
|
||||
db: self.db,
|
||||
trie: self.trie,
|
||||
pastTries: self.pastTries,
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
|
||||
refund: new(big.Int).Set(self.refund),
|
||||
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
|
||||
logSize: self.logSize,
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
db: self.db,
|
||||
trie: self.trie,
|
||||
pastTries: self.pastTries,
|
||||
codeSizeCache: self.codeSizeCache,
|
||||
stateObjects: make(map[common.Address]*stateObject, len(self.stateObjectsDirty)),
|
||||
stateObjectsDirty: make(map[common.Address]struct{}, len(self.stateObjectsDirty)),
|
||||
stateObjectsDestructed: make(map[common.Address]struct{}, len(self.stateObjectsDestructed)),
|
||||
refund: new(big.Int).Set(self.refund),
|
||||
logs: make(map[common.Hash][]*types.Log, len(self.logs)),
|
||||
logSize: self.logSize,
|
||||
preimages: make(map[common.Hash][]byte),
|
||||
}
|
||||
// Copy the dirty states, logs, and preimages
|
||||
for addr := range self.stateObjectsDirty {
|
||||
state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state, state.MarkStateObjectDirty)
|
||||
state.stateObjectsDirty[addr] = struct{}{}
|
||||
if self.stateObjects[addr].suicided {
|
||||
state.stateObjectsDestructed[addr] = struct{}{}
|
||||
}
|
||||
}
|
||||
for hash, logs := range self.logs {
|
||||
state.logs[hash] = make([]*types.Log, len(logs))
|
||||
@ -579,6 +594,27 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
|
||||
return s.trie.Hash()
|
||||
}
|
||||
|
||||
// Prepare sets the current transaction hash and index and block hash which is
|
||||
// used when the EVM emits new state logs.
|
||||
func (self *StateDB) Prepare(thash, bhash common.Hash, ti int) {
|
||||
self.thash = thash
|
||||
self.bhash = bhash
|
||||
self.txIndex = ti
|
||||
}
|
||||
|
||||
// Finalise finalises the state by removing the self destructed objects
|
||||
// in the current stateObjectsDestructed buffer and clears the journal
|
||||
// as well as the refunds.
|
||||
//
|
||||
// Please note that Finalise is used by EIP#98 and is used instead of
|
||||
// IntermediateRoot.
|
||||
func (s *StateDB) Finalise() {
|
||||
for addr := range s.stateObjectsDestructed {
|
||||
s.deleteStateObject(s.stateObjects[addr])
|
||||
}
|
||||
s.clearJournalAndRefund()
|
||||
}
|
||||
|
||||
// DeleteSuicides flags the suicided objects for deletion so that it
|
||||
// won't be referenced again when called / queried up on.
|
||||
//
|
||||
|
@ -69,7 +69,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
|
||||
}
|
||||
// Iterate over and process the individual transactions
|
||||
for i, tx := range block.Transactions() {
|
||||
statedb.StartRecord(tx.Hash(), block.Hash(), i)
|
||||
statedb.Prepare(tx.Hash(), block.Hash(), i)
|
||||
receipt, _, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, totalUsedGas, cfg)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
@ -107,7 +107,8 @@ func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common
|
||||
usedGas.Add(usedGas, gas)
|
||||
// Create a new receipt for the transaction, storing the intermediate root and gas used by the tx
|
||||
// based on the eip phase, we're passing whether the root touch-deletes accounts.
|
||||
receipt := types.NewReceipt(statedb.IntermediateRoot(config.IsEIP158(header.Number)).Bytes(), usedGas)
|
||||
root := statedb.IntermediateRoot(config.IsEIP158(header.Number))
|
||||
receipt := types.NewReceipt(root.Bytes(), usedGas)
|
||||
receipt.TxHash = tx.Hash()
|
||||
receipt.GasUsed = new(big.Int).Set(gas)
|
||||
// if the transaction created a contract, store the creation address in the receipt.
|
||||
|
@ -78,10 +78,6 @@ type Message interface {
|
||||
Data() []byte
|
||||
}
|
||||
|
||||
func MessageCreatesContract(msg Message) bool {
|
||||
return msg.To() == nil
|
||||
}
|
||||
|
||||
// IntrinsicGas computes the 'intrinsic gas' for a message
|
||||
// with the given data.
|
||||
//
|
||||
@ -138,112 +134,113 @@ func ApplyMessage(evm *vm.EVM, msg Message, gp *GasPool) ([]byte, *big.Int, erro
|
||||
return ret, gasUsed, err
|
||||
}
|
||||
|
||||
func (self *StateTransition) from() vm.AccountRef {
|
||||
f := self.msg.From()
|
||||
if !self.state.Exist(f) {
|
||||
self.state.CreateAccount(f)
|
||||
func (st *StateTransition) from() vm.AccountRef {
|
||||
f := st.msg.From()
|
||||
if !st.state.Exist(f) {
|
||||
st.state.CreateAccount(f)
|
||||
}
|
||||
return vm.AccountRef(f)
|
||||
}
|
||||
|
||||
func (self *StateTransition) to() vm.AccountRef {
|
||||
if self.msg == nil {
|
||||
func (st *StateTransition) to() vm.AccountRef {
|
||||
if st.msg == nil {
|
||||
return vm.AccountRef{}
|
||||
}
|
||||
to := self.msg.To()
|
||||
to := st.msg.To()
|
||||
if to == nil {
|
||||
return vm.AccountRef{} // contract creation
|
||||
}
|
||||
|
||||
reference := vm.AccountRef(*to)
|
||||
if !self.state.Exist(*to) {
|
||||
self.state.CreateAccount(*to)
|
||||
if !st.state.Exist(*to) {
|
||||
st.state.CreateAccount(*to)
|
||||
}
|
||||
return reference
|
||||
}
|
||||
|
||||
func (self *StateTransition) useGas(amount uint64) error {
|
||||
if self.gas < amount {
|
||||
func (st *StateTransition) useGas(amount uint64) error {
|
||||
if st.gas < amount {
|
||||
return vm.ErrOutOfGas
|
||||
}
|
||||
self.gas -= amount
|
||||
st.gas -= amount
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *StateTransition) buyGas() error {
|
||||
mgas := self.msg.Gas()
|
||||
func (st *StateTransition) buyGas() error {
|
||||
mgas := st.msg.Gas()
|
||||
if mgas.BitLen() > 64 {
|
||||
return vm.ErrOutOfGas
|
||||
}
|
||||
|
||||
mgval := new(big.Int).Mul(mgas, self.gasPrice)
|
||||
mgval := new(big.Int).Mul(mgas, st.gasPrice)
|
||||
|
||||
var (
|
||||
state = self.state
|
||||
sender = self.from()
|
||||
state = st.state
|
||||
sender = st.from()
|
||||
)
|
||||
if state.GetBalance(sender.Address()).Cmp(mgval) < 0 {
|
||||
return errInsufficientBalanceForGas
|
||||
}
|
||||
if err := self.gp.SubGas(mgas); err != nil {
|
||||
if err := st.gp.SubGas(mgas); err != nil {
|
||||
return err
|
||||
}
|
||||
self.gas += mgas.Uint64()
|
||||
st.gas += mgas.Uint64()
|
||||
|
||||
self.initialGas.Set(mgas)
|
||||
st.initialGas.Set(mgas)
|
||||
state.SubBalance(sender.Address(), mgval)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *StateTransition) preCheck() error {
|
||||
msg := self.msg
|
||||
sender := self.from()
|
||||
func (st *StateTransition) preCheck() error {
|
||||
msg := st.msg
|
||||
sender := st.from()
|
||||
|
||||
// Make sure this transaction's nonce is correct
|
||||
if msg.CheckNonce() {
|
||||
if n := self.state.GetNonce(sender.Address()); n != msg.Nonce() {
|
||||
if n := st.state.GetNonce(sender.Address()); n != msg.Nonce() {
|
||||
return fmt.Errorf("invalid nonce: have %d, expected %d", msg.Nonce(), n)
|
||||
}
|
||||
}
|
||||
return self.buyGas()
|
||||
return st.buyGas()
|
||||
}
|
||||
|
||||
// TransitionDb will transition the state by applying the current message and returning the result
|
||||
// including the required gas for the operation as well as the used gas. It returns an error if it
|
||||
// failed. An error indicates a consensus issue.
|
||||
func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
|
||||
if err = self.preCheck(); err != nil {
|
||||
func (st *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *big.Int, err error) {
|
||||
if err = st.preCheck(); err != nil {
|
||||
return
|
||||
}
|
||||
msg := self.msg
|
||||
sender := self.from() // err checked in preCheck
|
||||
msg := st.msg
|
||||
sender := st.from() // err checked in preCheck
|
||||
|
||||
homestead := st.evm.ChainConfig().IsHomestead(st.evm.BlockNumber)
|
||||
contractCreation := msg.To() == nil
|
||||
|
||||
homestead := self.evm.ChainConfig().IsHomestead(self.evm.BlockNumber)
|
||||
contractCreation := MessageCreatesContract(msg)
|
||||
// Pay intrinsic gas
|
||||
// TODO convert to uint64
|
||||
intrinsicGas := IntrinsicGas(self.data, contractCreation, homestead)
|
||||
intrinsicGas := IntrinsicGas(st.data, contractCreation, homestead)
|
||||
if intrinsicGas.BitLen() > 64 {
|
||||
return nil, nil, nil, vm.ErrOutOfGas
|
||||
}
|
||||
if err = self.useGas(intrinsicGas.Uint64()); err != nil {
|
||||
if err = st.useGas(intrinsicGas.Uint64()); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
evm = self.evm
|
||||
evm = st.evm
|
||||
// vm errors do not affect consensus and are therefore
|
||||
// not assigned to err, except for insufficient balance
|
||||
// error.
|
||||
vmerr error
|
||||
)
|
||||
if contractCreation {
|
||||
ret, _, self.gas, vmerr = evm.Create(sender, self.data, self.gas, self.value)
|
||||
ret, _, st.gas, vmerr = evm.Create(sender, st.data, st.gas, st.value)
|
||||
} else {
|
||||
// Increment the nonce for the next transaction
|
||||
self.state.SetNonce(sender.Address(), self.state.GetNonce(sender.Address())+1)
|
||||
ret, self.gas, vmerr = evm.Call(sender, self.to().Address(), self.data, self.gas, self.value)
|
||||
st.state.SetNonce(sender.Address(), st.state.GetNonce(sender.Address())+1)
|
||||
ret, st.gas, vmerr = evm.Call(sender, st.to().Address(), st.data, st.gas, st.value)
|
||||
}
|
||||
if vmerr != nil {
|
||||
log.Debug("VM returned with error", "err", err)
|
||||
@ -254,33 +251,33 @@ func (self *StateTransition) TransitionDb() (ret []byte, requiredGas, usedGas *b
|
||||
return nil, nil, nil, vmerr
|
||||
}
|
||||
}
|
||||
requiredGas = new(big.Int).Set(self.gasUsed())
|
||||
requiredGas = new(big.Int).Set(st.gasUsed())
|
||||
|
||||
self.refundGas()
|
||||
self.state.AddBalance(self.evm.Coinbase, new(big.Int).Mul(self.gasUsed(), self.gasPrice))
|
||||
st.refundGas()
|
||||
st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(st.gasUsed(), st.gasPrice))
|
||||
|
||||
return ret, requiredGas, self.gasUsed(), err
|
||||
return ret, requiredGas, st.gasUsed(), err
|
||||
}
|
||||
|
||||
func (self *StateTransition) refundGas() {
|
||||
func (st *StateTransition) refundGas() {
|
||||
// Return eth for remaining gas to the sender account,
|
||||
// exchanged at the original rate.
|
||||
sender := self.from() // err already checked
|
||||
remaining := new(big.Int).Mul(new(big.Int).SetUint64(self.gas), self.gasPrice)
|
||||
self.state.AddBalance(sender.Address(), remaining)
|
||||
sender := st.from() // err already checked
|
||||
remaining := new(big.Int).Mul(new(big.Int).SetUint64(st.gas), st.gasPrice)
|
||||
st.state.AddBalance(sender.Address(), remaining)
|
||||
|
||||
// Apply refund counter, capped to half of the used gas.
|
||||
uhalf := remaining.Div(self.gasUsed(), common.Big2)
|
||||
refund := math.BigMin(uhalf, self.state.GetRefund())
|
||||
self.gas += refund.Uint64()
|
||||
uhalf := remaining.Div(st.gasUsed(), common.Big2)
|
||||
refund := math.BigMin(uhalf, st.state.GetRefund())
|
||||
st.gas += refund.Uint64()
|
||||
|
||||
self.state.AddBalance(sender.Address(), refund.Mul(refund, self.gasPrice))
|
||||
st.state.AddBalance(sender.Address(), refund.Mul(refund, st.gasPrice))
|
||||
|
||||
// Also return remaining gas to the block gas counter so it is
|
||||
// available for the next transaction.
|
||||
self.gp.AddGas(new(big.Int).SetUint64(self.gas))
|
||||
st.gp.AddGas(new(big.Int).SetUint64(st.gas))
|
||||
}
|
||||
|
||||
func (self *StateTransition) gasUsed() *big.Int {
|
||||
return new(big.Int).Sub(self.initialGas, new(big.Int).SetUint64(self.gas))
|
||||
func (st *StateTransition) gasUsed() *big.Int {
|
||||
return new(big.Int).Sub(st.initialGas, new(big.Int).SetUint64(st.gas))
|
||||
}
|
||||
|
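A hedged sketch (the numbers are illustrative, not from the diff) of the gas arithmetic the renamed refundGas/gasUsed methods above implement: the sender gets the unused gas back at the original price, plus a refund capped at half of the gas actually consumed.

package main

import (
	"fmt"
	"math/big"
)

func main() {
	initialGas := big.NewInt(100000) // gas bought with the transaction
	gasLeft := big.NewInt(20000)     // st.gas remaining after execution
	gasPrice := big.NewInt(20)       // wei per gas (hypothetical)
	stateRefund := big.NewInt(30000) // accumulated refund counter (hypothetical)

	gasUsed := new(big.Int).Sub(initialGas, gasLeft) // 80000

	// Unused gas is returned to the sender at the original price.
	remaining := new(big.Int).Mul(gasLeft, gasPrice) // 400000 wei

	// The refund counter is capped at half of the used gas.
	uhalf := new(big.Int).Div(gasUsed, big.NewInt(2)) // 40000
	refund := new(big.Int).Set(uhalf)
	if stateRefund.Cmp(uhalf) < 0 {
		refund.Set(stateRefund) // min(uhalf, stateRefund) = 30000
	}
	fmt.Println(gasUsed, remaining, refund) // 80000 400000 30000
}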
208
core/tx_list.go
208
core/tx_list.go
@ -22,7 +22,9 @@ import (
|
||||
"math/big"
|
||||
"sort"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// nonceHeap is a heap.Interface implementation over 64bit unsigned integers for
|
||||
@ -53,11 +55,11 @@ type txSortedMap struct {
|
||||
cache types.Transactions // Cache of the transactions already sorted
|
||||
}
|
||||
|
||||
// newTxSortedMap creates a new sorted transaction map.
|
||||
// newTxSortedMap creates a new nonce-sorted transaction map.
|
||||
func newTxSortedMap() *txSortedMap {
|
||||
return &txSortedMap{
|
||||
items: make(map[uint64]*types.Transaction),
|
||||
index: &nonceHeap{},
|
||||
index: new(nonceHeap),
|
||||
}
|
||||
}
|
||||
|
||||
@ -218,9 +220,11 @@ func (m *txSortedMap) Flatten() types.Transactions {
|
||||
// the executable/pending queue; and for storing gapped transactions for the non-
|
||||
// executable/future queue, with minor behavioral changes.
|
||||
type txList struct {
|
||||
strict bool // Whether nonces are strictly continuous or not
|
||||
txs *txSortedMap // Heap indexed sorted hash map of the transactions
|
||||
costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
|
||||
strict bool // Whether nonces are strictly continuous or not
|
||||
txs *txSortedMap // Heap indexed sorted hash map of the transactions
|
||||
|
||||
costcap *big.Int // Price of the highest costing transaction (reset only if exceeds balance)
|
||||
gascap *big.Int // Gas limit of the highest spending transaction (reset only if exceeds block limit)
|
||||
}
|
||||
|
||||
// newTxList create a new transaction list for maintaining nonce-indexable fast,
|
||||
@ -230,25 +234,38 @@ func newTxList(strict bool) *txList {
|
||||
strict: strict,
|
||||
txs: newTxSortedMap(),
|
||||
costcap: new(big.Int),
|
||||
gascap: new(big.Int),
|
||||
}
|
||||
}
|
||||
|
||||
// Overlaps returns whether the transaction specified has the same nonce as one
|
||||
// already contained within the list.
|
||||
func (l *txList) Overlaps(tx *types.Transaction) bool {
|
||||
return l.txs.Get(tx.Nonce()) != nil
|
||||
}
|
||||
|
||||
// Add tries to insert a new transaction into the list, returning whether the
|
||||
// transaction was accepted, and if yes, any previous transaction it replaced.
|
||||
//
|
||||
// If the new transaction is accepted into the list, the lists' cost threshold
|
||||
// is also potentially updated.
|
||||
func (l *txList) Add(tx *types.Transaction) (bool, *types.Transaction) {
|
||||
// If the new transaction is accepted into the list, the lists' cost and gas
|
||||
// thresholds are also potentially updated.
|
||||
func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) {
|
||||
// If there's an older better transaction, abort
|
||||
old := l.txs.Get(tx.Nonce())
|
||||
if old != nil && old.GasPrice().Cmp(tx.GasPrice()) >= 0 {
|
||||
return false, nil
|
||||
if old != nil {
|
||||
threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100))
|
||||
if threshold.Cmp(tx.GasPrice()) >= 0 {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
// Otherwise overwrite the old transaction with the current one
|
||||
l.txs.Put(tx)
|
||||
if cost := tx.Cost(); l.costcap.Cmp(cost) < 0 {
|
||||
l.costcap = cost
|
||||
}
|
||||
if gas := tx.Gas(); l.gascap.Cmp(gas) < 0 {
|
||||
l.gascap = gas
|
||||
}
|
||||
return true, old
|
||||
}
|
||||
|
||||
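A hedged, self-contained sketch (prices are made up) of the replacement rule added in the hunk above: a transaction with the same nonce only replaces the existing one if its gas price exceeds the old price bumped by priceBump percent.

package main

import (
	"fmt"
	"math/big"
)

// replaces reports whether newPrice is enough to displace oldPrice given the
// configured minimum price bump, mirroring the threshold check in txList.Add.
func replaces(oldPrice, newPrice *big.Int, priceBump uint64) bool {
	threshold := new(big.Int).Div(
		new(big.Int).Mul(oldPrice, big.NewInt(100+int64(priceBump))),
		big.NewInt(100),
	)
	return threshold.Cmp(newPrice) < 0 // the new price must be strictly above the threshold
}

func main() {
	old := big.NewInt(20000000000) // 20 gwei
	fmt.Println(replaces(old, big.NewInt(21000000000), 10)) // false: 21 gwei <= 22 gwei threshold
	fmt.Println(replaces(old, big.NewInt(23000000000), 10)) // true:  23 gwei >  22 gwei threshold
}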
@ -259,23 +276,25 @@ func (l *txList) Forward(threshold uint64) types.Transactions {
|
||||
return l.txs.Forward(threshold)
|
||||
}
|
||||
|
||||
// Filter removes all transactions from the list with a cost higher than the
|
||||
// provided threshold. Every removed transaction is returned for any post-removal
|
||||
// maintenance. Strict-mode invalidated transactions are also returned.
|
||||
// Filter removes all transactions from the list with a cost or gas limit higher
|
||||
// than the provided thresholds. Every removed transaction is returned for any
|
||||
// post-removal maintenance. Strict-mode invalidated transactions are also
|
||||
// returned.
|
||||
//
|
||||
// This method uses the cached costcap to quickly decide if there's even a point
|
||||
// in calculating all the costs or if the balance covers all. If the threshold is
|
||||
// lower than the costcap, the costcap will be reset to a new high after removing
|
||||
// the too expensive transactions.
|
||||
func (l *txList) Filter(threshold *big.Int) (types.Transactions, types.Transactions) {
|
||||
// This method uses the cached costcap and gascap to quickly decide if there's even
|
||||
// a point in calculating all the costs or if the balance covers all. If the threshold
|
||||
// is lower than the costgas cap, the caps will be reset to a new high after removing
|
||||
// the newly invalidated transactions.
|
||||
func (l *txList) Filter(costLimit, gasLimit *big.Int) (types.Transactions, types.Transactions) {
|
||||
// If all transactions are below the threshold, short circuit
|
||||
if l.costcap.Cmp(threshold) <= 0 {
|
||||
if l.costcap.Cmp(costLimit) <= 0 && l.gascap.Cmp(gasLimit) <= 0 {
|
||||
return nil, nil
|
||||
}
|
||||
l.costcap = new(big.Int).Set(threshold) // Lower the cap to the threshold
|
||||
l.costcap = new(big.Int).Set(costLimit) // Lower the caps to the thresholds
|
||||
l.gascap = new(big.Int).Set(gasLimit)
|
||||
|
||||
// Filter out all the transactions above the account's funds
|
||||
removed := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Cost().Cmp(threshold) > 0 })
|
||||
removed := l.txs.Filter(func(tx *types.Transaction) bool { return tx.Cost().Cmp(costLimit) > 0 || tx.Gas().Cmp(gasLimit) > 0 })
|
||||
|
||||
// If the list was strict, filter anything above the lowest nonce
|
||||
var invalids types.Transactions
|
||||
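An illustrative, self-contained version of the dual-threshold filter the hunk above introduces: drop every transaction whose cost exceeds the sender's balance or whose gas limit exceeds the block gas limit (transaction type simplified to two big.Int fields).

package main

import (
	"fmt"
	"math/big"
)

type fakeTx struct{ cost, gas *big.Int }

// filter splits txs into those within both limits and those exceeding either.
func filter(txs []fakeTx, costLimit, gasLimit *big.Int) (kept, removed []fakeTx) {
	for _, tx := range txs {
		if tx.cost.Cmp(costLimit) > 0 || tx.gas.Cmp(gasLimit) > 0 {
			removed = append(removed, tx)
			continue
		}
		kept = append(kept, tx)
	}
	return kept, removed
}

func main() {
	txs := []fakeTx{
		{cost: big.NewInt(100), gas: big.NewInt(21000)},
		{cost: big.NewInt(500), gas: big.NewInt(21000)},   // too expensive for a 300 balance
		{cost: big.NewInt(100), gas: big.NewInt(9000000)}, // over the block gas limit
	}
	kept, removed := filter(txs, big.NewInt(300), big.NewInt(4700000))
	fmt.Println(len(kept), len(removed)) // 1 2
}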
@ -340,3 +359,150 @@ func (l *txList) Empty() bool {
|
||||
func (l *txList) Flatten() types.Transactions {
|
||||
return l.txs.Flatten()
|
||||
}
|
||||
|
||||
// priceHeap is a heap.Interface implementation over transactions for retrieving
|
||||
// price-sorted transactions to discard when the pool fills up.
|
||||
type priceHeap []*types.Transaction
|
||||
|
||||
func (h priceHeap) Len() int { return len(h) }
|
||||
func (h priceHeap) Less(i, j int) bool { return h[i].GasPrice().Cmp(h[j].GasPrice()) < 0 }
|
||||
func (h priceHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
|
||||
|
||||
func (h *priceHeap) Push(x interface{}) {
|
||||
*h = append(*h, x.(*types.Transaction))
|
||||
}
|
||||
|
||||
func (h *priceHeap) Pop() interface{} {
|
||||
old := *h
|
||||
n := len(old)
|
||||
x := old[n-1]
|
||||
*h = old[0 : n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
// txPricedList is a price-sorted heap to allow operating on transactions pool
|
||||
// contents in a price-incrementing way.
|
||||
type txPricedList struct {
|
||||
all *map[common.Hash]*types.Transaction // Pointer to the map of all transactions
|
||||
items *priceHeap // Heap of prices of all the stored transactions
|
||||
stales int // Number of stale price points to (re-heap trigger)
|
||||
}
|
||||
|
||||
// newTxPricedList creates a new price-sorted transaction heap.
|
||||
func newTxPricedList(all *map[common.Hash]*types.Transaction) *txPricedList {
|
||||
return &txPricedList{
|
||||
all: all,
|
||||
items: new(priceHeap),
|
||||
}
|
||||
}
|
||||
|
||||
// Put inserts a new transaction into the heap.
|
||||
func (l *txPricedList) Put(tx *types.Transaction) {
|
||||
heap.Push(l.items, tx)
|
||||
}
|
||||
|
||||
// Removed notifies the priced transaction list that an old transaction dropped
|
||||
// from the pool. The list will just keep a counter of stale objects and update
|
||||
// the heap if a large enough ratio of transactions go stale.
|
||||
func (l *txPricedList) Removed() {
|
||||
// Bump the stale counter, but exit if still too low (< 25%)
|
||||
l.stales++
|
||||
if l.stales <= len(*l.items)/4 {
|
||||
return
|
||||
}
|
||||
// Seems we've reached a critical number of stale transactions, reheap
|
||||
reheap := make(priceHeap, 0, len(*l.all))
|
||||
|
||||
l.stales, l.items = 0, &reheap
|
||||
for _, tx := range *l.all {
|
||||
*l.items = append(*l.items, tx)
|
||||
}
|
||||
heap.Init(l.items)
|
||||
}
|
||||
|
||||
// Discard finds all the transactions below the given price threshold, drops them
|
||||
// from the priced list and returns them for further removal from the entire pool.
|
||||
func (l *txPricedList) Cap(threshold *big.Int, local *txSet) types.Transactions {
|
||||
drop := make(types.Transactions, 0, 128) // Remote underpriced transactions to drop
|
||||
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
|
||||
|
||||
for len(*l.items) > 0 {
|
||||
// Discard stale transactions if found during cleanup
|
||||
tx := heap.Pop(l.items).(*types.Transaction)
|
||||
|
||||
hash := tx.Hash()
|
||||
if _, ok := (*l.all)[hash]; !ok {
|
||||
l.stales--
|
||||
continue
|
||||
}
|
||||
// Stop the discards if we've reached the threshold
|
||||
if tx.GasPrice().Cmp(threshold) >= 0 {
|
||||
break
|
||||
}
|
||||
// Non stale transaction found, discard unless local
|
||||
if local.contains(hash) {
|
||||
save = append(save, tx)
|
||||
} else {
|
||||
drop = append(drop, tx)
|
||||
}
|
||||
}
|
||||
for _, tx := range save {
|
||||
heap.Push(l.items, tx)
|
||||
}
|
||||
return drop
|
||||
}
|
||||
|
||||
// Underpriced checks whether a transaction is cheaper than (or as cheap as) the
|
||||
// lowest priced transaction currently being tracked.
|
||||
func (l *txPricedList) Underpriced(tx *types.Transaction, local *txSet) bool {
|
||||
// Local transactions cannot be underpriced
|
||||
if local.contains(tx.Hash()) {
|
||||
return false
|
||||
}
|
||||
// Discard stale price points if found at the heap start
|
||||
for len(*l.items) > 0 {
|
||||
head := []*types.Transaction(*l.items)[0]
|
||||
if _, ok := (*l.all)[head.Hash()]; !ok {
|
||||
l.stales--
|
||||
heap.Pop(l.items)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
// Check if the transaction is underpriced or not
|
||||
if len(*l.items) == 0 {
|
||||
log.Error("Pricing query for empty pool") // This cannot happen, print to catch programming errors
|
||||
return false
|
||||
}
|
||||
cheapest := []*types.Transaction(*l.items)[0]
|
||||
return cheapest.GasPrice().Cmp(tx.GasPrice()) >= 0
|
||||
}
|
||||
|
||||
// Discard finds a number of most underpriced transactions, removes them from the
|
||||
// priced list and returns them for further removal from the entire pool.
|
||||
func (l *txPricedList) Discard(count int, local *txSet) types.Transactions {
|
||||
drop := make(types.Transactions, 0, count) // Remote underpriced transactions to drop
|
||||
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
|
||||
|
||||
for len(*l.items) > 0 && count > 0 {
|
||||
// Discard stale transactions if found during cleanup
|
||||
tx := heap.Pop(l.items).(*types.Transaction)
|
||||
|
||||
hash := tx.Hash()
|
||||
if _, ok := (*l.all)[hash]; !ok {
|
||||
l.stales--
|
||||
continue
|
||||
}
|
||||
// Non stale transaction found, discard unless local
|
||||
if local.contains(hash) {
|
||||
save = append(save, tx)
|
||||
} else {
|
||||
drop = append(drop, tx)
|
||||
count--
|
||||
}
|
||||
}
|
||||
for _, tx := range save {
|
||||
heap.Push(l.items, tx)
|
||||
}
|
||||
return drop
|
||||
}
|
||||
|
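A minimal, standalone sketch (with a simplified transaction type) of how the price-sorted heap added above is driven via container/heap: Push keeps the cheapest transaction at the root, so Pop yields eviction candidates in ascending price order, which is the order Cap and Discard walk when making room in the pool.

package main

import (
	"container/heap"
	"fmt"
	"math/big"
)

type tx struct{ price *big.Int }

type priceHeap []*tx

func (h priceHeap) Len() int            { return len(h) }
func (h priceHeap) Less(i, j int) bool  { return h[i].price.Cmp(h[j].price) < 0 }
func (h priceHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *priceHeap) Push(x interface{}) { *h = append(*h, x.(*tx)) }
func (h *priceHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := new(priceHeap)
	heap.Init(h)
	for _, p := range []int64{40, 10, 30, 20} {
		heap.Push(h, &tx{price: big.NewInt(p)})
	}
	// Cheapest first: 10, then 20.
	fmt.Println(heap.Pop(h).(*tx).price, heap.Pop(h).(*tx).price)
}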
@ -38,7 +38,7 @@ func TestStrictTxListAdd(t *testing.T) {
|
||||
// Insert the transactions in a random order
|
||||
list := newTxList(true)
|
||||
for _, v := range rand.Perm(len(txs)) {
|
||||
list.Add(txs[v])
|
||||
list.Add(txs[v], DefaultTxPoolConfig.PriceBump)
|
||||
}
|
||||
// Verify internal state
|
||||
if len(list.txs.items) != len(txs) {
|
||||
|
415
core/tx_pool.go
415
core/tx_pool.go
@ -36,23 +36,20 @@ import (
|
||||
|
||||
var (
|
||||
// Transaction Pool Errors
|
||||
ErrInvalidSender = errors.New("Invalid sender")
|
||||
ErrNonce = errors.New("Nonce too low")
|
||||
ErrCheap = errors.New("Gas price too low for acceptance")
|
||||
ErrBalance = errors.New("Insufficient balance")
|
||||
ErrInsufficientFunds = errors.New("Insufficient funds for gas * price + value")
|
||||
ErrIntrinsicGas = errors.New("Intrinsic gas too low")
|
||||
ErrGasLimit = errors.New("Exceeds block gas limit")
|
||||
ErrNegativeValue = errors.New("Negative value")
|
||||
ErrInvalidSender = errors.New("invalid sender")
|
||||
ErrNonce = errors.New("nonce too low")
|
||||
ErrUnderpriced = errors.New("transaction underpriced")
|
||||
ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
|
||||
ErrBalance = errors.New("insufficient balance")
|
||||
ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value")
|
||||
ErrIntrinsicGas = errors.New("intrinsic gas too low")
|
||||
ErrGasLimit = errors.New("exceeds block gas limit")
|
||||
ErrNegativeValue = errors.New("negative value")
|
||||
)
|
||||
|
||||
var (
|
||||
minPendingPerAccount = uint64(16) // Min number of guaranteed transaction slots per address
|
||||
maxPendingTotal = uint64(4096) // Max limit of pending transactions from all accounts (soft)
|
||||
maxQueuedPerAccount = uint64(64) // Max limit of queued transactions per address
|
||||
maxQueuedInTotal = uint64(1024) // Max limit of queued transactions from all accounts
|
||||
maxQueuedLifetime = 3 * time.Hour // Max amount of time transactions from idle accounts are queued
|
||||
evictionInterval = time.Minute // Time interval to check for evictable transactions
|
||||
evictionInterval = time.Minute // Time interval to check for evictable transactions
|
||||
statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats
|
||||
)
|
||||
|
||||
var (
|
||||
@ -69,11 +66,54 @@ var (
|
||||
queuedNofundsCounter = metrics.NewCounter("txpool/queued/nofunds") // Dropped due to out-of-funds
|
||||
|
||||
// General tx metrics
|
||||
invalidTxCounter = metrics.NewCounter("txpool/invalid")
|
||||
invalidTxCounter = metrics.NewCounter("txpool/invalid")
|
||||
underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
|
||||
)
|
||||
|
||||
type stateFn func() (*state.StateDB, error)
|
||||
|
||||
// TxPoolConfig are the configuration parameters of the transaction pool.
|
||||
type TxPoolConfig struct {
|
||||
PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool
|
||||
PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce)
|
||||
|
||||
AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account
|
||||
GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts
|
||||
AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account
|
||||
GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts
|
||||
|
||||
Lifetime time.Duration // Maximum amount of time non-executable transaction are queued
|
||||
}
|
||||
|
||||
// DefaultTxPoolConfig contains the default configurations for the transaction
|
||||
// pool.
|
||||
var DefaultTxPoolConfig = TxPoolConfig{
|
||||
PriceLimit: 1,
|
||||
PriceBump: 10,
|
||||
|
||||
AccountSlots: 16,
|
||||
GlobalSlots: 4096,
|
||||
AccountQueue: 64,
|
||||
GlobalQueue: 1024,
|
||||
|
||||
Lifetime: 3 * time.Hour,
|
||||
}
|
||||
|
||||
// sanitize checks the provided user configurations and changes anything that's
|
||||
// unreasonable or unworkable.
|
||||
func (config *TxPoolConfig) sanitize() TxPoolConfig {
|
||||
conf := *config
|
||||
if conf.PriceLimit < 1 {
|
||||
log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit)
|
||||
conf.PriceLimit = DefaultTxPoolConfig.PriceLimit
|
||||
}
|
||||
if conf.PriceBump < 1 {
|
||||
log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump)
|
||||
conf.PriceBump = DefaultTxPoolConfig.PriceBump
|
||||
}
|
||||
return conf
|
||||
}
|
||||
|
||||
// TxPool contains all currently known transactions. Transactions
|
||||
// enter the pool when they are received from the network or submitted
|
||||
// locally. They exit the pool when they are included in the blockchain.
|
||||
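A hedged, self-contained miniature of the sanitize pattern introduced above (field names trimmed down, values made up): copy the user-supplied config, clamp anything unsafe, and return the cleaned copy so the caller's struct is never mutated.

package main

import "fmt"

type poolConfig struct {
	PriceLimit uint64
	PriceBump  uint64
}

var defaultPoolConfig = poolConfig{PriceLimit: 1, PriceBump: 10}

// sanitize returns a copy of c with invalid values reset to the defaults.
func (c *poolConfig) sanitize() poolConfig {
	conf := *c
	if conf.PriceLimit < 1 {
		conf.PriceLimit = defaultPoolConfig.PriceLimit
	}
	if conf.PriceBump < 1 {
		conf.PriceBump = defaultPoolConfig.PriceBump
	}
	return conf
}

func main() {
	user := poolConfig{PriceLimit: 0, PriceBump: 25}
	fmt.Println(user.sanitize()) // {1 25}: invalid price limit clamped, custom bump kept
}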
@ -82,21 +122,23 @@ type stateFn func() (*state.StateDB, error)
|
||||
// current state) and future transactions. Transactions move between those
|
||||
// two states over time as they are received and processed.
|
||||
type TxPool struct {
|
||||
config *params.ChainConfig
|
||||
config TxPoolConfig
|
||||
chainconfig *params.ChainConfig
|
||||
currentState stateFn // The state function which will allow us to do some pre checks
|
||||
pendingState *state.ManagedState
|
||||
gasLimit func() *big.Int // The current gas limit function callback
|
||||
minGasPrice *big.Int
|
||||
gasPrice *big.Int
|
||||
eventMux *event.TypeMux
|
||||
events *event.TypeMuxSubscription
|
||||
localTx *txSet
|
||||
locals *txSet
|
||||
signer types.Signer
|
||||
mu sync.RWMutex
|
||||
|
||||
pending map[common.Address]*txList // All currently processable transactions
|
||||
queue map[common.Address]*txList // Queued but non-processable transactions
|
||||
all map[common.Hash]*types.Transaction // All transactions to allow lookups
|
||||
beats map[common.Address]time.Time // Last heartbeat from each known account
|
||||
all map[common.Hash]*types.Transaction // All transactions to allow lookups
|
||||
priced *txPricedList // All transactions sorted by price
|
||||
|
||||
wg sync.WaitGroup // for shutdown sync
|
||||
quit chan struct{}
|
||||
@ -104,26 +146,34 @@ type TxPool struct {
|
||||
homestead bool
|
||||
}
|
||||
|
||||
func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
|
||||
// NewTxPool creates a new transaction pool to gather, sort and filter inbound
|
||||
// transactions from the network.
|
||||
func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool {
|
||||
// Sanitize the input to ensure no vulnerable gas prices are set
|
||||
config = (&config).sanitize()
|
||||
|
||||
// Create the transaction pool with its initial settings
|
||||
pool := &TxPool{
|
||||
config: config,
|
||||
signer: types.NewEIP155Signer(config.ChainId),
|
||||
chainconfig: chainconfig,
|
||||
signer: types.NewEIP155Signer(chainconfig.ChainId),
|
||||
pending: make(map[common.Address]*txList),
|
||||
queue: make(map[common.Address]*txList),
|
||||
all: make(map[common.Hash]*types.Transaction),
|
||||
beats: make(map[common.Address]time.Time),
|
||||
all: make(map[common.Hash]*types.Transaction),
|
||||
eventMux: eventMux,
|
||||
currentState: currentStateFn,
|
||||
gasLimit: gasLimitFn,
|
||||
minGasPrice: new(big.Int),
|
||||
gasPrice: new(big.Int).SetUint64(config.PriceLimit),
|
||||
pendingState: nil,
|
||||
localTx: newTxSet(),
|
||||
events: eventMux.Subscribe(ChainHeadEvent{}, GasPriceChanged{}, RemovedTransactionEvent{}),
|
||||
locals: newTxSet(),
|
||||
events: eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}),
|
||||
quit: make(chan struct{}),
|
||||
}
|
||||
|
||||
pool.priced = newTxPricedList(&pool.all)
|
||||
pool.resetState()
|
||||
|
||||
// Start the various events loops and return
|
||||
pool.wg.Add(2)
|
||||
go pool.eventLoop()
|
||||
go pool.expirationLoop()
|
||||
@ -134,27 +184,48 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState
|
||||
func (pool *TxPool) eventLoop() {
|
||||
defer pool.wg.Done()
|
||||
|
||||
// Start a ticker and keep track of interesting pool stats to report
|
||||
var prevPending, prevQueued, prevStales int
|
||||
|
||||
report := time.NewTicker(statsReportInterval)
|
||||
defer report.Stop()
|
||||
|
||||
// Track chain events. When a chain events occurs (new chain canon block)
|
||||
// we need to know the new state. The new state will help us determine
|
||||
// the nonces in the managed state
|
||||
for ev := range pool.events.Chan() {
|
||||
switch ev := ev.Data.(type) {
|
||||
case ChainHeadEvent:
|
||||
pool.mu.Lock()
|
||||
if ev.Block != nil {
|
||||
if pool.config.IsHomestead(ev.Block.Number()) {
|
||||
pool.homestead = true
|
||||
for {
|
||||
select {
|
||||
// Handle any events fired by the system
|
||||
case ev, ok := <-pool.events.Chan():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
switch ev := ev.Data.(type) {
|
||||
case ChainHeadEvent:
|
||||
pool.mu.Lock()
|
||||
if ev.Block != nil {
|
||||
if pool.chainconfig.IsHomestead(ev.Block.Number()) {
|
||||
pool.homestead = true
|
||||
}
|
||||
}
|
||||
pool.resetState()
|
||||
pool.mu.Unlock()
|
||||
|
||||
case RemovedTransactionEvent:
|
||||
pool.AddBatch(ev.Txs)
|
||||
}
|
||||
|
||||
pool.resetState()
|
||||
pool.mu.Unlock()
|
||||
case GasPriceChanged:
|
||||
pool.mu.Lock()
|
||||
pool.minGasPrice = ev.Price
|
||||
pool.mu.Unlock()
|
||||
case RemovedTransactionEvent:
|
||||
pool.AddBatch(ev.Txs)
|
||||
// Handle stats reporting ticks
|
||||
case <-report.C:
|
||||
pool.mu.RLock()
|
||||
pending, queued := pool.stats()
|
||||
stales := pool.priced.stales
|
||||
pool.mu.RUnlock()
|
||||
|
||||
if pending != prevPending || queued != prevQueued || stales != prevStales {
|
||||
log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales)
|
||||
prevPending, prevQueued, prevStales = pending, queued, stales
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
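A standalone sketch (event payloads are stand-ins) of the loop structure the reworked eventLoop above adopts: a single select multiplexing the subscription channel with a stats ticker, exiting when the event channel closes.

package main

import (
	"fmt"
	"time"
)

func main() {
	events := make(chan string, 4)
	go func() {
		events <- "ChainHeadEvent"
		events <- "RemovedTransactionEvent"
		close(events) // simulates the subscription being torn down on shutdown
	}()

	report := time.NewTicker(10 * time.Millisecond)
	defer report.Stop()

	for {
		select {
		case ev, ok := <-events:
			if !ok {
				fmt.Println("event channel closed, loop exits")
				return
			}
			fmt.Println("handling", ev)
		case <-report.C:
			fmt.Println("periodic stats report")
		}
	}
}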
@ -180,9 +251,10 @@ func (pool *TxPool) resetState() {
|
||||
}
|
||||
// Check the queue and move transactions over to the pending if possible
|
||||
// or remove those that have become invalid
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, nil)
|
||||
}
|
||||
|
||||
// Stop terminates the transaction pool.
|
||||
func (pool *TxPool) Stop() {
|
||||
pool.events.Unsubscribe()
|
||||
close(pool.quit)
|
||||
@ -191,6 +263,28 @@ func (pool *TxPool) Stop() {
|
||||
log.Info("Transaction pool stopped")
|
||||
}
|
||||
|
||||
// GasPrice returns the current gas price enforced by the transaction pool.
|
||||
func (pool *TxPool) GasPrice() *big.Int {
|
||||
pool.mu.RLock()
|
||||
defer pool.mu.RUnlock()
|
||||
|
||||
return new(big.Int).Set(pool.gasPrice)
|
||||
}
|
||||
|
||||
// SetGasPrice updates the minimum price required by the transaction pool for a
|
||||
// new transaction, and drops all transactions below this threshold.
|
||||
func (pool *TxPool) SetGasPrice(price *big.Int) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
pool.gasPrice = price
|
||||
for _, tx := range pool.priced.Cap(price, pool.locals) {
|
||||
pool.removeTx(tx.Hash())
|
||||
}
|
||||
log.Info("Transaction pool price threshold updated", "price", price)
|
||||
}
|
||||
|
||||
// State returns the virtual managed state of the transaction pool.
|
||||
func (pool *TxPool) State() *state.ManagedState {
|
||||
pool.mu.RLock()
|
||||
defer pool.mu.RUnlock()
|
||||
@ -200,17 +294,25 @@ func (pool *TxPool) State() *state.ManagedState {
|
||||
|
||||
// Stats retrieves the current pool stats, namely the number of pending and the
|
||||
// number of queued (non-executable) transactions.
|
||||
func (pool *TxPool) Stats() (pending int, queued int) {
|
||||
func (pool *TxPool) Stats() (int, int) {
|
||||
pool.mu.RLock()
|
||||
defer pool.mu.RUnlock()
|
||||
|
||||
return pool.stats()
|
||||
}
|
||||
|
||||
// stats retrieves the current pool stats, namely the number of pending and the
|
||||
// number of queued (non-executable) transactions.
|
||||
func (pool *TxPool) stats() (int, int) {
|
||||
pending := 0
|
||||
for _, list := range pool.pending {
|
||||
pending += list.Len()
|
||||
}
|
||||
queued := 0
|
||||
for _, list := range pool.queue {
|
||||
queued += list.Len()
|
||||
}
|
||||
return
|
||||
return pending, queued
|
||||
}
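A hedged monitoring sketch built on the public Stats wrapper above; the helper name and log message are assumptions.

func reportPoolSizes(pool *TxPool) {
	// Stats takes the pool read lock internally, so no extra locking is needed here.
	pending, queued := pool.Stats()
	log.Info("Transaction pool size", "pending", pending, "queued", queued)
}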
// Content retrieves the data content of the transaction pool, returning all the
|
||||
@ -237,17 +339,6 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check queue first
|
||||
pool.promoteExecutables(state)
|
||||
|
||||
// invalidate any txs
|
||||
pool.demoteUnexecutables(state)
|
||||
|
||||
pending := make(map[common.Address]types.Transactions)
|
||||
for addr, list := range pool.pending {
|
||||
pending[addr] = list.Flatten()
|
||||
@ -260,16 +351,16 @@ func (pool *TxPool) Pending() (map[common.Address]types.Transactions, error) {
|
||||
func (pool *TxPool) SetLocal(tx *types.Transaction) {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
pool.localTx.add(tx.Hash())
|
||||
pool.locals.add(tx.Hash())
|
||||
}
|
||||
|
||||
// validateTx checks whether a transaction is valid according
|
||||
// to the consensus rules.
|
||||
func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
||||
local := pool.localTx.contains(tx.Hash())
|
||||
local := pool.locals.contains(tx.Hash())
|
||||
// Drop transactions under our own minimal accepted gas price
|
||||
if !local && pool.minGasPrice.Cmp(tx.GasPrice()) > 0 {
|
||||
return ErrCheap
|
||||
if !local && pool.gasPrice.Cmp(tx.GasPrice()) > 0 {
|
||||
return ErrUnderpriced
|
||||
}
|
||||
|
||||
currentState, err := pool.currentState()
|
||||
@ -314,47 +405,92 @@ func (pool *TxPool) validateTx(tx *types.Transaction) error {
|
||||
}
|
||||
|
||||
// add validates a transaction and inserts it into the non-executable queue for
|
||||
// later pending promotion and execution.
|
||||
func (pool *TxPool) add(tx *types.Transaction) error {
|
||||
// later pending promotion and execution. If the transaction is a replacement for
|
||||
// an already pending or queued one, it overwrites the previous one and returns this
|
||||
// so outer code doesn't uselessly call promote.
|
||||
func (pool *TxPool) add(tx *types.Transaction) (bool, error) {
|
||||
// If the transaction is already known, discard it
|
||||
hash := tx.Hash()
|
||||
if pool.all[hash] != nil {
|
||||
log.Trace("Discarding already known transaction", "hash", hash)
|
||||
return fmt.Errorf("known transaction: %x", hash)
|
||||
return false, fmt.Errorf("known transaction: %x", hash)
|
||||
}
|
||||
// Otherwise ensure basic validation passes and queue it up
|
||||
// If the transaction fails basic validation, discard it
|
||||
if err := pool.validateTx(tx); err != nil {
|
||||
log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
|
||||
invalidTxCounter.Inc(1)
|
||||
return err
|
||||
return false, err
|
||||
}
|
||||
pool.enqueueTx(hash, tx)
|
||||
// If the transaction pool is full, discard underpriced transactions
|
||||
if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue {
|
||||
// If the new transaction is underpriced, don't accept it
|
||||
if pool.priced.Underpriced(tx, pool.locals) {
|
||||
log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice())
|
||||
underpricedTxCounter.Inc(1)
|
||||
return false, ErrUnderpriced
|
||||
}
|
||||
// New transaction is better than our worst ones, make room for it
|
||||
drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
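// Arithmetic note: discarding len(pool.all) - (GlobalSlots+GlobalQueue-1)
// transactions leaves the pool one slot under its combined limit, so the
// incoming transaction lands exactly at capacity after insertion.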
for _, tx := range drop {
|
||||
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
|
||||
underpricedTxCounter.Inc(1)
|
||||
pool.removeTx(tx.Hash())
|
||||
}
|
||||
}
|
||||
// If the transaction is replacing an already pending one, do it directly
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
if list := pool.pending[from]; list != nil && list.Overlaps(tx) {
|
||||
// Nonce already pending, check if required price bump is met
|
||||
inserted, old := list.Add(tx, pool.config.PriceBump)
|
||||
if !inserted {
|
||||
pendingDiscardCounter.Inc(1)
|
||||
return false, ErrReplaceUnderpriced
|
||||
}
|
||||
// New transaction is better, replace old one
|
||||
if old != nil {
|
||||
delete(pool.all, old.Hash())
|
||||
pool.priced.Removed()
|
||||
pendingReplaceCounter.Inc(1)
|
||||
}
|
||||
pool.all[tx.Hash()] = tx
|
||||
pool.priced.Put(tx)
|
||||
|
||||
// Print a log message if low enough level is set
|
||||
log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
|
||||
return nil
|
||||
log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
|
||||
return old != nil, nil
|
||||
}
|
||||
// New transaction isn't replacing a pending one, push into queue
|
||||
replace, err := pool.enqueueTx(hash, tx)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
|
||||
return replace, nil
|
||||
}
|
||||
|
||||
// enqueueTx inserts a new transaction into the non-executable transaction queue.
|
||||
//
|
||||
// Note, this method assumes the pool lock is held!
|
||||
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) {
|
||||
func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) {
|
||||
// Try to insert the transaction into the future queue
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
if pool.queue[from] == nil {
|
||||
pool.queue[from] = newTxList(false)
|
||||
}
|
||||
inserted, old := pool.queue[from].Add(tx)
|
||||
inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
|
||||
if !inserted {
|
||||
// An older transaction was better, discard this
|
||||
queuedDiscardCounter.Inc(1)
|
||||
return // An older transaction was better, discard this
|
||||
return false, ErrReplaceUnderpriced
|
||||
}
|
||||
// Discard any previous transaction and mark this
|
||||
if old != nil {
|
||||
delete(pool.all, old.Hash())
|
||||
pool.priced.Removed()
|
||||
queuedReplaceCounter.Inc(1)
|
||||
}
|
||||
pool.all[hash] = tx
|
||||
pool.priced.Put(tx)
|
||||
return old != nil, nil
|
||||
}
|
||||
|
||||
// promoteTx adds a transaction to the pending (processable) list of transactions.
|
||||
@ -367,20 +503,27 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T
|
||||
}
|
||||
list := pool.pending[addr]
|
||||
|
||||
inserted, old := list.Add(tx)
|
||||
inserted, old := list.Add(tx, pool.config.PriceBump)
|
||||
if !inserted {
|
||||
// An older transaction was better, discard this
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
|
||||
pendingDiscardCounter.Inc(1)
|
||||
return
|
||||
}
|
||||
// Otherwise discard any previous transaction and mark this
|
||||
if old != nil {
|
||||
delete(pool.all, old.Hash())
|
||||
pool.priced.Removed()
|
||||
|
||||
pendingReplaceCounter.Inc(1)
|
||||
}
|
||||
pool.all[hash] = tx // Failsafe to work around direct pending inserts (tests)
|
||||
|
||||
// Failsafe to work around direct pending inserts (tests)
|
||||
if pool.all[hash] == nil {
|
||||
pool.all[hash] = tx
|
||||
pool.priced.Put(tx)
|
||||
}
|
||||
// Set the potentially new pending nonce and notify any subsystems of the new tx
|
||||
pool.beats[addr] = time.Now()
|
||||
pool.pendingState.SetNonce(addr, tx.Nonce()+1)
|
||||
@ -392,16 +535,20 @@ func (pool *TxPool) Add(tx *types.Transaction) error {
|
||||
pool.mu.Lock()
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
if err := pool.add(tx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
state, err := pool.currentState()
|
||||
// Try to inject the transaction and update any state
|
||||
replace, err := pool.add(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pool.promoteExecutables(state)
|
||||
|
||||
// If we added a new transaction, run promotion checks and return
|
||||
if !replace {
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
pool.promoteExecutables(state, []common.Address{from})
|
||||
}
|
||||
return nil
|
||||
}
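A caller-side sketch of the error handling enabled by the sentinel values introduced above; the submit helper itself is an assumption, not part of this change.

func submit(pool *TxPool, tx *types.Transaction) {
	switch err := pool.Add(tx); err {
	case nil:
		// Accepted into the pending set or the future queue.
	case ErrUnderpriced, ErrReplaceUnderpriced:
		// Priced below the pool minimum, or below the bump needed to replace.
	default:
		// Nonce, balance, gas or other validation failure.
	}
}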
@ -411,19 +558,26 @@ func (pool *TxPool) AddBatch(txs []*types.Transaction) error {
|
||||
defer pool.mu.Unlock()
|
||||
|
||||
// Add the batch of transactions, tracking the accepted ones
|
||||
added := 0
|
||||
dirty := make(map[common.Address]struct{})
|
||||
for _, tx := range txs {
|
||||
if err := pool.add(tx); err == nil {
|
||||
added++
|
||||
if replace, err := pool.add(tx); err == nil {
|
||||
if !replace {
|
||||
from, _ := types.Sender(pool.signer, tx) // already validated
|
||||
dirty[from] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Only reprocess the internal state if something was actually added
|
||||
if added > 0 {
|
||||
if len(dirty) > 0 {
|
||||
state, err := pool.currentState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pool.promoteExecutables(state)
|
||||
addrs := make([]common.Address, 0, len(dirty))
|
||||
for addr, _ := range dirty {
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
pool.promoteExecutables(state, addrs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -467,6 +621,7 @@ func (pool *TxPool) removeTx(hash common.Hash) {
|
||||
|
||||
// Remove it from the list of known transactions
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
|
||||
// Remove the transaction from the pending lists and reset the account nonce
|
||||
if pending := pool.pending[addr]; pending != nil {
|
||||
@ -499,35 +654,51 @@ func (pool *TxPool) removeTx(hash common.Hash) {
|
||||
// promoteExecutables moves transactions that have become processable from the
|
||||
// future queue to the set of pending transactions. During this process, all
|
||||
// invalidated transactions (low nonce, low balance) are deleted.
|
||||
func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
func (pool *TxPool) promoteExecutables(state *state.StateDB, accounts []common.Address) {
|
||||
gaslimit := pool.gasLimit()
|
||||
|
||||
// Gather all the accounts potentially needing updates
|
||||
if accounts == nil {
|
||||
accounts = make([]common.Address, 0, len(pool.queue))
|
||||
for addr, _ := range pool.queue {
|
||||
accounts = append(accounts, addr)
|
||||
}
|
||||
}
|
||||
// Iterate over all accounts and promote any executable transactions
|
||||
queued := uint64(0)
|
||||
for addr, list := range pool.queue {
|
||||
for _, addr := range accounts {
|
||||
list := pool.queue[addr]
|
||||
if list == nil {
|
||||
continue // Just in case someone calls with a non existing account
|
||||
}
|
||||
// Drop all transactions that are deemed too old (low nonce)
|
||||
for _, tx := range list.Forward(state.GetNonce(addr)) {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Removed old queued transaction", "hash", hash)
|
||||
log.Trace("Removed old queued transaction", "hash", hash)
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
}
|
||||
// Drop all transactions that are too costly (low balance)
|
||||
drops, _ := list.Filter(state.GetBalance(addr))
|
||||
// Drop all transactions that are too costly (low balance or out of gas)
|
||||
drops, _ := list.Filter(state.GetBalance(addr), gaslimit)
|
||||
for _, tx := range drops {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Removed unpayable queued transaction", "hash", hash)
|
||||
log.Trace("Removed unpayable queued transaction", "hash", hash)
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
queuedNofundsCounter.Inc(1)
|
||||
}
|
||||
// Gather all executable transactions and promote them
|
||||
for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Promoting queued transaction", "hash", hash)
|
||||
log.Trace("Promoting queued transaction", "hash", hash)
|
||||
pool.promoteTx(addr, hash, tx)
|
||||
}
|
||||
// Drop all transactions over the allowed limit
|
||||
for _, tx := range list.Cap(int(maxQueuedPerAccount)) {
|
||||
for _, tx := range list.Cap(int(pool.config.AccountQueue)) {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Removed cap-exceeding queued transaction", "hash", hash)
|
||||
log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
queuedRLCounter.Inc(1)
|
||||
}
|
||||
queued += uint64(list.Len())
|
||||
@ -542,16 +713,16 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
for _, list := range pool.pending {
|
||||
pending += uint64(list.Len())
|
||||
}
|
||||
if pending > maxPendingTotal {
|
||||
if pending > pool.config.GlobalSlots {
|
||||
pendingBeforeCap := pending
|
||||
// Assemble a spam order to penalize large transactors first
|
||||
spammers := prque.New()
|
||||
for addr, list := range pool.pending {
|
||||
// Only evict transactions from high rollers
|
||||
if uint64(list.Len()) > minPendingPerAccount {
|
||||
if uint64(list.Len()) > pool.config.AccountSlots {
|
||||
// Skip local accounts as pools should maintain backlogs for themselves
|
||||
for _, tx := range list.txs.items {
|
||||
if !pool.localTx.contains(tx.Hash()) {
|
||||
if !pool.locals.contains(tx.Hash()) {
|
||||
spammers.Push(addr, float32(list.Len()))
|
||||
}
|
||||
break // Checking one transaction for locality is enough
|
||||
@ -560,7 +731,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
}
|
||||
// Gradually drop transactions from offenders
|
||||
offenders := []common.Address{}
|
||||
for pending > maxPendingTotal && !spammers.Empty() {
|
||||
for pending > pool.config.GlobalSlots && !spammers.Empty() {
|
||||
// Retrieve the next offender if not local address
|
||||
offender, _ := spammers.Pop()
|
||||
offenders = append(offenders, offender.(common.Address))
|
||||
@ -571,7 +742,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
threshold := pool.pending[offender.(common.Address)].Len()
|
||||
|
||||
// Iteratively reduce all offenders until below limit or threshold reached
|
||||
for pending > maxPendingTotal && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
|
||||
for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
|
||||
for i := 0; i < len(offenders)-1; i++ {
|
||||
list := pool.pending[offenders[i]]
|
||||
list.Cap(list.Len() - 1)
|
||||
@ -581,8 +752,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
}
|
||||
}
|
||||
// If still above threshold, reduce to limit or min allowance
|
||||
if pending > maxPendingTotal && len(offenders) > 0 {
|
||||
for pending > maxPendingTotal && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > minPendingPerAccount {
|
||||
if pending > pool.config.GlobalSlots && len(offenders) > 0 {
|
||||
for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
|
||||
for _, addr := range offenders {
|
||||
list := pool.pending[addr]
|
||||
list.Cap(list.Len() - 1)
|
||||
@ -593,7 +764,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
pendingRLCounter.Inc(int64(pendingBeforeCap - pending))
|
||||
}
|
||||
// If we've queued more transactions than the hard limit, drop oldest ones
|
||||
if queued > maxQueuedInTotal {
|
||||
if queued > pool.config.GlobalQueue {
|
||||
// Sort all accounts with queued transactions by heartbeat
|
||||
addresses := make(addresssByHeartbeat, 0, len(pool.queue))
|
||||
for addr := range pool.queue {
|
||||
@ -602,7 +773,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
sort.Sort(addresses)
|
||||
|
||||
// Drop transactions until the total is below the limit
|
||||
for drop := queued - maxQueuedInTotal; drop > 0; {
|
||||
for drop := queued - pool.config.GlobalQueue; drop > 0; {
|
||||
addr := addresses[len(addresses)-1]
|
||||
list := pool.queue[addr.address]
|
||||
|
||||
@ -632,6 +803,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) {
|
||||
// executable/pending queue and any subsequent transactions that become unexecutable
|
||||
// are moved back into the future queue.
|
||||
func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
|
||||
gaslimit := pool.gasLimit()
|
||||
|
||||
// Iterate over all accounts and demote any non-executable transactions
|
||||
for addr, list := range pool.pending {
|
||||
nonce := state.GetNonce(addr)
|
||||
@ -639,20 +812,22 @@ func (pool *TxPool) demoteUnexecutables(state *state.StateDB) {
|
||||
// Drop all transactions that are deemed too old (low nonce)
|
||||
for _, tx := range list.Forward(nonce) {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Removed old pending transaction", "hash", hash)
|
||||
log.Trace("Removed old pending transaction", "hash", hash)
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
}
|
||||
// Drop all transactions that are too costly (low balance), and queue any invalids back for later
|
||||
drops, invalids := list.Filter(state.GetBalance(addr))
|
||||
// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
|
||||
drops, invalids := list.Filter(state.GetBalance(addr), gaslimit)
|
||||
for _, tx := range drops {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Removed unpayable pending transaction", "hash", hash)
|
||||
log.Trace("Removed unpayable pending transaction", "hash", hash)
|
||||
delete(pool.all, hash)
|
||||
pool.priced.Removed()
|
||||
pendingNofundsCounter.Inc(1)
|
||||
}
|
||||
for _, tx := range invalids {
|
||||
hash := tx.Hash()
|
||||
log.Debug("Demoting pending transaction", "hash", hash)
|
||||
log.Trace("Demoting pending transaction", "hash", hash)
|
||||
pool.enqueueTx(hash, tx)
|
||||
}
|
||||
// Delete the entire queue entry if it became empty.
|
||||
@ -677,7 +852,7 @@ func (pool *TxPool) expirationLoop() {
|
||||
case <-evict.C:
|
||||
pool.mu.Lock()
|
||||
for addr := range pool.queue {
|
||||
if time.Since(pool.beats[addr]) > maxQueuedLifetime {
|
||||
if time.Since(pool.beats[addr]) > pool.config.Lifetime {
|
||||
for _, tx := range pool.queue[addr].Flatten() {
|
||||
pool.removeTx(tx.Hash())
|
||||
}
|
||||
@ -729,22 +904,22 @@ func newTxSet() *txSet {
|
||||
|
||||
// contains returns true if the set contains the given transaction hash
|
||||
// (not thread safe, should be called from a locked environment)
|
||||
func (self *txSet) contains(hash common.Hash) bool {
|
||||
_, ok := self.txMap[hash]
|
||||
func (ts *txSet) contains(hash common.Hash) bool {
|
||||
_, ok := ts.txMap[hash]
|
||||
return ok
|
||||
}
|
||||
|
||||
// add adds a transaction hash to the set, then removes entries older than txSetDuration
|
||||
// (not thread safe, should be called from a locked environment)
|
||||
func (self *txSet) add(hash common.Hash) {
|
||||
self.txMap[hash] = struct{}{}
|
||||
func (ts *txSet) add(hash common.Hash) {
|
||||
ts.txMap[hash] = struct{}{}
|
||||
now := time.Now()
|
||||
self.txOrd[self.addPtr] = txOrdType{hash: hash, time: now}
|
||||
self.addPtr++
|
||||
ts.txOrd[ts.addPtr] = txOrdType{hash: hash, time: now}
|
||||
ts.addPtr++
|
||||
delBefore := now.Add(-txSetDuration)
|
||||
for self.delPtr < self.addPtr && self.txOrd[self.delPtr].time.Before(delBefore) {
|
||||
delete(self.txMap, self.txOrd[self.delPtr].hash)
|
||||
delete(self.txOrd, self.delPtr)
|
||||
self.delPtr++
|
||||
for ts.delPtr < ts.addPtr && ts.txOrd[ts.delPtr].time.Before(delBefore) {
|
||||
delete(ts.txMap, ts.txOrd[ts.delPtr].hash)
|
||||
delete(ts.txOrd, ts.delPtr)
|
||||
ts.delPtr++
|
||||
}
|
||||
}
|
||||
|
@ -33,7 +33,11 @@ import (
|
||||
)
|
||||
|
||||
func transaction(nonce uint64, gaslimit *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
|
||||
tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, big.NewInt(1), nil), types.HomesteadSigner{}, key)
|
||||
return pricedTransaction(nonce, gaslimit, big.NewInt(1), key)
|
||||
}
|
||||
|
||||
func pricedTransaction(nonce uint64, gaslimit, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
|
||||
tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
|
||||
return tx
|
||||
}
|
||||
|
||||
@ -42,7 +46,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
key, _ := crypto.GenerateKey()
|
||||
newPool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
newPool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
newPool.resetState()
|
||||
|
||||
return newPool, key
|
||||
@ -91,7 +95,7 @@ func TestStateChangeDuringPoolReset(t *testing.T) {
|
||||
|
||||
gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) }
|
||||
|
||||
txpool := NewTxPool(params.TestChainConfig, mux, stateFunc, gasLimitFunc)
|
||||
txpool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc)
|
||||
txpool.resetState()
|
||||
|
||||
nonce := txpool.State().GetNonce(address)
|
||||
@ -151,9 +155,9 @@ func TestInvalidTransactions(t *testing.T) {
|
||||
}
|
||||
|
||||
tx = transaction(1, big.NewInt(100000), key)
|
||||
pool.minGasPrice = big.NewInt(1000)
|
||||
if err := pool.Add(tx); err != ErrCheap {
|
||||
t.Error("expected", ErrCheap, "got", err)
|
||||
pool.gasPrice = big.NewInt(1000)
|
||||
if err := pool.Add(tx); err != ErrUnderpriced {
|
||||
t.Error("expected", ErrUnderpriced, "got", err)
|
||||
}
|
||||
|
||||
pool.SetLocal(tx)
|
||||
@ -171,7 +175,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
pool.resetState()
|
||||
pool.enqueueTx(tx.Hash(), tx)
|
||||
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, []common.Address{from})
|
||||
if len(pool.pending) != 1 {
|
||||
t.Error("expected valid txs to be 1 is", len(pool.pending))
|
||||
}
|
||||
@ -180,7 +184,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
from, _ = deriveSender(tx)
|
||||
currentState.SetNonce(from, 2)
|
||||
pool.enqueueTx(tx.Hash(), tx)
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, []common.Address{from})
|
||||
if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
|
||||
t.Error("expected transaction to be in tx pool")
|
||||
}
|
||||
@ -202,7 +206,7 @@ func TestTransactionQueue(t *testing.T) {
|
||||
pool.enqueueTx(tx2.Hash(), tx2)
|
||||
pool.enqueueTx(tx3.Hash(), tx3)
|
||||
|
||||
pool.promoteExecutables(currentState)
|
||||
pool.promoteExecutables(currentState, []common.Address{from})
|
||||
|
||||
if len(pool.pending) != 1 {
|
||||
t.Error("expected tx pool to be 1, got", len(pool.pending))
|
||||
@ -262,14 +266,14 @@ func TestTransactionChainFork(t *testing.T) {
|
||||
resetState()
|
||||
|
||||
tx := transaction(0, big.NewInt(100000), key)
|
||||
if err := pool.add(tx); err != nil {
|
||||
if _, err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
pool.RemoveBatch([]*types.Transaction{tx})
|
||||
|
||||
// reset the pool's internal state
|
||||
resetState()
|
||||
if err := pool.add(tx); err != nil {
|
||||
if _, err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
}
|
||||
@ -293,25 +297,23 @@ func TestTransactionDoubleNonce(t *testing.T) {
|
||||
tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), big.NewInt(1000000), big.NewInt(1), nil), signer, key)
|
||||
|
||||
// Add the first two transaction, ensure higher priced stays only
|
||||
if err := pool.add(tx1); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
if replace, err := pool.add(tx1); err != nil || replace {
|
||||
t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace)
|
||||
}
|
||||
if err := pool.add(tx2); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
if replace, err := pool.add(tx2); err != nil || !replace {
|
||||
t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
|
||||
}
|
||||
state, _ := pool.currentState()
|
||||
pool.promoteExecutables(state)
|
||||
pool.promoteExecutables(state, []common.Address{addr})
|
||||
if pool.pending[addr].Len() != 1 {
|
||||
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
|
||||
}
|
||||
if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
|
||||
t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
|
||||
}
|
||||
// Add the thid transaction and ensure it's not saved (smaller price)
|
||||
if err := pool.add(tx3); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
pool.promoteExecutables(state)
|
||||
// Add the third transaction and ensure it's not saved (smaller price)
|
||||
pool.add(tx3)
|
||||
pool.promoteExecutables(state, []common.Address{addr})
|
||||
if pool.pending[addr].Len() != 1 {
|
||||
t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
|
||||
}
|
||||
@ -330,7 +332,7 @@ func TestMissingNonce(t *testing.T) {
|
||||
currentState, _ := pool.currentState()
|
||||
currentState.AddBalance(addr, big.NewInt(100000000000000))
|
||||
tx := transaction(1, big.NewInt(100000), key)
|
||||
if err := pool.add(tx); err != nil {
|
||||
if _, err := pool.add(tx); err != nil {
|
||||
t.Error("didn't expect error", err)
|
||||
}
|
||||
if len(pool.pending) != 0 {
|
||||
@ -395,49 +397,78 @@ func TestTransactionDropping(t *testing.T) {
|
||||
var (
|
||||
tx0 = transaction(0, big.NewInt(100), key)
|
||||
tx1 = transaction(1, big.NewInt(200), key)
|
||||
tx2 = transaction(2, big.NewInt(300), key)
|
||||
tx10 = transaction(10, big.NewInt(100), key)
|
||||
tx11 = transaction(11, big.NewInt(200), key)
|
||||
tx12 = transaction(12, big.NewInt(300), key)
|
||||
)
|
||||
pool.promoteTx(account, tx0.Hash(), tx0)
|
||||
pool.promoteTx(account, tx1.Hash(), tx1)
|
||||
pool.promoteTx(account, tx1.Hash(), tx2)
|
||||
pool.enqueueTx(tx10.Hash(), tx10)
|
||||
pool.enqueueTx(tx11.Hash(), tx11)
|
||||
pool.enqueueTx(tx11.Hash(), tx12)
|
||||
|
||||
// Check that pre and post validations leave the pool as is
|
||||
if pool.pending[account].Len() != 2 {
|
||||
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 2)
|
||||
if pool.pending[account].Len() != 3 {
|
||||
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
|
||||
}
|
||||
if pool.queue[account].Len() != 2 {
|
||||
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 2)
|
||||
if pool.queue[account].Len() != 3 {
|
||||
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
|
||||
}
|
||||
if len(pool.all) != 4 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
|
||||
}
|
||||
pool.resetState()
|
||||
if pool.pending[account].Len() != 2 {
|
||||
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 2)
|
||||
if pool.pending[account].Len() != 3 {
|
||||
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
|
||||
}
|
||||
if pool.queue[account].Len() != 2 {
|
||||
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 2)
|
||||
if pool.queue[account].Len() != 3 {
|
||||
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
|
||||
}
|
||||
if len(pool.all) != 4 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
|
||||
}
|
||||
// Reduce the balance of the account, and check that invalidated transactions are dropped
|
||||
state.AddBalance(account, big.NewInt(-750))
|
||||
state.AddBalance(account, big.NewInt(-650))
|
||||
pool.resetState()
|
||||
|
||||
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
|
||||
t.Errorf("funded pending transaction missing: %v", tx0)
|
||||
}
|
||||
if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok {
|
||||
t.Errorf("funded pending transaction missing: %v", tx0)
|
||||
}
|
||||
if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
|
||||
t.Errorf("out-of-fund pending transaction present: %v", tx1)
|
||||
}
|
||||
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
|
||||
t.Errorf("funded queued transaction missing: %v", tx10)
|
||||
}
|
||||
if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok {
|
||||
t.Errorf("funded queued transaction missing: %v", tx10)
|
||||
}
|
||||
if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
|
||||
t.Errorf("out-of-fund queued transaction present: %v", tx11)
|
||||
}
|
||||
if len(pool.all) != 4 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
|
||||
}
|
||||
// Reduce the block gas limit, check that invalidated transactions are dropped
|
||||
pool.gasLimit = func() *big.Int { return big.NewInt(100) }
|
||||
pool.resetState()
|
||||
|
||||
if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
|
||||
t.Errorf("funded pending transaction missing: %v", tx0)
|
||||
}
|
||||
if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
|
||||
t.Errorf("out-of-fund pending transaction present: %v", tx1)
|
||||
t.Errorf("over-gased pending transaction present: %v", tx1)
|
||||
}
|
||||
if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
|
||||
t.Errorf("funded queued transaction missing: %v", tx10)
|
||||
}
|
||||
if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
|
||||
t.Errorf("out-of-fund queued transaction present: %v", tx11)
|
||||
t.Errorf("over-gased queued transaction present: %v", tx11)
|
||||
}
|
||||
if len(pool.all) != 2 {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 2)
|
||||
@ -531,25 +562,25 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
|
||||
pool.resetState()
|
||||
|
||||
// Keep queuing up transactions and make sure all above a limit are dropped
|
||||
for i := uint64(1); i <= maxQueuedPerAccount+5; i++ {
|
||||
for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue+5; i++ {
|
||||
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
|
||||
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
|
||||
}
|
||||
if len(pool.pending) != 0 {
|
||||
t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
|
||||
}
|
||||
if i <= maxQueuedPerAccount {
|
||||
if i <= DefaultTxPoolConfig.AccountQueue {
|
||||
if pool.queue[account].Len() != int(i) {
|
||||
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
|
||||
}
|
||||
} else {
|
||||
if pool.queue[account].Len() != int(maxQueuedPerAccount) {
|
||||
t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), maxQueuedPerAccount)
|
||||
if pool.queue[account].Len() != int(DefaultTxPoolConfig.AccountQueue) {
|
||||
t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), DefaultTxPoolConfig.AccountQueue)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(pool.all) != int(maxQueuedPerAccount) {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount)
|
||||
if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue) {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue)
|
||||
}
|
||||
}
|
||||
|
||||
@ -557,14 +588,14 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
|
||||
// some threshold, the higher transactions are dropped to prevent DOS attacks.
|
||||
func TestTransactionQueueGlobalLimiting(t *testing.T) {
|
||||
// Reduce the queue limits to shorten test time
|
||||
defer func(old uint64) { maxQueuedInTotal = old }(maxQueuedInTotal)
|
||||
maxQueuedInTotal = maxQueuedPerAccount * 3
|
||||
defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
|
||||
DefaultTxPoolConfig.GlobalQueue = DefaultTxPoolConfig.AccountQueue * 3
|
||||
|
||||
// Create the pool to test the limit enforcement with
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool.resetState()
|
||||
|
||||
// Create a number of test accounts and fund them
|
||||
@ -578,7 +609,7 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
|
||||
// Generate and queue a batch of transactions
|
||||
nonces := make(map[common.Address]uint64)
|
||||
|
||||
txs := make(types.Transactions, 0, 3*maxQueuedInTotal)
|
||||
txs := make(types.Transactions, 0, 3*DefaultTxPoolConfig.GlobalQueue)
|
||||
for len(txs) < cap(txs) {
|
||||
key := keys[rand.Intn(len(keys))]
|
||||
addr := crypto.PubkeyToAddress(key.PublicKey)
|
||||
@ -591,13 +622,13 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
|
||||
|
||||
queued := 0
|
||||
for addr, list := range pool.queue {
|
||||
if list.Len() > int(maxQueuedPerAccount) {
|
||||
t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), maxQueuedPerAccount)
|
||||
if list.Len() > int(DefaultTxPoolConfig.AccountQueue) {
|
||||
t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), DefaultTxPoolConfig.AccountQueue)
|
||||
}
|
||||
queued += list.Len()
|
||||
}
|
||||
if queued > int(maxQueuedInTotal) {
|
||||
t.Fatalf("total transactions overflow allowance: %d > %d", queued, maxQueuedInTotal)
|
||||
if queued > int(DefaultTxPoolConfig.GlobalQueue) {
|
||||
t.Fatalf("total transactions overflow allowance: %d > %d", queued, DefaultTxPoolConfig.GlobalQueue)
|
||||
}
|
||||
}
|
||||
|
||||
@ -606,9 +637,9 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) {
|
||||
// on shuffling them around.
|
||||
func TestTransactionQueueTimeLimiting(t *testing.T) {
|
||||
// Reduce the queue limits to shorten test time
|
||||
defer func(old time.Duration) { maxQueuedLifetime = old }(maxQueuedLifetime)
|
||||
defer func(old time.Duration) { DefaultTxPoolConfig.Lifetime = old }(DefaultTxPoolConfig.Lifetime)
|
||||
defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
|
||||
maxQueuedLifetime = time.Second
|
||||
DefaultTxPoolConfig.Lifetime = time.Second
|
||||
evictionInterval = time.Second
|
||||
|
||||
// Create a test account and fund it
|
||||
@ -619,7 +650,7 @@ func TestTransactionQueueTimeLimiting(t *testing.T) {
|
||||
state.AddBalance(account, big.NewInt(1000000))
|
||||
|
||||
// Queue up a batch of transactions
|
||||
for i := uint64(1); i <= maxQueuedPerAccount; i++ {
|
||||
for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue; i++ {
|
||||
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
|
||||
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
|
||||
}
|
||||
@ -644,7 +675,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
|
||||
pool.resetState()
|
||||
|
||||
// Keep queuing up transactions and make sure all above a limit are dropped
|
||||
for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
|
||||
for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
|
||||
if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil {
|
||||
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
|
||||
}
|
||||
@ -655,8 +686,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
|
||||
t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
|
||||
}
|
||||
}
|
||||
if len(pool.all) != int(maxQueuedPerAccount+5) {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount+5)
|
||||
if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue+5) {
|
||||
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue+5)
|
||||
}
|
||||
}
|
||||
|
||||
@ -672,7 +703,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
|
||||
state1, _ := pool1.currentState()
|
||||
state1.AddBalance(account1, big.NewInt(1000000))
|
||||
|
||||
for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
|
||||
for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
|
||||
if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil {
|
||||
t.Fatalf("tx %d: failed to add transaction: %v", i, err)
|
||||
}
|
||||
@ -684,7 +715,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
|
||||
state2.AddBalance(account2, big.NewInt(1000000))
|
||||
|
||||
txns := []*types.Transaction{}
|
||||
for i := uint64(0); i < maxQueuedPerAccount+5; i++ {
|
||||
for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ {
|
||||
txns = append(txns, transaction(origin+i, big.NewInt(100000), key2))
|
||||
}
|
||||
pool2.AddBatch(txns)
|
||||
@ -706,14 +737,14 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
|
||||
// attacks.
|
||||
func TestTransactionPendingGlobalLimiting(t *testing.T) {
|
||||
// Reduce the queue limits to shorten test time
|
||||
defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
|
||||
maxPendingTotal = minPendingPerAccount * 10
|
||||
defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
|
||||
DefaultTxPoolConfig.GlobalSlots = DefaultTxPoolConfig.AccountSlots * 10
|
||||
|
||||
// Create the pool to test the limit enforcement with
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool.resetState()
|
||||
|
||||
// Create a number of test accounts and fund them
|
||||
@ -730,7 +761,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
|
||||
txs := types.Transactions{}
|
||||
for _, key := range keys {
|
||||
addr := crypto.PubkeyToAddress(key.PublicKey)
|
||||
for j := 0; j < int(maxPendingTotal)/len(keys)*2; j++ {
|
||||
for j := 0; j < int(DefaultTxPoolConfig.GlobalSlots)/len(keys)*2; j++ {
|
||||
txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
|
||||
nonces[addr]++
|
||||
}
|
||||
@ -742,8 +773,8 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
|
||||
for _, list := range pool.pending {
|
||||
pending += list.Len()
|
||||
}
|
||||
if pending > int(maxPendingTotal) {
|
||||
t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, maxPendingTotal)
|
||||
if pending > int(DefaultTxPoolConfig.GlobalSlots) {
|
||||
t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, DefaultTxPoolConfig.GlobalSlots)
|
||||
}
|
||||
}
|
||||
|
||||
@ -752,14 +783,14 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
|
||||
// the transactions are still kept.
|
||||
func TestTransactionPendingMinimumAllowance(t *testing.T) {
|
||||
// Reduce the queue limits to shorten test time
|
||||
defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal)
|
||||
maxPendingTotal = 0
|
||||
defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
|
||||
DefaultTxPoolConfig.GlobalSlots = 0
|
||||
|
||||
// Create the pool to test the limit enforcement with
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool.resetState()
|
||||
|
||||
// Create a number of test accounts and fund them
|
||||
@ -776,7 +807,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
|
||||
txs := types.Transactions{}
|
||||
for _, key := range keys {
|
||||
addr := crypto.PubkeyToAddress(key.PublicKey)
|
||||
for j := 0; j < int(minPendingPerAccount)*2; j++ {
|
||||
for j := 0; j < int(DefaultTxPoolConfig.AccountSlots)*2; j++ {
|
||||
txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key))
|
||||
nonces[addr]++
|
||||
}
|
||||
@ -785,12 +816,233 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) {
|
||||
pool.AddBatch(txs)
|
||||
|
||||
for addr, list := range pool.pending {
|
||||
if list.Len() != int(minPendingPerAccount) {
|
||||
t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), minPendingPerAccount)
|
||||
if list.Len() != int(DefaultTxPoolConfig.AccountSlots) {
|
||||
t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), DefaultTxPoolConfig.AccountSlots)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that setting the transaction pool gas price to a higher value correctly
|
||||
// discards everything cheaper than that and moves any gapped transactions back
|
||||
// from the pending pool to the queue.
|
||||
//
|
||||
// Note, local transactions are never allowed to be dropped.
|
||||
func TestTransactionPoolRepricing(t *testing.T) {
|
||||
// Create the pool to test the pricing enforcement with
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool.resetState()
|
||||
|
||||
// Create a number of test accounts and fund them
|
||||
state, _ := pool.currentState()
|
||||
|
||||
keys := make([]*ecdsa.PrivateKey, 3)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
keys[i], _ = crypto.GenerateKey()
|
||||
state.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
|
||||
}
|
||||
// Generate and queue a batch of transactions, both pending and queued
|
||||
txs := types.Transactions{}
|
||||
|
||||
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(2), keys[0]))
|
||||
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[0]))
|
||||
txs = append(txs, pricedTransaction(2, big.NewInt(100000), big.NewInt(2), keys[0]))
|
||||
|
||||
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(2), keys[1]))
|
||||
txs = append(txs, pricedTransaction(2, big.NewInt(100000), big.NewInt(1), keys[1]))
|
||||
txs = append(txs, pricedTransaction(3, big.NewInt(100000), big.NewInt(2), keys[1]))
|
||||
|
||||
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[2]))
|
||||
pool.SetLocal(txs[len(txs)-1]) // prevent this one from ever being dropped
|
||||
|
||||
// Import the batch and verify that both pending and queued transactions match up
|
||||
pool.AddBatch(txs)
|
||||
|
||||
pending, queued := pool.stats()
|
||||
if pending != 4 {
|
||||
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
|
||||
}
|
||||
if queued != 3 {
|
||||
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
|
||||
}
|
||||
// Reprice the pool and check that underpriced transactions get dropped
|
||||
pool.SetGasPrice(big.NewInt(2))
|
||||
|
||||
pending, queued = pool.stats()
|
||||
if pending != 2 {
|
||||
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
|
||||
}
|
||||
if queued != 3 {
|
||||
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
|
||||
}
|
||||
// Check that we can't add the old transactions back
|
||||
if err := pool.Add(pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[0])); err != ErrUnderpriced {
|
||||
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(1), keys[1])); err != ErrUnderpriced {
|
||||
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
|
||||
}
|
||||
// However we can add local underpriced transactions
|
||||
tx := pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[2])
|
||||
|
||||
pool.SetLocal(tx) // prevent this one from ever being dropped
|
||||
if err := pool.Add(tx); err != nil {
|
||||
t.Fatalf("failed to add underpriced local transaction: %v", err)
|
||||
}
|
||||
if pending, _ = pool.stats(); pending != 3 {
|
||||
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that when the pool reaches its global transaction limit, underpriced
|
||||
// transactions are gradually shifted out for more expensive ones and any gapped
|
||||
// pending transactions are moved into the queue.
|
||||
//
|
||||
// Note, local transactions are never allowed to be dropped.
|
||||
func TestTransactionPoolUnderpricing(t *testing.T) {
|
||||
// Reduce the queue limits to shorten test time
|
||||
defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots)
|
||||
DefaultTxPoolConfig.GlobalSlots = 2
|
||||
|
||||
defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue)
|
||||
DefaultTxPoolConfig.GlobalQueue = 2
|
||||
|
||||
// Create the pool to test the pricing enforcement with
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool.resetState()
|
||||
|
||||
// Create a number of test accounts and fund them
|
||||
state, _ := pool.currentState()
|
||||
|
||||
keys := make([]*ecdsa.PrivateKey, 3)
|
||||
for i := 0; i < len(keys); i++ {
|
||||
keys[i], _ = crypto.GenerateKey()
|
||||
state.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
|
||||
}
|
||||
// Generate and queue a batch of transactions, both pending and queued
|
||||
txs := types.Transactions{}
|
||||
|
||||
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[0]))
|
||||
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(2), keys[0]))
|
||||
|
||||
txs = append(txs, pricedTransaction(1, big.NewInt(100000), big.NewInt(1), keys[1]))
|
||||
|
||||
txs = append(txs, pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[2]))
|
||||
pool.SetLocal(txs[len(txs)-1]) // prevent this one from ever being dropped
|
||||
|
||||
// Import the batch and verify that both pending and queued transactions match up
|
||||
pool.AddBatch(txs)
|
||||
|
||||
pending, queued := pool.stats()
|
||||
if pending != 3 {
|
||||
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
|
||||
}
|
||||
if queued != 1 {
|
||||
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
|
||||
}
|
||||
// Ensure that adding an underpriced transaction fails once the pool is full
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), keys[1])); err != ErrUnderpriced {
|
||||
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
|
||||
}
|
||||
// Ensure that adding high priced transactions drops cheap ones, but not own
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(3), keys[1])); err != nil {
|
||||
t.Fatalf("failed to add well priced transaction: %v", err)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(4), keys[1])); err != nil {
|
||||
t.Fatalf("failed to add well priced transaction: %v", err)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(3, big.NewInt(100000), big.NewInt(5), keys[1])); err != nil {
|
||||
t.Fatalf("failed to add well priced transaction: %v", err)
|
||||
}
|
||||
pending, queued = pool.stats()
|
||||
if pending != 2 {
|
||||
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
|
||||
}
|
||||
if queued != 2 {
|
||||
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
|
||||
}
|
||||
// Ensure that adding local transactions can push out even higher priced ones
|
||||
tx := pricedTransaction(1, big.NewInt(100000), big.NewInt(0), keys[2])
|
||||
|
||||
pool.SetLocal(tx) // prevent this one from ever being dropped
|
||||
if err := pool.Add(tx); err != nil {
|
||||
t.Fatalf("failed to add underpriced local transaction: %v", err)
|
||||
}
|
||||
pending, queued = pool.stats()
|
||||
if pending != 2 {
|
||||
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
|
||||
}
|
||||
if queued != 2 {
|
||||
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
|
||||
}
|
||||
}
|
||||
|
||||
// Tests that the pool rejects replacement transactions that don't meet the minimum
|
||||
// price bump required.
|
||||
func TestTransactionReplacement(t *testing.T) {
|
||||
// Create the pool to test the pricing enforcement with
|
||||
db, _ := ethdb.NewMemDatabase()
|
||||
statedb, _ := state.New(common.Hash{}, db)
|
||||
|
||||
pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) })
|
||||
pool.resetState()
|
||||
|
||||
// Create a test account to add transactions with
|
||||
key, _ := crypto.GenerateKey()
|
||||
|
||||
state, _ := pool.currentState()
|
||||
state.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
|
||||
|
||||
// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
|
||||
price := int64(100)
|
||||
threshold := (price * (100 + int64(DefaultTxPoolConfig.PriceBump))) / 100
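// Worked numbers, assuming the default PriceBump of 10%: price = 100 yields
// threshold = (100 * (100+10)) / 100 = 110, so the assertions below expect a
// replacement priced at exactly 110 to be rejected and one at 111 to succeed.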
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil {
|
||||
t.Fatalf("failed to add original cheap pending transaction: %v", err)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100001), big.NewInt(1), key)); err != ErrReplaceUnderpriced {
|
||||
t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(2), key)); err != nil {
|
||||
t.Fatalf("failed to replace original cheap pending transaction: %v", err)
|
||||
}
|
||||
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(price), key)); err != nil {
|
||||
t.Fatalf("failed to add original proper pending transaction: %v", err)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(threshold), key)); err != ErrReplaceUnderpriced {
|
||||
t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(threshold+1), key)); err != nil {
|
||||
t.Fatalf("failed to replace original proper pending transaction: %v", err)
|
||||
}
|
||||
// Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(1), key)); err != nil {
|
||||
t.Fatalf("failed to add original queued transaction: %v", err)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100001), big.NewInt(1), key)); err != ErrReplaceUnderpriced {
|
||||
t.Fatalf("original queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(2), key)); err != nil {
|
||||
t.Fatalf("failed to replace original queued transaction: %v", err)
|
||||
}
|
||||
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(price), key)); err != nil {
|
||||
t.Fatalf("failed to add original queued transaction: %v", err)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100001), big.NewInt(threshold), key)); err != ErrReplaceUnderpriced {
|
||||
t.Fatalf("original queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced)
|
||||
}
|
||||
if err := pool.Add(pricedTransaction(2, big.NewInt(100000), big.NewInt(threshold+1), key)); err != nil {
|
||||
t.Fatalf("failed to replace original queued transaction: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmarks the speed of validating the contents of the pending queue of the
|
||||
// transaction pool.
|
||||
func BenchmarkPendingDemotion100(b *testing.B) { benchmarkPendingDemotion(b, 100) }
|
||||
@ -835,7 +1087,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
|
||||
// Benchmark the speed of pool validation
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
pool.promoteExecutables(state)
|
||||
pool.promoteExecutables(state, nil)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,12 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
var ErrInvalidChainId = errors.New("invalid chaid id for signer")
|
||||
var (
|
||||
ErrInvalidChainId = errors.New("invalid chaid id for signer")
|
||||
|
||||
errAbstractSigner = errors.New("abstract signer")
|
||||
abstractSignerAddress = common.HexToAddress("ffffffffffffffffffffffffffffffffffffffff")
|
||||
)
|
||||
|
||||
// sigCache is used to cache the derived sender and contains
|
||||
// the signer used to derive it.
|
||||
|
@ -79,7 +79,7 @@ func decodeTx(data []byte) (*Transaction, error) {
|
||||
}
|
||||
|
||||
func defaultTestKey() (*ecdsa.PrivateKey, common.Address) {
|
||||
key := crypto.ToECDSA(common.Hex2Bytes("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"))
|
||||
key, _ := crypto.HexToECDSA("45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8")
|
||||
addr := crypto.PubkeyToAddress(key.PublicKey)
|
||||
return key, addr
|
||||
}
|
||||
|
@ -18,6 +18,7 @@ package vm
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
@ -27,15 +28,17 @@ import (
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
)
|
||||
|
||||
var errBadPrecompileInput = errors.New("bad pre compile input")
|
||||
|
||||
// Precompiled contract is the basic interface for native Go contracts. The implementation
|
||||
// requires a deterministic gas count based on the input size of the Run method of the
|
||||
// contract.
|
||||
type PrecompiledContract interface {
|
||||
RequiredGas(inputSize int) uint64 // RequiredPrice calculates the contract gas use
|
||||
Run(input []byte) []byte // Run runs the precompiled contract
|
||||
RequiredGas(input []byte) uint64 // RequiredPrice calculates the contract gas use
|
||||
Run(input []byte) ([]byte, error) // Run runs the precompiled contract
|
||||
}
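A minimal sketch of a further contract written against the revised interface; the byteSum type and its flat gas charge are illustrative assumptions, not part of this change.

// byteSum charges a flat (assumed) fee and returns the byte-wise sum of its
// input, left-padded to a 32-byte word.
type byteSum struct{}

func (c *byteSum) RequiredGas(input []byte) uint64 { return 60 }

func (c *byteSum) Run(in []byte) ([]byte, error) {
	var sum byte
	for _, b := range in {
		sum += b
	}
	return common.LeftPadBytes([]byte{sum}, 32), nil
}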
// Precompiled contains the default set of ethereum contracts
|
||||
// PrecompiledContracts contains the default set of ethereum contracts
|
||||
var PrecompiledContracts = map[common.Address]PrecompiledContract{
|
||||
common.BytesToAddress([]byte{1}): &ecrecover{},
|
||||
common.BytesToAddress([]byte{2}): &sha256hash{},
|
||||
@ -45,11 +48,9 @@ var PrecompiledContracts = map[common.Address]PrecompiledContract{
|
||||
|
||||
// RunPrecompiledContract runs and evaluates the output of a precompiled contract defined in contracts.go
|
||||
func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contract) (ret []byte, err error) {
|
||||
gas := p.RequiredGas(len(input))
|
||||
gas := p.RequiredGas(input)
|
||||
if contract.UseGas(gas) {
|
||||
ret = p.Run(input)
|
||||
|
||||
return ret, nil
|
||||
return p.Run(input)
|
||||
} else {
|
||||
return nil, ErrOutOfGas
|
||||
}
|
||||
@ -58,11 +59,11 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, contract *Contr
|
||||
// ECRECOVER implemented as a native contract
|
||||
type ecrecover struct{}
|
||||
|
||||
func (c *ecrecover) RequiredGas(inputSize int) uint64 {
|
||||
func (c *ecrecover) RequiredGas(input []byte) uint64 {
|
||||
return params.EcrecoverGas
|
||||
}
|
||||
|
||||
func (c *ecrecover) Run(in []byte) []byte {
|
||||
func (c *ecrecover) Run(in []byte) ([]byte, error) {
|
||||
const ecRecoverInputLength = 128
|
||||
|
||||
in = common.RightPadBytes(in, ecRecoverInputLength)
|
||||
@ -76,18 +77,18 @@ func (c *ecrecover) Run(in []byte) []byte {
|
||||
// tighter sig s values in homestead only apply to tx sigs
|
||||
if !allZero(in[32:63]) || !crypto.ValidateSignatureValues(v, r, s, false) {
|
||||
log.Trace("ECRECOVER error: v, r or s value invalid")
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
// v needs to be at the end for libsecp256k1
|
||||
pubKey, err := crypto.Ecrecover(in[:32], append(in[64:128], v))
|
||||
// make sure the public key is a valid one
|
||||
if err != nil {
|
||||
log.Trace("ECRECOVER failed", "err", err)
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// the first byte of pubkey is bitcoin heritage
|
||||
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32)
|
||||
return common.LeftPadBytes(crypto.Keccak256(pubKey[1:])[12:], 32), nil
|
||||
}
|
||||
|
||||
// SHA256 implemented as a native contract
|
||||
@ -97,12 +98,12 @@ type sha256hash struct{}
|
||||
//
|
||||
// This method does not require any overflow checking as the input size gas costs
|
||||
// required for anything significant is so high it's impossible to pay for.
|
||||
func (c *sha256hash) RequiredGas(inputSize int) uint64 {
|
||||
return uint64(inputSize+31)/32*params.Sha256WordGas + params.Sha256Gas
|
||||
func (c *sha256hash) RequiredGas(input []byte) uint64 {
|
||||
return uint64(len(input)+31)/32*params.Sha256WordGas + params.Sha256Gas
|
||||
}
|
||||
func (c *sha256hash) Run(in []byte) []byte {
|
||||
func (c *sha256hash) Run(in []byte) ([]byte, error) {
|
||||
h := sha256.Sum256(in)
|
||||
return h[:]
|
||||
return h[:], nil
|
||||
}
|
||||
|
||||
// RIPMED160 implemented as a native contract
|
||||
@ -112,13 +113,13 @@ type ripemd160hash struct{}
|
||||
//
|
||||
// This method does not require any overflow checking as the input size gas costs
|
||||
// required for anything significant is so high it's impossible to pay for.
|
||||
func (c *ripemd160hash) RequiredGas(inputSize int) uint64 {
|
||||
return uint64(inputSize+31)/32*params.Ripemd160WordGas + params.Ripemd160Gas
|
||||
func (c *ripemd160hash) RequiredGas(input []byte) uint64 {
|
||||
return uint64(len(input)+31)/32*params.Ripemd160WordGas + params.Ripemd160Gas
|
||||
}
|
||||
func (c *ripemd160hash) Run(in []byte) []byte {
|
||||
func (c *ripemd160hash) Run(in []byte) ([]byte, error) {
|
||||
ripemd := ripemd160.New()
|
||||
ripemd.Write(in)
|
||||
return common.LeftPadBytes(ripemd.Sum(nil), 32)
|
||||
return common.LeftPadBytes(ripemd.Sum(nil), 32), nil
|
||||
}
|
||||
|
||||
// data copy implemented as a native contract
|
||||
@ -128,9 +129,9 @@ type dataCopy struct{}
|
||||
//
|
||||
// This method does not require any overflow checking as the input size gas costs
|
||||
// required for anything significant is so high it's impossible to pay for.
|
||||
func (c *dataCopy) RequiredGas(inputSize int) uint64 {
|
||||
return uint64(inputSize+31)/32*params.IdentityWordGas + params.IdentityGas
|
||||
func (c *dataCopy) RequiredGas(input []byte) uint64 {
|
||||
return uint64(len(input)+31)/32*params.IdentityWordGas + params.IdentityGas
|
||||
}
|
||||
func (c *dataCopy) Run(in []byte) []byte {
|
||||
return in
|
||||
func (c *dataCopy) Run(in []byte) ([]byte, error) {
|
||||
return in, nil
|
||||
}
|
||||
|
1
core/vm/contracts_test.go
Normal file
1
core/vm/contracts_test.go
Normal file
@ -0,0 +1 @@
|
||||
package vm
|
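Editor's note: to illustrate the revised PrecompiledContract interface (RequiredGas taking the raw input slice, Run returning an explicit error), here is a hedged, self-contained sketch of a toy precompile. It is not part of the diff; the gas constants are made up, but the word-count arithmetic mirrors the sha256hash/ripemd160hash formulas above.

package main

import (
	"crypto/sha256"
	"fmt"
)

// Toy gas constants, standing in for params.Sha256Gas / params.Sha256WordGas.
const (
	baseGas    = 60
	perWordGas = 12
)

// toyHash follows the reworked interface shape: gas is derived from the raw
// input slice and Run reports failures through an explicit error value.
type toyHash struct{}

func (toyHash) RequiredGas(input []byte) uint64 {
	// Charge per 32-byte word, rounding up, plus a fixed base cost.
	return uint64(len(input)+31)/32*perWordGas + baseGas
}

func (toyHash) Run(input []byte) ([]byte, error) {
	h := sha256.Sum256(input)
	return h[:], nil
}

func main() {
	in := make([]byte, 100) // 100 bytes -> ceil(100/32) = 4 words
	c := toyHash{}
	fmt.Println("gas:", c.RequiredGas(in)) // 4*12 + 60 = 108
	out, err := c.Run(in)
	fmt.Printf("output %x, err %v\n", out, err)
}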
@ -33,7 +33,20 @@ type (
GetHashFunc func(uint64) common.Hash
)

// Context provides the EVM with auxiliary information. Once provided it shouldn't be modified.
// run runs the given contract and takes care of running precompiles with a fallback to the byte code interpreter.
func run(evm *EVM, snapshot int, contract *Contract, input []byte) ([]byte, error) {
if contract.CodeAddr != nil {
precompiledContracts := PrecompiledContracts
if p := precompiledContracts[*contract.CodeAddr]; p != nil {
return RunPrecompiledContract(p, input, contract)
}
}

return evm.interpreter.Run(snapshot, contract, input)
}

// Context provides the EVM with auxiliary information. Once provided
|
||||
// it shouldn't be modified.
|
||||
type Context struct {
|
||||
// CanTransfer returns whether the account contains
|
||||
// sufficient ether to transfer the value
|
||||
@ -55,7 +68,13 @@ type Context struct {
|
||||
Difficulty *big.Int // Provides information for DIFFICULTY
|
||||
}
|
||||
|
||||
// EVM provides information about external sources for the EVM
|
||||
// EVM is the Ethereum Virtual Machine base object and provides
|
||||
// the necessary tools to run a contract on the given state with
|
||||
// the provided context. It should be noted that any error
|
||||
// generated through any of the calls should be considered a
|
||||
// revert-state-and-consume-all-gas operation, no checks on
|
||||
// specific errors should ever be performed. The interpreter makes
|
||||
// sure that any errors generated are to be considered faulty code.
|
||||
//
|
||||
// The EVM should never be reused and is not thread safe.
|
||||
type EVM struct {
|
||||
@ -68,6 +87,8 @@ type EVM struct {
|
||||
|
||||
// chainConfig contains information about the current chain
|
||||
chainConfig *params.ChainConfig
|
||||
// chain rules contains the chain rules for the current epoch
|
||||
chainRules params.Rules
|
||||
// virtual machine configuration options used to initialise the
|
||||
// evm.
|
||||
vmConfig Config
|
||||
@ -79,21 +100,23 @@ type EVM struct {
|
||||
abort int32
|
||||
}
|
||||
|
||||
// NewEVM retutrns a new EVM evmironment.
|
||||
// NewEVM retutrns a new EVM evmironment. The returned EVM is not thread safe
|
||||
// and should only ever be used *once*.
|
||||
func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
|
||||
evm := &EVM{
|
||||
Context: ctx,
|
||||
StateDB: statedb,
|
||||
vmConfig: vmConfig,
|
||||
chainConfig: chainConfig,
|
||||
chainRules: chainConfig.Rules(ctx.BlockNumber),
|
||||
}
|
||||
|
||||
evm.interpreter = NewInterpreter(evm, vmConfig)
|
||||
return evm
|
||||
}
|
||||
|
||||
// Cancel cancels any running EVM operation. This may be called concurrently and it's safe to be
|
||||
// called multiple times.
|
||||
// Cancel cancels any running EVM operation. This may be called concurrently and
|
||||
// it's safe to be called multiple times.
|
||||
func (evm *EVM) Cancel() {
|
||||
atomic.StoreInt32(&evm.abort, 1)
|
||||
}
|
||||
@ -134,13 +157,12 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
||||
contract := NewContract(caller, to, value, gas)
|
||||
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
|
||||
|
||||
ret, err = evm.interpreter.Run(contract, input)
|
||||
ret, err = run(evm, snapshot, contract, input)
|
||||
// When an error was returned by the EVM or when setting the creation code
|
||||
// above we revert to the snapshot and consume any gas remaining. Additionally
|
||||
// when we're in homestead this also counts for code storage gas errors.
|
||||
if err != nil {
|
||||
contract.UseGas(contract.Gas)
|
||||
|
||||
evm.StateDB.RevertToSnapshot(snapshot)
|
||||
}
|
||||
return ret, contract.Gas, err
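Editor's note: the new EVM doc comment above states that any error coming out of a call should be treated as "revert state and consume all gas", and the Call/CallCode/DelegateCall bodies now share that pattern via run(). A dependency-free toy illustration of the convention follows; the types are stand-ins, not the geth API.

package main

import (
	"errors"
	"fmt"
)

// fakeState is a minimal stand-in for the state journal: Snapshot and
// RevertToSnapshot mirror the StateDB methods used in the diff above.
type fakeState struct{ balance, saved int }

func (s *fakeState) Snapshot() int          { s.saved = s.balance; return s.saved }
func (s *fakeState) RevertToSnapshot(v int) { s.balance = v }

// call applies fn and, on any error, restores the snapshot and zeroes the
// remaining gas - the "revert state and consume all gas" convention.
func call(state *fakeState, gas uint64, fn func() error) (uint64, error) {
	snapshot := state.Snapshot()
	if err := fn(); err != nil {
		state.RevertToSnapshot(snapshot)
		return 0, err // all gas consumed
	}
	return gas, nil
}

func main() {
	st := &fakeState{balance: 100}
	gasLeft, err := call(st, 21000, func() error {
		st.balance -= 40
		return errors.New("execution failed")
	})
	fmt.Println(st.balance, gasLeft, err) // 100 0 execution failed
}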
|
||||
@ -175,10 +197,9 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
|
||||
contract := NewContract(caller, to, value, gas)
|
||||
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
|
||||
|
||||
ret, err = evm.interpreter.Run(contract, input)
|
||||
ret, err = run(evm, snapshot, contract, input)
|
||||
if err != nil {
|
||||
contract.UseGas(contract.Gas)
|
||||
|
||||
evm.StateDB.RevertToSnapshot(snapshot)
|
||||
}
|
||||
|
||||
@ -210,10 +231,9 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
|
||||
contract := NewContract(caller, to, nil, gas).AsDelegate()
|
||||
contract.SetCallCode(&addr, evm.StateDB.GetCodeHash(addr), evm.StateDB.GetCode(addr))
|
||||
|
||||
ret, err = evm.interpreter.Run(contract, input)
|
||||
ret, err = run(evm, snapshot, contract, input)
|
||||
if err != nil {
|
||||
contract.UseGas(contract.Gas)
|
||||
|
||||
evm.StateDB.RevertToSnapshot(snapshot)
|
||||
}
|
||||
|
||||
@ -253,8 +273,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
|
||||
contract := NewContract(caller, AccountRef(contractAddr), value, gas)
|
||||
contract.SetCallCode(&contractAddr, crypto.Keccak256Hash(code), code)
|
||||
|
||||
ret, err = evm.interpreter.Run(contract, nil)
|
||||
|
||||
ret, err = run(evm, snapshot, contract, nil)
|
||||
// check whether the max code size has been exceeded
|
||||
maxCodeSizeExceeded := len(ret) > params.MaxCodeSize
|
||||
// if the contract creation ran successfully and no errors were returned
|
||||
@ -275,10 +294,8 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
|
||||
// when we're in homestead this also counts for code storage gas errors.
|
||||
if maxCodeSizeExceeded ||
|
||||
(err != nil && (evm.ChainConfig().IsHomestead(evm.BlockNumber) || err != ErrCodeStoreOutOfGas)) {
|
||||
contract.UseGas(contract.Gas)
|
||||
evm.StateDB.RevertToSnapshot(snapshot)
|
||||
|
||||
// Nothing should be returned when an error is thrown.
|
||||
return nil, contractAddr, 0, err
|
||||
}
|
||||
// If the vm returned with an error the return value should be set to nil.
|
||||
// This isn't consensus critical but merely to for behaviour reasons such as
|
||||
|
@ -27,7 +27,9 @@ import (
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
)
|
||||
|
||||
var bigZero = new(big.Int)
|
||||
var (
|
||||
bigZero = new(big.Int)
|
||||
)
|
||||
|
||||
func opAdd(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
x, y := stack.pop(), stack.pop()
|
||||
@ -599,7 +601,7 @@ func opCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Sta
|
||||
contract.Gas += returnGas
|
||||
|
||||
evm.interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize)
|
||||
return nil, nil
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
@ -633,16 +635,10 @@ func opCallCode(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack
|
||||
contract.Gas += returnGas
|
||||
|
||||
evm.interpreter.intPool.put(addr, value, inOffset, inSize, retOffset, retSize)
|
||||
return nil, nil
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
// if not homestead return an error. DELEGATECALL is not supported
|
||||
// during pre-homestead.
|
||||
if !evm.ChainConfig().IsHomestead(evm.BlockNumber) {
|
||||
return nil, fmt.Errorf("invalid opcode %x", DELEGATECALL)
|
||||
}
|
||||
|
||||
gas, to, inOffset, inSize, outOffset, outSize := stack.pop().Uint64(), stack.pop(), stack.pop(), stack.pop(), stack.pop(), stack.pop()
|
||||
|
||||
toAddr := common.BigToAddress(to)
|
||||
@ -658,7 +654,7 @@ func opDelegateCall(pc *uint64, evm *EVM, contract *Contract, memory *Memory, st
|
||||
contract.Gas += returnGas
|
||||
|
||||
evm.interpreter.intPool.put(to, inOffset, inSize, outOffset, outSize)
|
||||
return nil, nil
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func opReturn(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
|
||||
@ -666,6 +662,7 @@ func opReturn(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
|
||||
ret := memory.GetPtr(offset.Int64(), size.Int64())
|
||||
|
||||
evm.interpreter.intPool.put(offset, size)
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
@ -709,10 +706,23 @@ func makeLog(size int) executionFunc {
|
||||
}
|
||||
|
||||
// make push instruction function
func makePush(size uint64, bsize *big.Int) executionFunc {
func makePush(size uint64, pushByteSize int) executionFunc {
return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
byts := getData(contract.Code, evm.interpreter.intPool.get().SetUint64(*pc+1), bsize)
stack.push(new(big.Int).SetBytes(byts))
codeLen := len(contract.Code)

startMin := codeLen
if int(*pc+1) < startMin {
startMin = int(*pc + 1)
}

endMin := codeLen
if startMin+pushByteSize < endMin {
endMin = startMin + pushByteSize
}

integer := evm.interpreter.intPool.get()
stack.push(integer.SetBytes(common.RightPadBytes(contract.Code[startMin:endMin], pushByteSize)))

*pc += size
return nil, nil
}
@ -721,7 +731,7 @@ func makePush(size uint64, bsize *big.Int) executionFunc {
// make push instruction function
func makeDup(size int64) executionFunc {
return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
stack.dup(int(size))
stack.dup(evm.interpreter.intPool, int(size))
return nil, nil
}
}
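Editor's note: the reworked makePush above replaces big.Int-based offsets with explicit bounds clamping plus right padding, so a PUSH whose immediate runs past the end of the code still yields a full-width operand. A small, dependency-free sketch of that clamping (pushOperand is illustrative, not a geth helper):

package main

import "fmt"

// pushOperand mirrors the bounds logic in makePush: clamp the start and end
// offsets to the code length, then right-pad the slice to the push width.
func pushOperand(code []byte, pc uint64, pushByteSize int) []byte {
	codeLen := len(code)

	start := codeLen
	if int(pc+1) < start {
		start = int(pc + 1)
	}
	end := codeLen
	if start+pushByteSize < end {
		end = start + pushByteSize
	}

	out := make([]byte, pushByteSize) // zero-filled, i.e. right padded
	copy(out, code[start:end])
	return out
}

func main() {
	// PUSH4 at pc=0, but only two immediate bytes are present in the code.
	code := []byte{0x63, 0xAA, 0xBB} // 0x63 is PUSH4
	fmt.Printf("% x\n", pushOperand(code, 0, 4)) // aa bb 00 00
}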
|
||||
|
@ -52,43 +52,53 @@ type Config struct {
|
||||
}
|
||||
|
||||
// Interpreter is used to run Ethereum based contracts and will utilise the
|
||||
// passed environment to query external sources for state information.
|
||||
// passed evmironment to query external sources for state information.
|
||||
// The Interpreter will run the byte code VM or JIT VM based on the passed
|
||||
// configuration.
|
||||
type Interpreter struct {
|
||||
env *EVM
|
||||
evm *EVM
|
||||
cfg Config
|
||||
gasTable params.GasTable
|
||||
intPool *intPool
|
||||
|
||||
readonly bool
|
||||
}
|
||||
|
||||
// NewInterpreter returns a new instance of the Interpreter.
|
||||
func NewInterpreter(env *EVM, cfg Config) *Interpreter {
|
||||
func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
|
||||
// We use the STOP instruction whether to see
|
||||
// the jump table was initialised. If it was not
|
||||
// we'll set the default jump table.
|
||||
if !cfg.JumpTable[STOP].valid {
|
||||
cfg.JumpTable = defaultJumpTable
|
||||
switch {
|
||||
case evm.ChainConfig().IsHomestead(evm.BlockNumber):
|
||||
cfg.JumpTable = homesteadInstructionSet
|
||||
default:
|
||||
cfg.JumpTable = frontierInstructionSet
|
||||
}
|
||||
}
|
||||
|
||||
return &Interpreter{
|
||||
env: env,
|
||||
evm: evm,
|
||||
cfg: cfg,
|
||||
gasTable: env.ChainConfig().GasTable(env.BlockNumber),
|
||||
gasTable: evm.ChainConfig().GasTable(evm.BlockNumber),
|
||||
intPool: newIntPool(),
|
||||
}
|
||||
}
|
||||
|
||||
// Run loops and evaluates the contract's code with the given input data
|
||||
func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err error) {
|
||||
evm.env.depth++
|
||||
defer func() { evm.env.depth-- }()
|
||||
func (in *Interpreter) enforceRestrictions(op OpCode, operation operation, stack *Stack) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if contract.CodeAddr != nil {
|
||||
if p := PrecompiledContracts[*contract.CodeAddr]; p != nil {
|
||||
return RunPrecompiledContract(p, input, contract)
|
||||
}
|
||||
}
|
||||
// Run loops and evaluates the contract's code with the given input data and returns
|
||||
// the return byte-slice and an error if one occurred.
|
||||
//
|
||||
// It's important to note that any errors returned by the interpreter should be
|
||||
// considered a revert-and-consume-all-gas operation. No error specific checks
|
||||
// should be handled to reduce complexity and errors further down the in.
|
||||
func (in *Interpreter) Run(snapshot int, contract *Contract, input []byte) (ret []byte, err error) {
|
||||
in.evm.depth++
|
||||
defer func() { in.evm.depth-- }()
|
||||
|
||||
// Don't bother with the execution if there's no code.
|
||||
if len(contract.Code) == 0 {
|
||||
@ -105,7 +115,8 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
|
||||
mem = NewMemory() // bound memory
|
||||
stack = newstack() // local stack
|
||||
// For optimisation reason we're using uint64 as the program counter.
|
||||
// It's theoretically possible to go above 2^64. The YP defines the PC to be uint256. Practically much less so feasible.
|
||||
// It's theoretically possible to go above 2^64. The YP defines the PC
|
||||
// to be uint256. Practically much less so feasible.
|
||||
pc = uint64(0) // program counter
|
||||
cost uint64
|
||||
)
|
||||
@ -113,31 +124,34 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
|
||||
|
||||
// User defer pattern to check for an error and, based on the error being nil or not, use all gas and return.
|
||||
defer func() {
|
||||
if err != nil && evm.cfg.Debug {
|
||||
if err != nil && in.cfg.Debug {
|
||||
// XXX For debugging
|
||||
//fmt.Printf("%04d: %8v cost = %-8d stack = %-8d ERR = %v\n", pc, op, cost, stack.len(), err)
|
||||
evm.cfg.Tracer.CaptureState(evm.env, pc, op, contract.Gas, cost, mem, stack, contract, evm.env.depth, err)
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Debug("EVM running contract", "hash", codehash[:])
|
||||
log.Debug("interpreter running contract", "hash", codehash[:])
|
||||
tstart := time.Now()
|
||||
defer log.Debug("EVM finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
|
||||
defer log.Debug("interpreter finished running contract", "hash", codehash[:], "elapsed", time.Since(tstart))
|
||||
|
||||
// The Interpreter main run loop (contextual). This loop runs until either an
|
||||
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
|
||||
// the execution of one of the operations or until the evm.done is set by
|
||||
// the parent context.Context.
|
||||
for atomic.LoadInt32(&evm.env.abort) == 0 {
|
||||
// the execution of one of the operations or until the done flag is set by the
|
||||
// parent context.
|
||||
for atomic.LoadInt32(&in.evm.abort) == 0 {
|
||||
// Get the memory location of pc
|
||||
op = contract.GetOp(pc)
|
||||
|
||||
// get the operation from the jump table matching the opcode
|
||||
operation := evm.cfg.JumpTable[op]
|
||||
operation := in.cfg.JumpTable[op]
|
||||
if err := in.enforceRestrictions(op, operation, stack); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if the op is invalid abort the process and return an error
|
||||
if !operation.valid {
|
||||
return nil, fmt.Errorf("invalid opcode %x", op)
|
||||
return nil, fmt.Errorf("invalid opcode 0x%x", int(op))
|
||||
}
|
||||
|
||||
// validate the stack and make sure there enough stack items available
|
||||
@ -161,10 +175,10 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
|
||||
}
|
||||
}
|
||||
|
||||
if !evm.cfg.DisableGasMetering {
|
||||
if !in.cfg.DisableGasMetering {
|
||||
// consume the gas and return an error if not enough gas is available.
|
||||
// cost is explicitly set so that the capture state defer method cas get the proper cost
|
||||
cost, err = operation.gasCost(evm.gasTable, evm.env, contract, stack, mem, memorySize)
|
||||
cost, err = operation.gasCost(in.gasTable, in.evm, contract, stack, mem, memorySize)
|
||||
if err != nil || !contract.UseGas(cost) {
|
||||
return nil, ErrOutOfGas
|
||||
}
|
||||
@ -173,19 +187,20 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
|
||||
mem.Resize(memorySize)
|
||||
}
|
||||
|
||||
if evm.cfg.Debug {
|
||||
evm.cfg.Tracer.CaptureState(evm.env, pc, op, contract.Gas, cost, mem, stack, contract, evm.env.depth, err)
|
||||
if in.cfg.Debug {
|
||||
in.cfg.Tracer.CaptureState(in.evm, pc, op, contract.Gas, cost, mem, stack, contract, in.evm.depth, err)
|
||||
}
|
||||
// XXX For debugging
|
||||
//fmt.Printf("%04d: %8v cost = %-8d stack = %-8d\n", pc, op, cost, stack.len())
|
||||
|
||||
// execute the operation
|
||||
res, err := operation.execute(&pc, evm.env, contract, mem, stack)
|
||||
res, err := operation.execute(&pc, in.evm, contract, mem, stack)
|
||||
// verifyPool is a build flag. Pool verification makes sure the integrity
|
||||
// of the integer pool by comparing values to a default value.
|
||||
if verifyPool {
|
||||
verifyIntegerPool(evm.intPool)
|
||||
verifyIntegerPool(in.intPool)
|
||||
}
|
||||
|
||||
switch {
|
||||
case err != nil:
|
||||
return nil, err
|
||||
@ -194,6 +209,11 @@ func (evm *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err e
|
||||
case !operation.jumps:
|
||||
pc++
|
||||
}
|
||||
// if the operation returned a value make sure that is also set
|
||||
// the last return data.
|
||||
if res != nil {
|
||||
mem.lastReturn = ret
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
@ -20,6 +20,8 @@ import "math/big"
|
||||
|
||||
var checkVal = big.NewInt(-42)
|
||||
|
||||
const poolLimit = 256
|
||||
|
||||
// intPool is a pool of big integers that
|
||||
// can be reused for all big.Int operations.
|
||||
type intPool struct {
|
||||
@ -37,6 +39,10 @@ func (p *intPool) get() *big.Int {
|
||||
return new(big.Int)
|
||||
}
|
||||
func (p *intPool) put(is ...*big.Int) {
|
||||
if len(p.pool.data) > poolLimit {
|
||||
return
|
||||
}
|
||||
|
||||
for _, i := range is {
|
||||
// verifyPool is a build flag. Pool verification makes sure the integrity
|
||||
// of the integer pool by comparing values to a default value.
|
||||
|
@ -47,13 +47,36 @@ type operation struct {
|
||||
// jumps indicates whether operation made a jump. This prevents the program
|
||||
// counter from further incrementing.
|
||||
jumps bool
|
||||
// writes determines whether this a state modifying operation
|
||||
writes bool
|
||||
// valid is used to check whether the retrieved operation is valid and known
|
||||
valid bool
|
||||
// reverts determined whether the operation reverts state
|
||||
reverts bool
|
||||
}
|
||||
|
||||
var defaultJumpTable = NewJumpTable()
var (
frontierInstructionSet = NewFrontierInstructionSet()
homesteadInstructionSet = NewHomesteadInstructionSet()
)

func NewJumpTable() [256]operation {
// NewHomesteadInstructionSet returns the frontier and homestead
// instructions that can be executed during the homestead phase.
func NewHomesteadInstructionSet() [256]operation {
instructionSet := NewFrontierInstructionSet()
instructionSet[DELEGATECALL] = operation{
execute: opDelegateCall,
gasCost: gasDelegateCall,
validateStack: makeStackFunc(6, 1),
memorySize: memoryDelegateCall,
valid: true,
}
return instructionSet
}
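Editor's note: splitting the jump table into NewFrontierInstructionSet and NewHomesteadInstructionSet makes each fork's opcode set a derivation of the previous one, relying on the fact that Go arrays are value types, so copying the set and patching entries is cheap and safe. A self-contained toy model of that layering (the struct and opcodes below are simplified stand-ins, not the vm package types):

package main

import "fmt"

// A cut-down model of the jump-table layering used above: each fork copies the
// previous instruction set (arrays copy by value in Go) and patches entries.
type operation struct {
	name  string
	valid bool
}

func newFrontierSet() [256]operation {
	var set [256]operation
	set[0x00] = operation{name: "STOP", valid: true}
	set[0x01] = operation{name: "ADD", valid: true}
	return set
}

func newHomesteadSet() [256]operation {
	set := newFrontierSet()
	set[0xf4] = operation{name: "DELEGATECALL", valid: true} // homestead-only opcode
	return set
}

func main() {
	frontier, homestead := newFrontierSet(), newHomesteadSet()
	fmt.Println(frontier[0xf4].valid, homestead[0xf4].valid) // false true
}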
|
||||
|
||||
// NewFrontierInstructionSet returns the frontier instructions
|
||||
// that can be executed during the frontier phase.
|
||||
func NewFrontierInstructionSet() [256]operation {
|
||||
return [256]operation{
|
||||
STOP: {
|
||||
execute: opStop,
|
||||
@ -357,6 +380,7 @@ func NewJumpTable() [256]operation {
|
||||
gasCost: gasSStore,
|
||||
validateStack: makeStackFunc(2, 0),
|
||||
valid: true,
|
||||
writes: true,
|
||||
},
|
||||
JUMP: {
|
||||
execute: opJump,
|
||||
@ -397,193 +421,193 @@ func NewJumpTable() [256]operation {
|
||||
valid: true,
|
||||
},
|
||||
PUSH1: {
|
||||
execute: makePush(1, big.NewInt(1)),
|
||||
execute: makePush(1, 1),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH2: {
|
||||
execute: makePush(2, big.NewInt(2)),
|
||||
execute: makePush(2, 2),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH3: {
|
||||
execute: makePush(3, big.NewInt(3)),
|
||||
execute: makePush(3, 3),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH4: {
|
||||
execute: makePush(4, big.NewInt(4)),
|
||||
execute: makePush(4, 4),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH5: {
|
||||
execute: makePush(5, big.NewInt(5)),
|
||||
execute: makePush(5, 5),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH6: {
|
||||
execute: makePush(6, big.NewInt(6)),
|
||||
execute: makePush(6, 6),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH7: {
|
||||
execute: makePush(7, big.NewInt(7)),
|
||||
execute: makePush(7, 7),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH8: {
|
||||
execute: makePush(8, big.NewInt(8)),
|
||||
execute: makePush(8, 8),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH9: {
|
||||
execute: makePush(9, big.NewInt(9)),
|
||||
execute: makePush(9, 9),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH10: {
|
||||
execute: makePush(10, big.NewInt(10)),
|
||||
execute: makePush(10, 10),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH11: {
|
||||
execute: makePush(11, big.NewInt(11)),
|
||||
execute: makePush(11, 11),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH12: {
|
||||
execute: makePush(12, big.NewInt(12)),
|
||||
execute: makePush(12, 12),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH13: {
|
||||
execute: makePush(13, big.NewInt(13)),
|
||||
execute: makePush(13, 13),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH14: {
|
||||
execute: makePush(14, big.NewInt(14)),
|
||||
execute: makePush(14, 14),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH15: {
|
||||
execute: makePush(15, big.NewInt(15)),
|
||||
execute: makePush(15, 15),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH16: {
|
||||
execute: makePush(16, big.NewInt(16)),
|
||||
execute: makePush(16, 16),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH17: {
|
||||
execute: makePush(17, big.NewInt(17)),
|
||||
execute: makePush(17, 17),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH18: {
|
||||
execute: makePush(18, big.NewInt(18)),
|
||||
execute: makePush(18, 18),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH19: {
|
||||
execute: makePush(19, big.NewInt(19)),
|
||||
execute: makePush(19, 19),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH20: {
|
||||
execute: makePush(20, big.NewInt(20)),
|
||||
execute: makePush(20, 20),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH21: {
|
||||
execute: makePush(21, big.NewInt(21)),
|
||||
execute: makePush(21, 21),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH22: {
|
||||
execute: makePush(22, big.NewInt(22)),
|
||||
execute: makePush(22, 22),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH23: {
|
||||
execute: makePush(23, big.NewInt(23)),
|
||||
execute: makePush(23, 23),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH24: {
|
||||
execute: makePush(24, big.NewInt(24)),
|
||||
execute: makePush(24, 24),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH25: {
|
||||
execute: makePush(25, big.NewInt(25)),
|
||||
execute: makePush(25, 25),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH26: {
|
||||
execute: makePush(26, big.NewInt(26)),
|
||||
execute: makePush(26, 26),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH27: {
|
||||
execute: makePush(27, big.NewInt(27)),
|
||||
execute: makePush(27, 27),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH28: {
|
||||
execute: makePush(28, big.NewInt(28)),
|
||||
execute: makePush(28, 28),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH29: {
|
||||
execute: makePush(29, big.NewInt(29)),
|
||||
execute: makePush(29, 29),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH30: {
|
||||
execute: makePush(30, big.NewInt(30)),
|
||||
execute: makePush(30, 30),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH31: {
|
||||
execute: makePush(31, big.NewInt(31)),
|
||||
execute: makePush(31, 31),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
},
|
||||
PUSH32: {
|
||||
execute: makePush(32, big.NewInt(32)),
|
||||
execute: makePush(32, 32),
|
||||
gasCost: gasPush,
|
||||
validateStack: makeStackFunc(0, 1),
|
||||
valid: true,
|
||||
@ -821,6 +845,7 @@ func NewJumpTable() [256]operation {
|
||||
validateStack: makeStackFunc(3, 1),
|
||||
memorySize: memoryCreate,
|
||||
valid: true,
|
||||
writes: true,
|
||||
},
|
||||
CALL: {
|
||||
execute: opCall,
|
||||
@ -844,19 +869,13 @@ func NewJumpTable() [256]operation {
|
||||
halts: true,
|
||||
valid: true,
|
||||
},
|
||||
DELEGATECALL: {
|
||||
execute: opDelegateCall,
|
||||
gasCost: gasDelegateCall,
|
||||
validateStack: makeStackFunc(6, 1),
|
||||
memorySize: memoryDelegateCall,
|
||||
valid: true,
|
||||
},
|
||||
SELFDESTRUCT: {
|
||||
execute: opSuicide,
|
||||
gasCost: gasSuicide,
|
||||
validateStack: makeStackFunc(1, 0),
|
||||
halts: true,
|
||||
valid: true,
|
||||
writes: true,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -22,6 +22,7 @@ import "fmt"
|
||||
type Memory struct {
|
||||
store []byte
|
||||
lastGasCost uint64
|
||||
lastReturn []byte
|
||||
}
|
||||
|
||||
func NewMemory() *Memory {
|
||||
|
@ -29,7 +29,7 @@ type Stack struct {
}

func newstack() *Stack {
return &Stack{}
return &Stack{data: make([]*big.Int, 0, 1024)}
}

func (st *Stack) Data() []*big.Int {
@ -60,8 +60,8 @@ func (st *Stack) swap(n int) {
st.data[st.len()-n], st.data[st.len()-1] = st.data[st.len()-1], st.data[st.len()-n]
}

func (st *Stack) dup(n int) {
st.push(new(big.Int).Set(st.data[st.len()-n]))
func (st *Stack) dup(pool *intPool, n int) {
st.push(pool.get().Set(st.data[st.len()-n]))
}
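Editor's note: passing the interpreter's intPool into dup (and preallocating the stack's backing slice to 1024 entries) trims per-opcode allocations: DUP reuses a pooled big.Int instead of allocating a fresh one. A toy model of that reuse; the real pool also caps its size and can verify returned values.

package main

import (
	"fmt"
	"math/big"
)

// intPool is a trivial free list of *big.Int, in the spirit of core/vm's pool.
type intPool struct{ free []*big.Int }

func (p *intPool) get() *big.Int {
	if n := len(p.free); n > 0 {
		x := p.free[n-1]
		p.free = p.free[:n-1]
		return x
	}
	return new(big.Int)
}

func (p *intPool) put(xs ...*big.Int) { p.free = append(p.free, xs...) }

func main() {
	pool := &intPool{}
	stack := make([]*big.Int, 0, 1024) // preallocated, like newstack()

	stack = append(stack, big.NewInt(7))
	// DUP1: copy the top of the stack into a pooled integer.
	stack = append(stack, pool.get().Set(stack[len(stack)-1]))

	top := stack[len(stack)-1]
	stack = stack[:len(stack)-1]
	pool.put(top) // popped values return to the pool for the next opcode

	fmt.Println(stack[0], top, len(pool.free)) // 7 7 1
}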
|
||||
|
||||
func (st *Stack) peek() *big.Int {
|
||||
|
428
crypto/bn256/bn256.go
Normal file
@ -0,0 +1,428 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bn256 implements a particular bilinear group at the 128-bit security level.
|
||||
//
|
||||
// Bilinear groups are the basis of many of the new cryptographic protocols
|
||||
// that have been proposed over the past decade. They consist of a triplet of
|
||||
// groups (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ
|
||||
// (where gₓ is a generator of the respective group). That function is called
|
||||
// a pairing function.
|
||||
//
|
||||
// This package specifically implements the Optimal Ate pairing over a 256-bit
|
||||
// Barreto-Naehrig curve as described in
|
||||
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
|
||||
// with the implementation described in that paper.
|
||||
package bn256
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// BUG(agl): this implementation is not constant time.
|
||||
// TODO(agl): keep GF(p²) elements in Mongomery form.
|
||||
|
||||
// G1 is an abstract cyclic group. The zero value is suitable for use as the
|
||||
// output of an operation, but cannot be used as an input.
|
||||
type G1 struct {
|
||||
p *curvePoint
|
||||
}
|
||||
|
||||
// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
|
||||
func RandomG1(r io.Reader) (*big.Int, *G1, error) {
|
||||
var k *big.Int
|
||||
var err error
|
||||
|
||||
for {
|
||||
k, err = rand.Int(r, Order)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if k.Sign() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return k, new(G1).ScalarBaseMult(k), nil
|
||||
}
|
||||
|
||||
func (g *G1) String() string {
|
||||
return "bn256.G1" + g.p.String()
|
||||
}
|
||||
|
||||
// CurvePoints returns p's curve points in big integer
|
||||
func (e *G1) CurvePoints() (*big.Int, *big.Int, *big.Int, *big.Int) {
|
||||
return e.p.x, e.p.y, e.p.z, e.p.t
|
||||
}
|
||||
|
||||
// ScalarBaseMult sets e to g*k where g is the generator of the group and
|
||||
// then returns e.
|
||||
func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
|
||||
if e.p == nil {
|
||||
e.p = newCurvePoint(nil)
|
||||
}
|
||||
e.p.Mul(curveGen, k, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// ScalarMult sets e to a*k and then returns e.
|
||||
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
|
||||
if e.p == nil {
|
||||
e.p = newCurvePoint(nil)
|
||||
}
|
||||
e.p.Mul(a.p, k, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Add sets e to a+b and then returns e.
|
||||
// BUG(agl): this function is not complete: a==b fails.
|
||||
func (e *G1) Add(a, b *G1) *G1 {
|
||||
if e.p == nil {
|
||||
e.p = newCurvePoint(nil)
|
||||
}
|
||||
e.p.Add(a.p, b.p, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Neg sets e to -a and then returns e.
|
||||
func (e *G1) Neg(a *G1) *G1 {
|
||||
if e.p == nil {
|
||||
e.p = newCurvePoint(nil)
|
||||
}
|
||||
e.p.Negative(a.p)
|
||||
return e
|
||||
}
|
||||
|
||||
// Marshal converts n to a byte slice.
|
||||
func (n *G1) Marshal() []byte {
|
||||
n.p.MakeAffine(nil)
|
||||
|
||||
xBytes := new(big.Int).Mod(n.p.x, P).Bytes()
|
||||
yBytes := new(big.Int).Mod(n.p.y, P).Bytes()
|
||||
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
ret := make([]byte, numBytes*2)
|
||||
copy(ret[1*numBytes-len(xBytes):], xBytes)
|
||||
copy(ret[2*numBytes-len(yBytes):], yBytes)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||
// a group element and then returns e.
|
||||
func (e *G1) Unmarshal(m []byte) (*G1, bool) {
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
if len(m) != 2*numBytes {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if e.p == nil {
|
||||
e.p = newCurvePoint(nil)
|
||||
}
|
||||
|
||||
e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
|
||||
e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
|
||||
|
||||
if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
|
||||
// This is the point at infinity.
|
||||
e.p.y.SetInt64(1)
|
||||
e.p.z.SetInt64(0)
|
||||
e.p.t.SetInt64(0)
|
||||
} else {
|
||||
e.p.z.SetInt64(1)
|
||||
e.p.t.SetInt64(1)
|
||||
|
||||
if !e.p.IsOnCurve() {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
return e, true
|
||||
}
|
||||
|
||||
// G2 is an abstract cyclic group. The zero value is suitable for use as the
|
||||
// output of an operation, but cannot be used as an input.
|
||||
type G2 struct {
|
||||
p *twistPoint
|
||||
}
|
||||
|
||||
// RandomG1 returns x and g₂ˣ where x is a random, non-zero number read from r.
|
||||
func RandomG2(r io.Reader) (*big.Int, *G2, error) {
|
||||
var k *big.Int
|
||||
var err error
|
||||
|
||||
for {
|
||||
k, err = rand.Int(r, Order)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if k.Sign() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return k, new(G2).ScalarBaseMult(k), nil
|
||||
}
|
||||
|
||||
func (g *G2) String() string {
|
||||
return "bn256.G2" + g.p.String()
|
||||
}
|
||||
|
||||
// CurvePoints returns the curve points of p which includes the real
|
||||
// and imaginary parts of the curve point.
|
||||
func (e *G2) CurvePoints() (*gfP2, *gfP2, *gfP2, *gfP2) {
|
||||
return e.p.x, e.p.y, e.p.z, e.p.t
|
||||
}
|
||||
|
||||
// ScalarBaseMult sets e to g*k where g is the generator of the group and
|
||||
// then returns out.
|
||||
func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
|
||||
if e.p == nil {
|
||||
e.p = newTwistPoint(nil)
|
||||
}
|
||||
e.p.Mul(twistGen, k, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// ScalarMult sets e to a*k and then returns e.
|
||||
func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
|
||||
if e.p == nil {
|
||||
e.p = newTwistPoint(nil)
|
||||
}
|
||||
e.p.Mul(a.p, k, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Add sets e to a+b and then returns e.
|
||||
// BUG(agl): this function is not complete: a==b fails.
|
||||
func (e *G2) Add(a, b *G2) *G2 {
|
||||
if e.p == nil {
|
||||
e.p = newTwistPoint(nil)
|
||||
}
|
||||
e.p.Add(a.p, b.p, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Marshal converts n into a byte slice.
|
||||
func (n *G2) Marshal() []byte {
|
||||
n.p.MakeAffine(nil)
|
||||
|
||||
xxBytes := new(big.Int).Mod(n.p.x.x, P).Bytes()
|
||||
xyBytes := new(big.Int).Mod(n.p.x.y, P).Bytes()
|
||||
yxBytes := new(big.Int).Mod(n.p.y.x, P).Bytes()
|
||||
yyBytes := new(big.Int).Mod(n.p.y.y, P).Bytes()
|
||||
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
ret := make([]byte, numBytes*4)
|
||||
copy(ret[1*numBytes-len(xxBytes):], xxBytes)
|
||||
copy(ret[2*numBytes-len(xyBytes):], xyBytes)
|
||||
copy(ret[3*numBytes-len(yxBytes):], yxBytes)
|
||||
copy(ret[4*numBytes-len(yyBytes):], yyBytes)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||
// a group element and then returns e.
|
||||
func (e *G2) Unmarshal(m []byte) (*G2, bool) {
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
if len(m) != 4*numBytes {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if e.p == nil {
|
||||
e.p = newTwistPoint(nil)
|
||||
}
|
||||
|
||||
e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
|
||||
e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
|
||||
e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
|
||||
e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
|
||||
|
||||
if e.p.x.x.Sign() == 0 &&
|
||||
e.p.x.y.Sign() == 0 &&
|
||||
e.p.y.x.Sign() == 0 &&
|
||||
e.p.y.y.Sign() == 0 {
|
||||
// This is the point at infinity.
|
||||
e.p.y.SetOne()
|
||||
e.p.z.SetZero()
|
||||
e.p.t.SetZero()
|
||||
} else {
|
||||
e.p.z.SetOne()
|
||||
e.p.t.SetOne()
|
||||
|
||||
if !e.p.IsOnCurve() {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
return e, true
|
||||
}
|
||||
|
||||
// GT is an abstract cyclic group. The zero value is suitable for use as the
|
||||
// output of an operation, but cannot be used as an input.
|
||||
type GT struct {
|
||||
p *gfP12
|
||||
}
|
||||
|
||||
func (g *GT) String() string {
|
||||
return "bn256.GT" + g.p.String()
|
||||
}
|
||||
|
||||
// ScalarMult sets e to a*k and then returns e.
|
||||
func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
|
||||
if e.p == nil {
|
||||
e.p = newGFp12(nil)
|
||||
}
|
||||
e.p.Exp(a.p, k, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Add sets e to a+b and then returns e.
|
||||
func (e *GT) Add(a, b *GT) *GT {
|
||||
if e.p == nil {
|
||||
e.p = newGFp12(nil)
|
||||
}
|
||||
e.p.Mul(a.p, b.p, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Neg sets e to -a and then returns e.
|
||||
func (e *GT) Neg(a *GT) *GT {
|
||||
if e.p == nil {
|
||||
e.p = newGFp12(nil)
|
||||
}
|
||||
e.p.Invert(a.p, new(bnPool))
|
||||
return e
|
||||
}
|
||||
|
||||
// Marshal converts n into a byte slice.
|
||||
func (n *GT) Marshal() []byte {
|
||||
n.p.Minimal()
|
||||
|
||||
xxxBytes := n.p.x.x.x.Bytes()
|
||||
xxyBytes := n.p.x.x.y.Bytes()
|
||||
xyxBytes := n.p.x.y.x.Bytes()
|
||||
xyyBytes := n.p.x.y.y.Bytes()
|
||||
xzxBytes := n.p.x.z.x.Bytes()
|
||||
xzyBytes := n.p.x.z.y.Bytes()
|
||||
yxxBytes := n.p.y.x.x.Bytes()
|
||||
yxyBytes := n.p.y.x.y.Bytes()
|
||||
yyxBytes := n.p.y.y.x.Bytes()
|
||||
yyyBytes := n.p.y.y.y.Bytes()
|
||||
yzxBytes := n.p.y.z.x.Bytes()
|
||||
yzyBytes := n.p.y.z.y.Bytes()
|
||||
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
ret := make([]byte, numBytes*12)
|
||||
copy(ret[1*numBytes-len(xxxBytes):], xxxBytes)
|
||||
copy(ret[2*numBytes-len(xxyBytes):], xxyBytes)
|
||||
copy(ret[3*numBytes-len(xyxBytes):], xyxBytes)
|
||||
copy(ret[4*numBytes-len(xyyBytes):], xyyBytes)
|
||||
copy(ret[5*numBytes-len(xzxBytes):], xzxBytes)
|
||||
copy(ret[6*numBytes-len(xzyBytes):], xzyBytes)
|
||||
copy(ret[7*numBytes-len(yxxBytes):], yxxBytes)
|
||||
copy(ret[8*numBytes-len(yxyBytes):], yxyBytes)
|
||||
copy(ret[9*numBytes-len(yyxBytes):], yyxBytes)
|
||||
copy(ret[10*numBytes-len(yyyBytes):], yyyBytes)
|
||||
copy(ret[11*numBytes-len(yzxBytes):], yzxBytes)
|
||||
copy(ret[12*numBytes-len(yzyBytes):], yzyBytes)
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
// Unmarshal sets e to the result of converting the output of Marshal back into
|
||||
// a group element and then returns e.
|
||||
func (e *GT) Unmarshal(m []byte) (*GT, bool) {
|
||||
// Each value is a 256-bit number.
|
||||
const numBytes = 256 / 8
|
||||
|
||||
if len(m) != 12*numBytes {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
if e.p == nil {
|
||||
e.p = newGFp12(nil)
|
||||
}
|
||||
|
||||
e.p.x.x.x.SetBytes(m[0*numBytes : 1*numBytes])
|
||||
e.p.x.x.y.SetBytes(m[1*numBytes : 2*numBytes])
|
||||
e.p.x.y.x.SetBytes(m[2*numBytes : 3*numBytes])
|
||||
e.p.x.y.y.SetBytes(m[3*numBytes : 4*numBytes])
|
||||
e.p.x.z.x.SetBytes(m[4*numBytes : 5*numBytes])
|
||||
e.p.x.z.y.SetBytes(m[5*numBytes : 6*numBytes])
|
||||
e.p.y.x.x.SetBytes(m[6*numBytes : 7*numBytes])
|
||||
e.p.y.x.y.SetBytes(m[7*numBytes : 8*numBytes])
|
||||
e.p.y.y.x.SetBytes(m[8*numBytes : 9*numBytes])
|
||||
e.p.y.y.y.SetBytes(m[9*numBytes : 10*numBytes])
|
||||
e.p.y.z.x.SetBytes(m[10*numBytes : 11*numBytes])
|
||||
e.p.y.z.y.SetBytes(m[11*numBytes : 12*numBytes])
|
||||
|
||||
return e, true
|
||||
}
|
||||
|
||||
// Pair calculates an Optimal Ate pairing.
|
||||
func Pair(g1 *G1, g2 *G2) *GT {
|
||||
return >{optimalAte(g2.p, g1.p, new(bnPool))}
|
||||
}
|
||||
|
||||
func PairingCheck(a []*G1, b []*G2) bool {
|
||||
pool := new(bnPool)
|
||||
e := newGFp12(pool)
|
||||
e.SetOne()
|
||||
for i := 0; i < len(a); i++ {
|
||||
new_e := miller(b[i].p, a[i].p, pool)
|
||||
e.Mul(e, new_e, pool)
|
||||
}
|
||||
ret := finalExponentiation(e, pool)
|
||||
e.Put(pool)
|
||||
return ret.IsOne()
|
||||
}
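Editor's note: PairingCheck returns true when the product of the pairings e(a[i], b[i]) equals the identity in GT, which is the primitive used by pairing-based signature and SNARK verifiers. A short usage sketch against the API introduced in this file (the import path is the one this change creates):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto/bn256"
)

func main() {
	// A random point in each group (the scalars are not needed here).
	_, p, _ := bn256.RandomG1(rand.Reader)
	_, q, _ := bn256.RandomG2(rand.Reader)

	// e(p, q) * e(-p, q) == 1, so the check passes.
	ok := bn256.PairingCheck([]*bn256.G1{p, new(bn256.G1).Neg(p)}, []*bn256.G2{q, q})
	fmt.Println(ok) // expected: true

	// Replacing -p with p makes the product e(p, q)^2, which is not 1.
	bad := bn256.PairingCheck([]*bn256.G1{p, p}, []*bn256.G2{q, q})
	fmt.Println(bad) // expected: false
}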
|
||||
|
||||
// bnPool implements a tiny cache of *big.Int objects that's used to reduce the
|
||||
// number of allocations made during processing.
|
||||
type bnPool struct {
|
||||
bns []*big.Int
|
||||
count int
|
||||
}
|
||||
|
||||
func (pool *bnPool) Get() *big.Int {
|
||||
if pool == nil {
|
||||
return new(big.Int)
|
||||
}
|
||||
|
||||
pool.count++
|
||||
l := len(pool.bns)
|
||||
if l == 0 {
|
||||
return new(big.Int)
|
||||
}
|
||||
|
||||
bn := pool.bns[l-1]
|
||||
pool.bns = pool.bns[:l-1]
|
||||
return bn
|
||||
}
|
||||
|
||||
func (pool *bnPool) Put(bn *big.Int) {
|
||||
if pool == nil {
|
||||
return
|
||||
}
|
||||
pool.bns = append(pool.bns, bn)
|
||||
pool.count--
|
||||
}
|
||||
|
||||
func (pool *bnPool) Count() int {
|
||||
return pool.count
|
||||
}
|
304
crypto/bn256/bn256_test.go
Normal file
@ -0,0 +1,304 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bn256
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"math/big"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGFp2Invert(t *testing.T) {
|
||||
pool := new(bnPool)
|
||||
|
||||
a := newGFp2(pool)
|
||||
a.x.SetString("23423492374", 10)
|
||||
a.y.SetString("12934872398472394827398470", 10)
|
||||
|
||||
inv := newGFp2(pool)
|
||||
inv.Invert(a, pool)
|
||||
|
||||
b := newGFp2(pool).Mul(inv, a, pool)
|
||||
if b.x.Int64() != 0 || b.y.Int64() != 1 {
|
||||
t.Fatalf("bad result for a^-1*a: %s %s", b.x, b.y)
|
||||
}
|
||||
|
||||
a.Put(pool)
|
||||
b.Put(pool)
|
||||
inv.Put(pool)
|
||||
|
||||
if c := pool.Count(); c > 0 {
|
||||
t.Errorf("Pool count non-zero: %d\n", c)
|
||||
}
|
||||
}
|
||||
|
||||
func isZero(n *big.Int) bool {
|
||||
return new(big.Int).Mod(n, P).Int64() == 0
|
||||
}
|
||||
|
||||
func isOne(n *big.Int) bool {
|
||||
return new(big.Int).Mod(n, P).Int64() == 1
|
||||
}
|
||||
|
||||
func TestGFp6Invert(t *testing.T) {
|
||||
pool := new(bnPool)
|
||||
|
||||
a := newGFp6(pool)
|
||||
a.x.x.SetString("239487238491", 10)
|
||||
a.x.y.SetString("2356249827341", 10)
|
||||
a.y.x.SetString("082659782", 10)
|
||||
a.y.y.SetString("182703523765", 10)
|
||||
a.z.x.SetString("978236549263", 10)
|
||||
a.z.y.SetString("64893242", 10)
|
||||
|
||||
inv := newGFp6(pool)
|
||||
inv.Invert(a, pool)
|
||||
|
||||
b := newGFp6(pool).Mul(inv, a, pool)
|
||||
if !isZero(b.x.x) ||
|
||||
!isZero(b.x.y) ||
|
||||
!isZero(b.y.x) ||
|
||||
!isZero(b.y.y) ||
|
||||
!isZero(b.z.x) ||
|
||||
!isOne(b.z.y) {
|
||||
t.Fatalf("bad result for a^-1*a: %s", b)
|
||||
}
|
||||
|
||||
a.Put(pool)
|
||||
b.Put(pool)
|
||||
inv.Put(pool)
|
||||
|
||||
if c := pool.Count(); c > 0 {
|
||||
t.Errorf("Pool count non-zero: %d\n", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGFp12Invert(t *testing.T) {
|
||||
pool := new(bnPool)
|
||||
|
||||
a := newGFp12(pool)
|
||||
a.x.x.x.SetString("239846234862342323958623", 10)
|
||||
a.x.x.y.SetString("2359862352529835623", 10)
|
||||
a.x.y.x.SetString("928836523", 10)
|
||||
a.x.y.y.SetString("9856234", 10)
|
||||
a.x.z.x.SetString("235635286", 10)
|
||||
a.x.z.y.SetString("5628392833", 10)
|
||||
a.y.x.x.SetString("252936598265329856238956532167968", 10)
|
||||
a.y.x.y.SetString("23596239865236954178968", 10)
|
||||
a.y.y.x.SetString("95421692834", 10)
|
||||
a.y.y.y.SetString("236548", 10)
|
||||
a.y.z.x.SetString("924523", 10)
|
||||
a.y.z.y.SetString("12954623", 10)
|
||||
|
||||
inv := newGFp12(pool)
|
||||
inv.Invert(a, pool)
|
||||
|
||||
b := newGFp12(pool).Mul(inv, a, pool)
|
||||
if !isZero(b.x.x.x) ||
|
||||
!isZero(b.x.x.y) ||
|
||||
!isZero(b.x.y.x) ||
|
||||
!isZero(b.x.y.y) ||
|
||||
!isZero(b.x.z.x) ||
|
||||
!isZero(b.x.z.y) ||
|
||||
!isZero(b.y.x.x) ||
|
||||
!isZero(b.y.x.y) ||
|
||||
!isZero(b.y.y.x) ||
|
||||
!isZero(b.y.y.y) ||
|
||||
!isZero(b.y.z.x) ||
|
||||
!isOne(b.y.z.y) {
|
||||
t.Fatalf("bad result for a^-1*a: %s", b)
|
||||
}
|
||||
|
||||
a.Put(pool)
|
||||
b.Put(pool)
|
||||
inv.Put(pool)
|
||||
|
||||
if c := pool.Count(); c > 0 {
|
||||
t.Errorf("Pool count non-zero: %d\n", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCurveImpl(t *testing.T) {
|
||||
pool := new(bnPool)
|
||||
|
||||
g := &curvePoint{
|
||||
pool.Get().SetInt64(1),
|
||||
pool.Get().SetInt64(-2),
|
||||
pool.Get().SetInt64(1),
|
||||
pool.Get().SetInt64(0),
|
||||
}
|
||||
|
||||
x := pool.Get().SetInt64(32498273234)
|
||||
X := newCurvePoint(pool).Mul(g, x, pool)
|
||||
|
||||
y := pool.Get().SetInt64(98732423523)
|
||||
Y := newCurvePoint(pool).Mul(g, y, pool)
|
||||
|
||||
s1 := newCurvePoint(pool).Mul(X, y, pool).MakeAffine(pool)
|
||||
s2 := newCurvePoint(pool).Mul(Y, x, pool).MakeAffine(pool)
|
||||
|
||||
if s1.x.Cmp(s2.x) != 0 ||
|
||||
s2.x.Cmp(s1.x) != 0 {
|
||||
t.Errorf("DH points don't match: (%s, %s) (%s, %s)", s1.x, s1.y, s2.x, s2.y)
|
||||
}
|
||||
|
||||
pool.Put(x)
|
||||
X.Put(pool)
|
||||
pool.Put(y)
|
||||
Y.Put(pool)
|
||||
s1.Put(pool)
|
||||
s2.Put(pool)
|
||||
g.Put(pool)
|
||||
|
||||
if c := pool.Count(); c > 0 {
|
||||
t.Errorf("Pool count non-zero: %d\n", c)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrderG1(t *testing.T) {
|
||||
g := new(G1).ScalarBaseMult(Order)
|
||||
if !g.p.IsInfinity() {
|
||||
t.Error("G1 has incorrect order")
|
||||
}
|
||||
|
||||
one := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
|
||||
g.Add(g, one)
|
||||
g.p.MakeAffine(nil)
|
||||
if g.p.x.Cmp(one.p.x) != 0 || g.p.y.Cmp(one.p.y) != 0 {
|
||||
t.Errorf("1+0 != 1 in G1")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrderG2(t *testing.T) {
|
||||
g := new(G2).ScalarBaseMult(Order)
|
||||
if !g.p.IsInfinity() {
|
||||
t.Error("G2 has incorrect order")
|
||||
}
|
||||
|
||||
one := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
|
||||
g.Add(g, one)
|
||||
g.p.MakeAffine(nil)
|
||||
if g.p.x.x.Cmp(one.p.x.x) != 0 ||
|
||||
g.p.x.y.Cmp(one.p.x.y) != 0 ||
|
||||
g.p.y.x.Cmp(one.p.y.x) != 0 ||
|
||||
g.p.y.y.Cmp(one.p.y.y) != 0 {
|
||||
t.Errorf("1+0 != 1 in G2")
|
||||
}
|
||||
}
|
||||
|
||||
func TestOrderGT(t *testing.T) {
|
||||
gt := Pair(&G1{curveGen}, &G2{twistGen})
|
||||
g := new(GT).ScalarMult(gt, Order)
|
||||
if !g.p.IsOne() {
|
||||
t.Error("GT has incorrect order")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBilinearity(t *testing.T) {
|
||||
for i := 0; i < 2; i++ {
|
||||
a, p1, _ := RandomG1(rand.Reader)
|
||||
b, p2, _ := RandomG2(rand.Reader)
|
||||
e1 := Pair(p1, p2)
|
||||
|
||||
e2 := Pair(&G1{curveGen}, &G2{twistGen})
|
||||
e2.ScalarMult(e2, a)
|
||||
e2.ScalarMult(e2, b)
|
||||
|
||||
minusE2 := new(GT).Neg(e2)
|
||||
e1.Add(e1, minusE2)
|
||||
|
||||
if !e1.p.IsOne() {
|
||||
t.Fatalf("bad pairing result: %s", e1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestG1Marshal(t *testing.T) {
|
||||
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
|
||||
form := g.Marshal()
|
||||
_, ok := new(G1).Unmarshal(form)
|
||||
if !ok {
|
||||
t.Fatalf("failed to unmarshal")
|
||||
}
|
||||
|
||||
g.ScalarBaseMult(Order)
|
||||
form = g.Marshal()
|
||||
g2, ok := new(G1).Unmarshal(form)
|
||||
if !ok {
|
||||
t.Fatalf("failed to unmarshal ∞")
|
||||
}
|
||||
if !g2.p.IsInfinity() {
|
||||
t.Fatalf("∞ unmarshaled incorrectly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestG2Marshal(t *testing.T) {
|
||||
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
|
||||
form := g.Marshal()
|
||||
_, ok := new(G2).Unmarshal(form)
|
||||
if !ok {
|
||||
t.Fatalf("failed to unmarshal")
|
||||
}
|
||||
|
||||
g.ScalarBaseMult(Order)
|
||||
form = g.Marshal()
|
||||
g2, ok := new(G2).Unmarshal(form)
|
||||
if !ok {
|
||||
t.Fatalf("failed to unmarshal ∞")
|
||||
}
|
||||
if !g2.p.IsInfinity() {
|
||||
t.Fatalf("∞ unmarshaled incorrectly")
|
||||
}
|
||||
}
|
||||
|
||||
func TestG1Identity(t *testing.T) {
|
||||
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(0))
|
||||
if !g.p.IsInfinity() {
|
||||
t.Error("failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestG2Identity(t *testing.T) {
|
||||
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(0))
|
||||
if !g.p.IsInfinity() {
|
||||
t.Error("failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTripartiteDiffieHellman(t *testing.T) {
|
||||
a, _ := rand.Int(rand.Reader, Order)
|
||||
b, _ := rand.Int(rand.Reader, Order)
|
||||
c, _ := rand.Int(rand.Reader, Order)
|
||||
|
||||
pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
|
||||
qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
|
||||
pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
|
||||
qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
|
||||
pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
|
||||
qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
|
||||
|
||||
k1 := Pair(pb, qc)
|
||||
k1.ScalarMult(k1, a)
|
||||
k1Bytes := k1.Marshal()
|
||||
|
||||
k2 := Pair(pc, qa)
|
||||
k2.ScalarMult(k2, b)
|
||||
k2Bytes := k2.Marshal()
|
||||
|
||||
k3 := Pair(pa, qb)
|
||||
k3.ScalarMult(k3, c)
|
||||
k3Bytes := k3.Marshal()
|
||||
|
||||
if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
|
||||
t.Errorf("keys didn't agree")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkPairing(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
Pair(&G1{curveGen}, &G2{twistGen})
|
||||
}
|
||||
}
|
44
crypto/bn256/constants.go
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bn256
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
)
|
||||
|
||||
func bigFromBase10(s string) *big.Int {
|
||||
n, _ := new(big.Int).SetString(s, 10)
|
||||
return n
|
||||
}
|
||||
|
||||
// u is the BN parameter that determines the prime: 1868033³.
|
||||
var u = bigFromBase10("4965661367192848881")
|
||||
|
||||
// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
|
||||
var P = bigFromBase10("21888242871839275222246405745257275088696311157297823662689037894645226208583")
|
||||
|
||||
// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
|
||||
var Order = bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617")
|
||||
|
||||
// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+9.
|
||||
var xiToPMinus1Over6 = &gfP2{bigFromBase10("16469823323077808223889137241176536799009286646108169935659301613961712198316"), bigFromBase10("8376118865763821496583973867626364092589906065868298776909617916018768340080")}
|
||||
|
||||
// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+9.
|
||||
var xiToPMinus1Over3 = &gfP2{bigFromBase10("10307601595873709700152284273816112264069230130616436755625194854815875713954"), bigFromBase10("21575463638280843010398324269430826099269044274347216827212613867836435027261")}
|
||||
|
||||
// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+9.
|
||||
var xiToPMinus1Over2 = &gfP2{bigFromBase10("3505843767911556378687030309984248845540243509899259641013678093033130930403"), bigFromBase10("2821565182194536844548159561693502659359617185244120367078079554186484126554")}
|
||||
|
||||
// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+9.
|
||||
var xiToPSquaredMinus1Over3 = bigFromBase10("21888242871839275220042445260109153167277707414472061641714758635765020556616")
|
||||
|
||||
// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+9 (a cubic root of unity, mod p).
|
||||
var xiTo2PSquaredMinus2Over3 = bigFromBase10("2203960485148121921418603742825762020974279258880205651966")
|
||||
|
||||
// xiToPSquaredMinus1Over6 is ξ^((1p²-1)/6) where ξ = i+9 (a cubic root of -1, mod p).
|
||||
var xiToPSquaredMinus1Over6 = bigFromBase10("21888242871839275220042445260109153167277707414472061641714758635765020556617")
|
||||
|
||||
// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+9.
|
||||
var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19937756971775647987995932169929341994314640652964949448313374472400716661030"), bigFromBase10("2581911344467009335267311115468803099551665605076196740867805258568234346338")}
|
278
crypto/bn256/curve.go
Normal file
@ -0,0 +1,278 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

import (
	"math/big"
)

// curvePoint implements the elliptic curve y²=x³+3. Points are kept in
// Jacobian form and t=z² when valid. G₁ is the set of points of this curve on
// GF(p).
type curvePoint struct {
	x, y, z, t *big.Int
}

var curveB = new(big.Int).SetInt64(3)

// curveGen is the generator of G₁.
var curveGen = &curvePoint{
	new(big.Int).SetInt64(1),
	new(big.Int).SetInt64(-2),
	new(big.Int).SetInt64(1),
	new(big.Int).SetInt64(1),
}

func newCurvePoint(pool *bnPool) *curvePoint {
	return &curvePoint{
		pool.Get(),
		pool.Get(),
		pool.Get(),
		pool.Get(),
	}
}

func (c *curvePoint) String() string {
	c.MakeAffine(new(bnPool))
	return "(" + c.x.String() + ", " + c.y.String() + ")"
}

func (c *curvePoint) Put(pool *bnPool) {
	pool.Put(c.x)
	pool.Put(c.y)
	pool.Put(c.z)
	pool.Put(c.t)
}

func (c *curvePoint) Set(a *curvePoint) {
	c.x.Set(a.x)
	c.y.Set(a.y)
	c.z.Set(a.z)
	c.t.Set(a.t)
}

// IsOnCurve returns true iff c is on the curve; c must be in affine form.
func (c *curvePoint) IsOnCurve() bool {
	yy := new(big.Int).Mul(c.y, c.y)
	xxx := new(big.Int).Mul(c.x, c.x)
	xxx.Mul(xxx, c.x)
	yy.Sub(yy, xxx)
	yy.Sub(yy, curveB)
	if yy.Sign() < 0 || yy.Cmp(P) >= 0 {
		yy.Mod(yy, P)
	}
	return yy.Sign() == 0
}
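
Because curveGen is stored with z = 1 it is already affine, so the curve equation can be checked on it directly (a hypothetical package-internal test sketch, not part of this diff):

	func TestCurveGenOnCurve(t *testing.T) {
		// curveGen = (1, -2, 1, 1); IsOnCurve reduces y² - x³ - 3 mod P,
		// and (-2)² = 1³ + 3, so this should always hold.
		if !curveGen.IsOnCurve() {
			t.Errorf("curveGen is not on y² = x³ + 3")
		}
	}
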

func (c *curvePoint) SetInfinity() {
	c.z.SetInt64(0)
}

func (c *curvePoint) IsInfinity() bool {
	return c.z.Sign() == 0
}

func (c *curvePoint) Add(a, b *curvePoint, pool *bnPool) {
	if a.IsInfinity() {
		c.Set(b)
		return
	}
	if b.IsInfinity() {
		c.Set(a)
		return
	}

	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3

	// Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2]
	// by [u1:s1:z1·z2] and [u2:s2:z1·z2]
	// where u1 = x1·z2², s1 = y1·z2³ and u2 = x2·z1², s2 = y2·z1³
	z1z1 := pool.Get().Mul(a.z, a.z)
	z1z1.Mod(z1z1, P)
	z2z2 := pool.Get().Mul(b.z, b.z)
	z2z2.Mod(z2z2, P)
	u1 := pool.Get().Mul(a.x, z2z2)
	u1.Mod(u1, P)
	u2 := pool.Get().Mul(b.x, z1z1)
	u2.Mod(u2, P)

	t := pool.Get().Mul(b.z, z2z2)
	t.Mod(t, P)
	s1 := pool.Get().Mul(a.y, t)
	s1.Mod(s1, P)

	t.Mul(a.z, z1z1)
	t.Mod(t, P)
	s2 := pool.Get().Mul(b.y, t)
	s2.Mod(s2, P)

	// Compute x = (2h)²(s²-u1-u2)
	// where s = (s2-s1)/(u2-u1) is the slope of the line through
	// (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below.
	// This is also:
	// 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
	// = r² - j - 2v
	// with the notations below.
	h := pool.Get().Sub(u2, u1)
	xEqual := h.Sign() == 0

	t.Add(h, h)
	// i = 4h²
	i := pool.Get().Mul(t, t)
	i.Mod(i, P)
	// j = 4h³
	j := pool.Get().Mul(h, i)
	j.Mod(j, P)

	t.Sub(s2, s1)
	yEqual := t.Sign() == 0
	if xEqual && yEqual {
		c.Double(a, pool)
		return
	}
	r := pool.Get().Add(t, t)

	v := pool.Get().Mul(u1, i)
	v.Mod(v, P)

	// t4 = 4(s2-s1)²
	t4 := pool.Get().Mul(r, r)
	t4.Mod(t4, P)
	t.Add(v, v)
	t6 := pool.Get().Sub(t4, j)
	c.x.Sub(t6, t)

	// Set y = -(2h)³(s1 + s*(x/4h²-u1))
	// This is also
	// y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
	t.Sub(v, c.x) // t7
	t4.Mul(s1, j) // t8
	t4.Mod(t4, P)
	t6.Add(t4, t4) // t9
	t4.Mul(r, t)   // t10
	t4.Mod(t4, P)
	c.y.Sub(t4, t6)

	// Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
	t.Add(a.z, b.z) // t11
	t4.Mul(t, t)    // t12
	t4.Mod(t4, P)
	t.Sub(t4, z1z1) // t13
	t4.Sub(t, z2z2) // t14
	c.z.Mul(t4, h)
	c.z.Mod(c.z, P)

	pool.Put(z1z1)
	pool.Put(z2z2)
	pool.Put(u1)
	pool.Put(u2)
	pool.Put(t)
	pool.Put(s1)
	pool.Put(s2)
	pool.Put(h)
	pool.Put(i)
	pool.Put(j)
	pool.Put(r)
	pool.Put(v)
	pool.Put(t4)
	pool.Put(t6)
}
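
In affine terms this is the familiar chord rule: with slope s = (s2−s1)/(u2−u1), the sum has x₃ = s² − u1 − u2 and y₃ = s·(u1 − x₃) − s1. The code avoids the field division by folding the denominator into the new coordinate z = 2h·z1·z2, which is why the factors of 2h appear in the x and y formulas quoted in the comments above.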

func (c *curvePoint) Double(a *curvePoint, pool *bnPool) {
	// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
	A := pool.Get().Mul(a.x, a.x)
	A.Mod(A, P)
	B := pool.Get().Mul(a.y, a.y)
	B.Mod(B, P)
	C_ := pool.Get().Mul(B, B)
	C_.Mod(C_, P)

	t := pool.Get().Add(a.x, B)
	t2 := pool.Get().Mul(t, t)
	t2.Mod(t2, P)
	t.Sub(t2, A)
	t2.Sub(t, C_)
	d := pool.Get().Add(t2, t2)
	t.Add(A, A)
	e := pool.Get().Add(t, A)
	f := pool.Get().Mul(e, e)
	f.Mod(f, P)

	t.Add(d, d)
	c.x.Sub(f, t)

	t.Add(C_, C_)
	t2.Add(t, t)
	t.Add(t2, t2)
	c.y.Sub(d, c.x)
	t2.Mul(e, c.y)
	t2.Mod(t2, P)
	c.y.Sub(t2, t)

	t.Mul(a.y, a.z)
	t.Mod(t, P)
	c.z.Add(t, t)

	pool.Put(A)
	pool.Put(B)
	pool.Put(C_)
	pool.Put(t)
	pool.Put(t2)
	pool.Put(d)
	pool.Put(e)
	pool.Put(f)
}

// Mul sets c = scalar·a using a simple most-significant-bit-first double-and-add loop.
func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoint {
	sum := newCurvePoint(pool)
	sum.SetInfinity()
	t := newCurvePoint(pool)

	for i := scalar.BitLen(); i >= 0; i-- {
		t.Double(sum, pool)
		if scalar.Bit(i) != 0 {
			sum.Add(t, a, pool)
		} else {
			sum.Set(t)
		}
	}

	c.Set(sum)
	sum.Put(pool)
	t.Put(pool)
	return c
}
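
Because Order is the size of G₁, multiplying the generator by it must yield the point at infinity, which makes a compact check for Mul (a hypothetical package-internal test sketch, not part of this diff):

	func TestMulByOrder(t *testing.T) {
		pool := new(bnPool)
		p := newCurvePoint(pool)
		// Order·G₁ should be the identity, i.e. z == 0 after the reduction in Add.
		p.Mul(curveGen, Order, pool)
		if !p.IsInfinity() {
			t.Errorf("Order·curveGen should be the point at infinity")
		}
	}
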

func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
	// Fast path: z == 1 means the point is already in affine form.
	if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
		return c
	}

	zInv := pool.Get().ModInverse(c.z, P)
	t := pool.Get().Mul(c.y, zInv)
	t.Mod(t, P)
	zInv2 := pool.Get().Mul(zInv, zInv)
	zInv2.Mod(zInv2, P)
	c.y.Mul(t, zInv2)
	c.y.Mod(c.y, P)
	t.Mul(c.x, zInv2)
	t.Mod(t, P)
	c.x.Set(t)
	c.z.SetInt64(1)
	c.t.SetInt64(1)

	pool.Put(zInv)
	pool.Put(t)
	pool.Put(zInv2)

	return c
}
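
MakeAffine is the usual Jacobian-to-affine map: for z ≠ 0 it replaces (x, y, z) with (x/z², y/z³, 1) mod p and then sets t to 1 as well. Note that the point at infinity (z == 0) is not special-cased here.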

func (c *curvePoint) Negative(a *curvePoint) {
	c.x.Set(a.x)
	c.y.Neg(a.y)
	c.z.Set(a.z)
	c.t.SetInt64(0)
}
43 crypto/bn256/example_test.go Normal file
@ -0,0 +1,43 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

import (
	"crypto/rand"
)

func ExamplePair() {
	// This implements the tripartite Diffie-Hellman algorithm from "A One
	// Round Protocol for Tripartite Diffie-Hellman", A. Joux.
	// http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf

	// Each of the three parties, a, b and c, generates a private value.
	a, _ := rand.Int(rand.Reader, Order)
	b, _ := rand.Int(rand.Reader, Order)
	c, _ := rand.Int(rand.Reader, Order)

	// Then each party calculates g₁ and g₂ times their private value.
	pa := new(G1).ScalarBaseMult(a)
	qa := new(G2).ScalarBaseMult(a)

	pb := new(G1).ScalarBaseMult(b)
	qb := new(G2).ScalarBaseMult(b)

	pc := new(G1).ScalarBaseMult(c)
	qc := new(G2).ScalarBaseMult(c)

	// Now each party exchanges its public values with the other two and
	// all parties can calculate the shared key.
	k1 := Pair(pb, qc)
	k1.ScalarMult(k1, a)

	k2 := Pair(pc, qa)
	k2.ScalarMult(k2, b)

	k3 := Pair(pa, qb)
	k3.ScalarMult(k3, c)

	// k1, k2 and k3 will all be equal.
}
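
In a real test the final comment can be turned into an assertion by comparing serialized keys, for example (a hypothetical sketch, not part of this diff; it assumes a GT Marshal method as in the upstream bn256 package, plus the standard bytes package):

	if !bytes.Equal(k1.Marshal(), k2.Marshal()) || !bytes.Equal(k1.Marshal(), k3.Marshal()) {
		panic("tripartite Diffie-Hellman keys disagree")
	}
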
200 crypto/bn256/gfp12.go Normal file
@ -0,0 +1,200 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package bn256

// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.

import (
	"math/big"
)

// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
// where ω²=τ.
type gfP12 struct {
	x, y *gfP6 // value is xω + y
}

func newGFp12(pool *bnPool) *gfP12 {
	return &gfP12{newGFp6(pool), newGFp6(pool)}
}

func (e *gfP12) String() string {
	return "(" + e.x.String() + "," + e.y.String() + ")"
}

func (e *gfP12) Put(pool *bnPool) {
	e.x.Put(pool)
	e.y.Put(pool)
}

func (e *gfP12) Set(a *gfP12) *gfP12 {
	e.x.Set(a.x)
	e.y.Set(a.y)
	return e
}

func (e *gfP12) SetZero() *gfP12 {
	e.x.SetZero()
	e.y.SetZero()
	return e
}

func (e *gfP12) SetOne() *gfP12 {
	e.x.SetZero()
	e.y.SetOne()
	return e
}

func (e *gfP12) Minimal() {
	e.x.Minimal()
	e.y.Minimal()
}

func (e *gfP12) IsZero() bool {
	e.Minimal()
	return e.x.IsZero() && e.y.IsZero()
}

func (e *gfP12) IsOne() bool {
	e.Minimal()
	return e.x.IsZero() && e.y.IsOne()
}

func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
	e.x.Negative(a.x)
	e.y.Set(a.y)
	return a
}

func (e *gfP12) Negative(a *gfP12) *gfP12 {
	e.x.Negative(a.x)
	e.y.Negative(a.y)
	return e
}

// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
func (e *gfP12) Frobenius(a *gfP12, pool *bnPool) *gfP12 {
	e.x.Frobenius(a.x, pool)
	e.y.Frobenius(a.y, pool)
	e.x.MulScalar(e.x, xiToPMinus1Over6, pool)
	return e
}

// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
func (e *gfP12) FrobeniusP2(a *gfP12, pool *bnPool) *gfP12 {
	e.x.FrobeniusP2(a.x)
	e.x.MulGFP(e.x, xiToPSquaredMinus1Over6)
	e.y.FrobeniusP2(a.y)
	return e
}

func (e *gfP12) Add(a, b *gfP12) *gfP12 {
	e.x.Add(a.x, b.x)
	e.y.Add(a.y, b.y)
	return e
}

func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
	e.x.Sub(a.x, b.x)
	e.y.Sub(a.y, b.y)
	return e
}

func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 {
	tx := newGFp6(pool)
	tx.Mul(a.x, b.y, pool)
	t := newGFp6(pool)
	t.Mul(b.x, a.y, pool)
	tx.Add(tx, t)

	ty := newGFp6(pool)
	ty.Mul(a.y, b.y, pool)
	t.Mul(a.x, b.x, pool)
	t.MulTau(t, pool)
	e.y.Add(ty, t)
	e.x.Set(tx)

	tx.Put(pool)
	ty.Put(pool)
	t.Put(pool)
	return e
}
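
Mul is schoolbook multiplication in the quadratic extension: writing a = a.x·ω + a.y and b = b.x·ω + b.y with ω² = τ, the product is (a.x·b.y + b.x·a.y)·ω + (a.y·b.y + a.x·b.x·τ), which is exactly what the tx and ty accumulators compute above.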

// MulScalar multiplies e by the gfP6 value b; note that the a argument is ignored by this implementation.
func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 {
	e.x.Mul(e.x, b, pool)
	e.y.Mul(e.y, b, pool)
	return e
}

// Exp computes c = a^power with a most-significant-bit-first square-and-multiply loop.
func (c *gfP12) Exp(a *gfP12, power *big.Int, pool *bnPool) *gfP12 {
	sum := newGFp12(pool)
	sum.SetOne()
	t := newGFp12(pool)

	for i := power.BitLen() - 1; i >= 0; i-- {
		t.Square(sum, pool)
		if power.Bit(i) != 0 {
			sum.Mul(t, a, pool)
		} else {
			sum.Set(t)
		}
	}

	c.Set(sum)

	sum.Put(pool)
	t.Put(pool)

	return c
}

func (e *gfP12) Square(a *gfP12, pool *bnPool) *gfP12 {
	// Complex squaring algorithm
	v0 := newGFp6(pool)
	v0.Mul(a.x, a.y, pool)

	t := newGFp6(pool)
	t.MulTau(a.x, pool)
	t.Add(a.y, t)
	ty := newGFp6(pool)
	ty.Add(a.x, a.y)
	ty.Mul(ty, t, pool)
	ty.Sub(ty, v0)
	t.MulTau(v0, pool)
	ty.Sub(ty, t)

	e.y.Set(ty)
	e.x.Double(v0)

	v0.Put(pool)
	t.Put(pool)
	ty.Put(pool)

	return e
}
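
The complex squaring trick used here: for a = x·ω + y with ω² = τ, a² = 2xy·ω + (y² + x²·τ). The code obtains the second term as (x + y)(xτ + y) − xy − xy·τ, trading a squaring for one multiplication, and the first term as Double(v0) where v0 = xy.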

func (e *gfP12) Invert(a *gfP12, pool *bnPool) *gfP12 {
	// See "Implementing cryptographic pairings", M. Scott, section 3.2.
	// ftp://136.206.11.249/pub/crypto/pairings.pdf
	t1 := newGFp6(pool)
	t2 := newGFp6(pool)

	t1.Square(a.x, pool)
	t2.Square(a.y, pool)
	t1.MulTau(t1, pool)
	t1.Sub(t2, t1)
	t2.Invert(t1, pool)

	e.x.Negative(a.x)
	e.y.Set(a.y)
	e.MulScalar(e, t2, pool)

	t1.Put(pool)
	t2.Put(pool)

	return e
}
Some files were not shown because too many files have changed in this diff.