Compare commits

25 Commits

Author SHA1 Message Date
Péter Szilágyi
017449971e params: release Geth v1.9.9 2019-12-06 11:51:37 +02:00
Martin Holst Swende
bc01593afb consensus/ethash, params: eip-2384: bump difficulty bomb (#20347)
* consensus/ethash, params: implement eip-2384: bump difficulty bomb

* params: EIP 2384 compat checks

* consensus, params: add Muir Glacier block number (mainnet, ropsten) + official name

* core/forkid: forkid tests for muir glacier

* params/config: address review concerns

* params, core/forkid: review nitpicks

* cmd/geth,eth,les: add override option for muir glacier

* params: nit fix
2019-12-06 11:36:40 +02:00
Marius van der Wijden
c9dce0bfd7 p2p/enode: remove data race in sliceIter (#20421) 2019-12-05 22:16:35 +01:00
Péter Szilágyi
e78f631dfc Merge pull request #20428 from karalabe/cht-1.9.9
params: update CHTs for v1.9.9 release
2019-12-05 11:38:41 +02:00
Péter Szilágyi
6b6882f08b params: update CHTs for v1.9.9 release 2019-12-05 11:01:40 +02:00
Péter Szilágyi
c2d65d34d5 Merge pull request #20415 from karalabe/trie-dirty-cache-metrics
trie: track dirty cache metrics, track clean writes on commit
2019-12-02 12:51:00 +02:00
Péter Szilágyi
13ccf6016e trie: track dirty cache metrics, track clean writes on commit 2019-12-02 12:23:35 +02:00
Marius van der Wijden
7ce7c3967c accounts/abi/bind: fix destructive packing of *big.Int (#20412) 2019-12-02 10:29:25 +01:00
gary rong
fc7e0fe6c7 core, miner: remove PostChainEvents (#19396)
This change:

- removes the PostChainEvents method on core.BlockChain.
- sorts 'removed log' events by block number.
- fires the NewChainHead event if we inject a canonical block into the chain,
  even if the entire insertion is not successful.
- guarantees correct event ordering in all cases.
2019-11-29 14:22:08 +01:00
Guillaume Ballet
5cc6e7a71e accounts/usbwallet: fix staticcheck warnings (#20372) 2019-11-29 11:47:14 +01:00
xinluyin
d556d39a2c internal/web3ext: add debug_accountRange (#20410) 2019-11-29 11:46:12 +01:00
Guillaume Ballet
54d332e1db accounts/scwallet: fix staticcheck warnings (#20370) 2019-11-29 11:42:53 +01:00
Guillaume Ballet
e0bf5f0ccb internal: fix staticcheck warnings (#20380) 2019-11-29 11:40:02 +01:00
Guillaume Ballet
1ff3d7c2d4 cmd/faucet, cmd/geth: fix staticcheck warnings (#20374) 2019-11-29 11:38:34 +01:00
gary rong
08611cfd75 trie: remove dead code (#20405) 2019-11-28 12:47:35 +02:00
Guillaume Ballet
9a529d64d1 log: fix staticcheck warnings (#20388) 2019-11-28 10:53:06 +01:00
Felix Lange
a91b704b01 consensus/ethash: refactor remote sealer (#20335)
The original idea behind this change was to remove a use of the
deprecated CancelRequest method. Simply removing it would've been an
option, but I couldn't resist and did a bit of a refactoring instead.

All remote sealing code was contained in a single giant function. Remote
sealing is now extracted into its own object, remoteSealer.
2019-11-28 10:51:57 +01:00
Péter Szilágyi
c9f28ca8e5 go: update fastcache to 1.5.3 (#20404)
deps: update fastcache to 1.5.3
2019-11-27 15:08:34 +02:00
Péter Szilágyi
58e33d9e5a Merge pull request #20403 from karalabe/fix-freezer-reinit
core/rawdb: fix reinit regression caused by the hash check PR
2019-11-27 15:05:58 +02:00
Martin Holst Swende
7800ba978d deps: update fastcache to 1.5.3 2019-11-27 13:46:07 +01:00
Péter Szilágyi
717f8a4e8f core/rawdb: fix reinit regression caused by the hash check PR 2019-11-27 14:41:47 +02:00
Guillaume Ballet
7b189d6f1f core: fix staticcheck warnings (#20384)
* core: fix staticcheck warnings

* fix goimports
2019-11-27 09:50:30 +01:00
Guillaume Ballet
c4844e9ee2 les: fix staticcheck warnings (#20371) 2019-11-27 09:49:41 +01:00
zaccoding
23c8c74131 cmd: fix command help messages in modules (#20203) 2019-11-26 11:46:39 +01:00
Péter Szilágyi
0676320169 params: begin v1.9.9 release cycle 2019-11-26 12:21:11 +02:00
48 changed files with 716 additions and 647 deletions

View File

@@ -73,7 +73,7 @@ func packNum(value reflect.Value) []byte {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return U256(big.NewInt(value.Int()))
case reflect.Ptr:
return U256(value.Interface().(*big.Int))
return U256(new(big.Int).Set(value.Interface().(*big.Int)))
default:
panic("abi: fatal error")
}
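
The one-line fix above matters because the U256 helper reduces its argument modulo 2**256 in place, so packing a caller-supplied *big.Int without copying mutates the caller's value. A minimal, self-contained sketch of the hazard (u256 here is a simplified stand-in for the real helper):

package main

import (
	"fmt"
	"math/big"
)

// u256 mimics math.U256: it reduces x modulo 2**256 by masking it in place.
func u256(x *big.Int) *big.Int {
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	return x.And(x, mask) // And stores the result back into x
}

func main() {
	v := big.NewInt(-1) // the caller's value

	u256(new(big.Int).Set(v)) // fixed code: pack a copy
	fmt.Println(v)            // still -1

	u256(v)        // old code: pack the value itself
	fmt.Println(v) // now 2**256 - 1; the caller's value was destroyed
}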

View File

@@ -71,7 +71,7 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes
cardPublic, ok := gen.Unmarshal(keyData)
if !ok {
return nil, fmt.Errorf("Could not unmarshal public key from card")
return nil, fmt.Errorf("could not unmarshal public key from card")
}
secret, err := gen.GenerateSharedSecret(private, cardPublic)
@@ -109,7 +109,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
cardChallenge := response.Data[32:64]
if !bytes.Equal(expectedCryptogram, cardCryptogram) {
return fmt.Errorf("Invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
}
md.Reset()
@@ -132,7 +132,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
if s.PairingKey == nil {
return fmt.Errorf("Cannot unpair: not paired")
return fmt.Errorf("cannot unpair: not paired")
}
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
@@ -148,7 +148,7 @@ func (s *SecureChannelSession) Unpair() error {
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
if s.iv != nil {
return fmt.Errorf("Session already opened")
return fmt.Errorf("session already opened")
}
response, err := s.open()
@@ -185,11 +185,11 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
return err
}
if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
return fmt.Errorf("Got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
}
if len(response.Data) != scSecretLength {
return fmt.Errorf("Response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
}
return nil
@@ -222,7 +222,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
if s.iv == nil {
return nil, fmt.Errorf("Channel not open")
return nil, fmt.Errorf("channel not open")
}
data, err := s.encryptAPDU(data)
@@ -261,14 +261,14 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
return nil, err
}
if !bytes.Equal(s.iv, rmac) {
return nil, fmt.Errorf("Invalid MAC in response")
return nil, fmt.Errorf("invalid MAC in response")
}
rapdu := &responseAPDU{}
rapdu.deserialize(plainData)
if rapdu.Sw1 != sw1Ok {
return nil, fmt.Errorf("Unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
}
return rapdu, nil
@@ -277,7 +277,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
// encryptAPDU is an internal method that serializes and encrypts an APDU.
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
if len(data) > maxPayloadSize {
return nil, fmt.Errorf("Payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
}
data = pad(data, 0x80)
@@ -323,10 +323,10 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
case terminator:
return data[:len(data)-i], nil
default:
return nil, fmt.Errorf("Expected end of padding, got %d", data[len(data)-i])
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
}
}
return nil, fmt.Errorf("Expected end of padding, got 0")
return nil, fmt.Errorf("expected end of padding, got 0")
}
// updateIV is an internal method that updates the initialization vector after
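
The wave of lower-casing above is driven by staticcheck's ST1005 rule: error strings should not be capitalized or end with punctuation, because they are routinely wrapped into longer messages. A small illustration of why the convention exists:

package main

import (
	"errors"
	"fmt"
)

func openChannel() error {
	return errors.New("channel not open") // lower-case, no trailing period
}

func main() {
	if err := openChannel(); err != nil {
		// Wrapped errors read as one sentence only when the inner
		// message starts lower-case: "pairing wallet: channel not open".
		fmt.Println("pairing wallet:", err)
	}
}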

View File

@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
}
if response.Sw1 != sw1Ok {
return nil, fmt.Errorf("Unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
}
return response, nil
@@ -252,7 +252,7 @@ func (w *Wallet) release() error {
// with the wallet.
func (w *Wallet) pair(puk []byte) error {
if w.session.paired() {
return fmt.Errorf("Wallet already paired")
return fmt.Errorf("wallet already paired")
}
pairing, err := w.session.pair(puk)
if err != nil {
@@ -773,12 +773,12 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
// Look for the path in the URL
if account.URL.Scheme != w.Hub.scheme {
return nil, fmt.Errorf("Scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
}
parts := strings.SplitN(account.URL.Path, "/", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("Invalid URL format: %s", account.URL)
return nil, fmt.Errorf("invalid URL format: %s", account.URL)
}
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
@@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
// unpair deletes an existing pairing.
func (s *Session) unpair() error {
if !s.verified {
return fmt.Errorf("Unpair requires that the PIN be verified")
return fmt.Errorf("unpair requires that the PIN be verified")
}
return s.Channel.Unpair()
}
@@ -850,7 +850,7 @@ func (s *Session) paired() bool {
// authenticate uses an existing pairing to establish a secure channel.
func (s *Session) authenticate(pairing smartcardPairing) error {
if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
return fmt.Errorf("Cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
return fmt.Errorf("cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
}
s.Channel.PairingKey = pairing.PairingKey
s.Channel.PairingIndex = pairing.PairingIndex
@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
}
// derivationPath fetches the wallet's current derivation path from the card.
//lint:ignore U1000 needs to be added to the console interface
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
if err != nil {
@@ -993,12 +994,14 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
}
// keyExport contains information on an exported keypair.
//lint:ignore U1000 needs to be added to the console interface
type keyExport struct {
PublicKey []byte `asn1:"tag:0"`
PrivateKey []byte `asn1:"tag:1,optional"`
}
// publicKey returns the public key for the current derivation path.
//lint:ignore U1000 needs to be added to the console interface
func (s *Session) publicKey() ([]byte, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
if err != nil {

View File

@@ -162,7 +162,8 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
return common.Address{}, nil, accounts.ErrWalletClosed
}
// Ensure the wallet is capable of signing the given transaction
if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
//lint:ignore ST1005 brand name displayed on the console
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
}
// All infos gathered and metadata checks out, request signing

View File

@@ -32,19 +32,6 @@ import (
"gopkg.in/urfave/cli.v1"
)
const (
commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)
var (
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
@@ -128,7 +115,7 @@ func init() {
aliasFlag,
}
app.Action = utils.MigrateFlags(abigen)
cli.CommandHelpTemplate = commandHelperTemplate
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func abigen(c *cli.Context) error {

View File

@@ -28,19 +28,6 @@ import (
"gopkg.in/urfave/cli.v1"
)
const (
commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)
var (
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
@@ -61,7 +48,7 @@ func init() {
oracleFlag,
nodeURLFlag,
}
cli.CommandHelpTemplate = commandHelperTemplate
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
// Commonly used command line flags.

View File

@@ -223,6 +223,7 @@ func init() {
}
app.Action = signer
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand}
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func main() {

View File

@@ -43,6 +43,7 @@ func init() {
commandSignMessage,
commandVerifyMessage,
}
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
// Commonly used command line flags.

View File

@@ -152,6 +152,7 @@ func init() {
runCommand,
stateTestCommand,
}
cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func main() {

View File

@@ -351,6 +351,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
@@ -392,6 +393,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
continue
}
if msg.Tier >= uint(*tiersFlag) {
//lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return
@@ -429,6 +431,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
}
if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors))
//lint:ignore ST1005 it's funny and the robot won't mind
if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return
@@ -450,6 +453,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
}
continue
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
//lint:ignore ST1005 Google is a company name and should be capitalized.
if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil {
log.Warn("Failed to send Google+ deprecation to client", "err", err)
return
@@ -462,6 +466,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
case *noauthFlag:
username, avatar, address, err = authNoAuth(msg.URL)
default:
//lint:ignore ST1005 This error is to be displayed in the browser
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
}
if err != nil {
@@ -520,7 +525,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
// Send an error if the funding is too frequent, otherwise a success
if !fund {
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}
@@ -682,6 +687,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
// Twitter's API isn't really friendly with direct links. Still, we don't
@@ -696,6 +702,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
@@ -706,6 +713,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
@@ -721,6 +729,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
}
username := parts[len(parts)-3]
@@ -740,6 +749,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
@@ -755,6 +765,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
func authNoAuth(url string) (string, string, common.Address, error) {
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
if address == (common.Address{}) {
//lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return address.Hex() + "@noauth", "", address, nil

View File

@@ -150,6 +150,9 @@ func makeFullNode(ctx *cli.Context) *node.Node {
if ctx.GlobalIsSet(utils.OverrideIstanbulFlag.Name) {
cfg.Eth.OverrideIstanbul = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideIstanbulFlag.Name))
}
if ctx.GlobalIsSet(utils.OverrideMuirGlacierFlag.Name) {
cfg.Eth.OverrideMuirGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideMuirGlacierFlag.Name))
}
utils.RegisterEthService(stack, &cfg.Eth)
// Whisper must be explicitly enabled by specifying at least 1 whisper flag or in dev mode
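
Together with the OverrideMuirGlacierFlag registration further down, this lets operators pin the fork manually at startup, e.g. geth --override.muirglacier 9200000 (9,200,000 being the mainnet activation height specified by EIP-2384); without the flag, the bundled chain configuration applies.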

View File

@@ -70,6 +70,7 @@ var (
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideIstanbulFlag,
utils.OverrideMuirGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,

View File

@@ -495,7 +495,6 @@ func (api *RetestethAPI) mineBlock() error {
txCount := 0
var txs []*types.Transaction
var receipts []*types.Receipt
var coalescedLogs []*types.Log
var blockFull = gasPool.Gas() < params.TxGas
for address := range api.txSenders {
if blockFull {
@@ -522,7 +521,6 @@ func (api *RetestethAPI) mineBlock() error {
}
txs = append(txs, tx)
receipts = append(receipts, receipt)
coalescedLogs = append(coalescedLogs, receipt.Logs...)
delete(m, nonce)
if len(m) == 0 {
// Last tx for the sender
@@ -682,9 +680,6 @@ func (api *RetestethAPI) AccountRange(ctx context.Context,
for i := 0; i < int(maxResults) && it.Next(); i++ {
if preimage := accountTrie.GetKey(it.Key); preimage != nil {
result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage)
//fmt.Printf("%x: %x\n", it.Key, preimage)
} else {
//fmt.Printf("could not find preimage for %x\n", it.Key)
}
}
//fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap))
@@ -808,9 +803,6 @@ func (api *RetestethAPI) StorageRangeAt(ctx context.Context,
Key: string(ks),
Value: string(vs),
}
//fmt.Printf("Key: %s, Value: %s\n", ks, vs)
} else {
//fmt.Printf("Did not find preimage for %x\n", it.Key)
}
}
if it.Next() {
@@ -889,7 +881,7 @@ func retesteth(ctx *cli.Context) error {
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()
abortChan := make(chan os.Signal)
abortChan := make(chan os.Signal, 11)
signal.Notify(abortChan, os.Interrupt)
sig := <-abortChan
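
The buffered channel above addresses staticcheck's SA1017: signal.Notify sends without blocking, so delivery to an unbuffered channel can be silently dropped if the receiver isn't ready. A minimal sketch of the required pattern:

package main

import (
	"fmt"
	"os"
	"os/signal"
)

func main() {
	// Buffer at least one slot; an unbuffered channel may miss signals
	// because signal.Notify never blocks when delivering them.
	abort := make(chan os.Signal, 1)
	signal.Notify(abort, os.Interrupt)

	fmt.Println("waiting for interrupt (Ctrl-C)...")
	<-abort
	fmt.Println("shutting down")
}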

View File

@@ -77,6 +77,17 @@ SUBCOMMANDS:
{{range $categorized.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}{{end}}`
OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
{{if .Description}}{{.Description}}
{{end}}{{if .Subcommands}}
SUBCOMMANDS:
{{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{end}}{{if .Flags}}
OPTIONS:
{{range $.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}`
)
func init() {
@@ -226,6 +237,10 @@ var (
Name: "override.istanbul",
Usage: "Manually specify Istanbul fork-block, overriding the bundled setting",
}
OverrideMuirGlacierFlag = cli.Uint64Flag{
Name: "override.muirglacier",
Usage: "Manually specify Muir Glacier fork-block, overriding the bundled setting",
}
// Light server and client settings
LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021
Name: "lightserv",

View File

@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
go func(idx int) {
defer pend.Done()
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil, false)
ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal, nil}, nil, false)
defer ethash.Close()
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)

View File

@@ -28,7 +28,7 @@ var errEthashStopped = errors.New("ethash stopped")
// API exposes ethash related methods for the RPC interface.
type API struct {
ethash *Ethash // Make sure the mode of ethash is normal.
ethash *Ethash
}
// GetWork returns a work package for external miner.
@@ -39,7 +39,7 @@ type API struct {
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
if api.ethash.remote == nil {
return [4]string{}, errors.New("not supported")
}
@@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) {
workCh = make(chan [4]string, 1)
errc = make(chan error, 1)
)
select {
case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.exitCh:
case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.remote.exitCh:
return [4]string{}, errEthashStopped
}
select {
case work := <-workCh:
return work, nil
@@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) {
// It returns an indication of whether the work was accepted. Note that an
// invalid solution, stale work, or non-existent work will all return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
if api.ethash.remote == nil {
return false
}
var errc = make(chan error, 1)
select {
case api.ethash.submitWorkCh <- &mineResult{
case api.ethash.remote.submitWorkCh <- &mineResult{
nonce: nonce,
mixDigest: digest,
hash: hash,
errc: errc,
}:
case <-api.ethash.exitCh:
case <-api.ethash.remote.exitCh:
return false
}
err := <-errc
return err == nil
}
@@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
if api.ethash.remote == nil {
return false
}
var done = make(chan struct{}, 1)
select {
case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
case <-api.ethash.exitCh:
case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
case <-api.ethash.remote.exitCh:
return false
}
// Block until hash rate submitted successfully.
<-done
return true
}

View File

@@ -44,6 +44,11 @@ var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
// It offsets the bomb a further 4M blocks past Constantinople, for a total delay of 9M blocks.
// Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))
// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
// It returns the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
@@ -311,6 +316,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1)
switch {
case config.IsMuirGlacier(next):
return calcDifficultyEip2384(time, parent)
case config.IsConstantinople(next):
return calcDifficultyConstantinople(time, parent)
case config.IsByzantium(next):
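
As a rough sketch of the arithmetic behind calcDifficultyEip2384 (simplified from makeDifficultyCalculator; only the exponential bomb term is shown and the helper name is illustrative): the bomb sees a "fake" block number pushed back by the accumulated 9M-block delay, so at the Muir Glacier mainnet height of 9,200,000 it contributes just 2^(200000/100000 - 2) = 2^0 = 1.

package main

import (
	"fmt"
	"math/big"
)

// bombTerm returns the exponential component 2^(fake/100000 - 2), where fake
// is the block number minus the accumulated bomb delay (9M after EIP-2384),
// clamped at zero.
func bombTerm(blockNumber, bombDelay *big.Int) *big.Int {
	fake := new(big.Int).Sub(blockNumber, bombDelay)
	if fake.Sign() < 0 {
		fake.SetInt64(0)
	}
	period := new(big.Int).Div(fake, big.NewInt(100000))
	if period.Cmp(big.NewInt(1)) <= 0 {
		return big.NewInt(0) // the bomb term only applies from the second period on
	}
	exp := new(big.Int).Sub(period, big.NewInt(2))
	return new(big.Int).Exp(big.NewInt(2), exp, nil)
}

func main() {
	// At block 9,200,000 with a 9M-block delay the bomb is effectively reset.
	fmt.Println(bombTerm(big.NewInt(9200000), big.NewInt(9000000))) // 1
}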

View File

@@ -34,9 +34,7 @@ import (
"unsafe"
mmap "github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
@@ -50,7 +48,7 @@ var (
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)
sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -403,36 +401,8 @@ type Config struct {
DatasetsInMem int
DatasetsOnDisk int
PowMode Mode
}
// sealTask wraps a seal block with its result channel for the remote sealer thread.
type sealTask struct {
block *types.Block
results chan<- *types.Block
}
// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce
mixDigest common.Hash
hash common.Hash
errc chan error
}
// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
id common.Hash
ping time.Time
rate uint64
done chan struct{}
}
// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [4]string
Log log.Logger `toml:"-"`
}
// Ethash is a consensus engine based on proof-of-work implementing the ethash
@@ -448,52 +418,42 @@ type Ethash struct {
threads int // Number of threads to mine on if mining
update chan struct{} // Notification channel to update mining parameters
hashrate metrics.Meter // Meter tracking the average hashrate
// Remote sealer related fields
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
remote *remoteSealer
// The fields below are hooks for testing
shared *Ethash // Shared PoW verifier to avoid cache regeneration
fakeFail uint64 // Block number which fails PoW check even in fake mode
fakeDelay time.Duration // Time delay to sleep for before returning from verify
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
closeOnce sync.Once // Ensures exit channel will not be closed twice.
exitCh chan chan error // Notification channel to exiting backend threads
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
closeOnce sync.Once // Ensures exit channel will not be closed twice.
}
// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
if config.Log == nil {
config.Log = log.Root()
}
if config.CachesInMem <= 0 {
log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1
}
if config.CacheDir != "" && config.CachesOnDisk > 0 {
log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
ethash := &Ethash{
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
}
go ethash.remote(notify, noverify)
ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}
@@ -501,19 +461,13 @@ func New(config Config, notify []string, noverify bool) *Ethash {
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
ethash := &Ethash{
config: Config{PowMode: ModeTest},
caches: newlru("cache", 1, newCache),
datasets: newlru("dataset", 1, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
exitCh: make(chan chan error),
config: Config{PowMode: ModeTest, Log: log.Root()},
caches: newlru("cache", 1, newCache),
datasets: newlru("dataset", 1, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
}
go ethash.remote(notify, noverify)
ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}
@@ -524,6 +478,7 @@ func NewFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
}
}
@@ -535,6 +490,7 @@ func NewFakeFailer(fail uint64) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
fakeFail: fail,
}
@@ -547,6 +503,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
fakeDelay: delay,
}
@@ -558,6 +515,7 @@ func NewFullFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFullFake,
Log: log.Root(),
},
}
}
@@ -573,13 +531,11 @@ func (ethash *Ethash) Close() error {
var err error
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
if ethash.exitCh == nil {
if ethash.remote == nil {
return
}
errc := make(chan error)
ethash.exitCh <- errc
err = <-errc
close(ethash.exitCh)
close(ethash.remote.requestExit)
<-ethash.remote.exitCh
})
return err
}
@@ -680,8 +636,8 @@ func (ethash *Ethash) Hashrate() float64 {
var res = make(chan uint64, 1)
select {
case ethash.fetchRateCh <- res:
case <-ethash.exitCh:
case ethash.remote.fetchRateCh <- res:
case <-ethash.remote.exitCh:
// Return local hashrate only if ethash is stopped.
return ethash.hashrate.Rate1()
}

View File

@@ -18,6 +18,7 @@ package ethash
import (
"bytes"
"context"
crand "crypto/rand"
"encoding/json"
"errors"
@@ -33,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
const (
@@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
}
return nil
}
@@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
threads = 0 // Allows disabling local mining without extra logic around local/remote
}
// Push new work to remote sealer
if ethash.workCh != nil {
ethash.workCh <- &sealTask{block: block, results: results}
if ethash.remote != nil {
ethash.remote.workCh <- &sealTask{block: block, results: results}
}
var (
pend sync.WaitGroup
@@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
select {
case results <- result:
default:
log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
}
close(abort)
case <-ethash.update:
// Thread count was changed on user request, restart
close(abort)
if err := ethash.Seal(chain, block, results, stop); err != nil {
log.Error("Failed to restart sealing after update", "err", err)
ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
}
}
// Wait for all miners to terminate and return the block
@@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
attempts = int64(0)
nonce = seed
)
logger := log.New("miner", id)
logger := ethash.config.Log.New("miner", id)
logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
for {
@@ -186,160 +186,128 @@ search:
runtime.KeepAlive(dataset)
}
// remote is a standalone goroutine to handle remote mining related stuff.
func (ethash *Ethash) remote(notify []string, noverify bool) {
var (
works = make(map[common.Hash]*types.Block)
rates = make(map[common.Hash]hashrate)
// This is the timeout for HTTP requests to notify external miners.
const remoteSealerTimeout = 1 * time.Second
results chan<- *types.Block
currentBlock *types.Block
currentWork [4]string
type remoteSealer struct {
works map[common.Hash]*types.Block
rates map[common.Hash]hashrate
currentBlock *types.Block
currentWork [4]string
notifyCtx context.Context
cancelNotify context.CancelFunc // cancels all notification requests
reqWG sync.WaitGroup // tracks notification request goroutines
notifyTransport = &http.Transport{}
notifyClient = &http.Client{
Transport: notifyTransport,
Timeout: time.Second,
}
notifyReqs = make([]*http.Request, len(notify))
)
// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
notifyWork := func() {
work := currentWork
blob, _ := json.Marshal(work)
ethash *Ethash
noverify bool
notifyURLs []string
results chan<- *types.Block
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
requestExit chan struct{}
exitCh chan struct{}
}
for i, url := range notify {
// Terminate any previously pending request and create the new work
if notifyReqs[i] != nil {
notifyTransport.CancelRequest(notifyReqs[i])
}
notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
notifyReqs[i].Header.Set("Content-Type", "application/json")
// sealTask wraps a seal block with its result channel for the remote sealer thread.
type sealTask struct {
block *types.Block
results chan<- *types.Block
}
// Push the new work concurrently to all the remote nodes
go func(req *http.Request, url string) {
res, err := notifyClient.Do(req)
if err != nil {
log.Warn("Failed to notify remote miner", "err", err)
} else {
log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
res.Body.Close()
}
}(notifyReqs[i], url)
}
// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce
mixDigest common.Hash
hash common.Hash
errc chan error
}
// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
id common.Hash
ping time.Time
rate uint64
done chan struct{}
}
// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [4]string
}
func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
ctx, cancel := context.WithCancel(context.Background())
s := &remoteSealer{
ethash: ethash,
noverify: noverify,
notifyURLs: urls,
notifyCtx: ctx,
cancelNotify: cancel,
works: make(map[common.Hash]*types.Block),
rates: make(map[common.Hash]hashrate),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
requestExit: make(chan struct{}),
exitCh: make(chan struct{}),
}
// makeWork creates a work package for external miner.
//
// The work package consists of 4 strings:
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3], hex encoded block number
makeWork := func(block *types.Block) {
hash := ethash.SealHash(block.Header())
go s.loop()
return s
}
currentWork[0] = hash.Hex()
currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
currentWork[3] = hexutil.EncodeBig(block.Number())
// Trace the seal work fetched by remote sealer.
currentBlock = block
works[hash] = block
}
// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (a rejection can mean either a bad
// pow or any other error, such as no pending work or a stale mining result).
submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if currentBlock == nil {
log.Error("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
block := works[sealhash]
if block == nil {
log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest
start := time.Now()
if !noverify {
if err := ethash.verifySeal(nil, header, true); err != nil {
log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
return false
}
}
// Make sure the result channel is assigned.
if results == nil {
log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
// Solution seems to be valid; return it to the miner and notify acceptance.
solution := block.WithSeal(header)
// The submitted solution is within the scope of acceptance.
if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
select {
case results <- solution:
log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
return false
}
}
// The submitted block is too old to accept, drop it.
log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return false
}
func (s *remoteSealer) loop() {
defer func() {
s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
s.cancelNotify()
s.reqWG.Wait()
close(s.exitCh)
}()
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case work := <-ethash.workCh:
case work := <-s.workCh:
// Update current work with new received block.
// Note the same work can be posted twice; this happens when changing CPU threads.
results = work.results
s.results = work.results
s.makeWork(work.block)
s.notifyWork()
makeWork(work.block)
// Notify any requested URLs of the new work availability
notifyWork()
case work := <-ethash.fetchWorkCh:
case work := <-s.fetchWorkCh:
// Return current mining work to remote miner.
if currentBlock == nil {
if s.currentBlock == nil {
work.errc <- errNoMiningWork
} else {
work.res <- currentWork
work.res <- s.currentWork
}
case result := <-ethash.submitWorkCh:
case result := <-s.submitWorkCh:
// Verify submitted PoW solution based on maintained mining blocks.
if submitWork(result.nonce, result.mixDigest, result.hash) {
if s.submitWork(result.nonce, result.mixDigest, result.hash) {
result.errc <- nil
} else {
result.errc <- errInvalidSealResult
}
case result := <-ethash.submitRateCh:
case result := <-s.submitRateCh:
// Trace remote sealer's hash rate by submitted value.
rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
close(result.done)
case req := <-ethash.fetchRateCh:
case req := <-s.fetchRateCh:
// Gather all hash rate submitted by remote sealer.
var total uint64
for _, rate := range rates {
for _, rate := range s.rates {
// this could overflow
total += rate.rate
}
@@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {
case <-ticker.C:
// Clear stale submitted hash rate.
for id, rate := range rates {
for id, rate := range s.rates {
if time.Since(rate.ping) > 10*time.Second {
delete(rates, id)
delete(s.rates, id)
}
}
// Clear stale pending blocks
if currentBlock != nil {
for hash, block := range works {
if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
delete(works, hash)
if s.currentBlock != nil {
for hash, block := range s.works {
if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
delete(s.works, hash)
}
}
}
case errc := <-ethash.exitCh:
// Exit remote loop if ethash is closed and return relevant error.
errc <- nil
log.Trace("Ethash remote sealer is exiting")
case <-s.requestExit:
return
}
}
}
// makeWork creates a work package for external miner.
//
// The work package consists of 4 strings:
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3], hex encoded block number
func (s *remoteSealer) makeWork(block *types.Block) {
hash := s.ethash.SealHash(block.Header())
s.currentWork[0] = hash.Hex()
s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
s.currentWork[3] = hexutil.EncodeBig(block.Number())
// Trace the seal work fetched by remote sealer.
s.currentBlock = block
s.works[hash] = block
}
// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
func (s *remoteSealer) notifyWork() {
work := s.currentWork
blob, _ := json.Marshal(work)
s.reqWG.Add(len(s.notifyURLs))
for _, url := range s.notifyURLs {
go s.sendNotification(s.notifyCtx, url, blob, work)
}
}
func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
defer s.reqWG.Done()
req, err := http.NewRequest("POST", url, bytes.NewReader(json))
if err != nil {
s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
return
}
ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
defer cancel()
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
} else {
s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
resp.Body.Close()
}
}
// submitWork verifies the submitted pow solution, returning
// whether the solution was accepted or not (a rejection can mean either a bad
// pow or any other error, such as no pending work or a stale mining result).
func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if s.currentBlock == nil {
s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
block := s.works[sealhash]
if block == nil {
s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest
start := time.Now()
if !s.noverify {
if err := s.ethash.verifySeal(nil, header, true); err != nil {
s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
return false
}
}
// Make sure the result channel is assigned.
if s.results == nil {
s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
// Solution seems to be valid; return it to the miner and notify acceptance.
solution := block.WithSeal(header)
// The submitted solution is within the scope of acceptance.
if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
select {
case s.results <- solution:
s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
return false
}
}
// The submitted block is too old to accept, drop it.
s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return false
}
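
(For scale: staleThreshold is 7 in this package, so a solution for block N is still accepted while the current work is at block N+6 or earlier; anything older hits the "too old" path above.)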

View File

@@ -20,59 +20,39 @@ import (
"encoding/json"
"io/ioutil"
"math/big"
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/internal/testlog"
"github.com/ethereum/go-ethereum/log"
)
// Tests whether remote HTTP servers are correctly notified of new work.
func TestRemoteNotify(t *testing.T) {
// Start a simple webserver to capture notifications
// Start a simple web server to capture notifications.
sink := make(chan [3]string)
server := &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Fatalf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}),
}
// Open a custom listener to extract its local address
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("failed to open notification server: %v", err)
}
defer listener.Close()
go server.Serve(listener)
// Wait for server to start listening
var tries int
for tries = 0; tries < 10; tries++ {
conn, _ := net.DialTimeout("tcp", listener.Addr().String(), 1*time.Second)
if conn != nil {
break
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Errorf("failed to read miner notification: %v", err)
}
}
if tries == 10 {
t.Fatal("tcp listener not ready for more than 10 seconds")
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Errorf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}))
defer server.Close()
// Create the custom ethash engine
ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
// Create the custom ethash engine.
ethash := NewTester([]string{server.URL}, false)
defer ethash.Close()
// Stream a work task and ensure the notification bubbles out
// Stream a work task and ensure the notification bubbles out.
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
@@ -97,46 +77,37 @@ func TestRemoteNotify(t *testing.T) {
// Tests that pushing work packages fast to the miner doesn't cause any data race
// issues in the notifications.
func TestRemoteMultiNotify(t *testing.T) {
// Start a simple webserver to capture notifications
// Start a simple web server to capture notifications.
sink := make(chan [3]string, 64)
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Errorf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Errorf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}))
defer server.Close()
server := &http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
blob, err := ioutil.ReadAll(req.Body)
if err != nil {
t.Fatalf("failed to read miner notification: %v", err)
}
var work [3]string
if err := json.Unmarshal(blob, &work); err != nil {
t.Fatalf("failed to unmarshal miner notification: %v", err)
}
sink <- work
}),
}
// Open a custom listener to extract its local address
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
t.Fatalf("failed to open notification server: %v", err)
}
defer listener.Close()
go server.Serve(listener)
// Create the custom ethash engine
ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
// Create the custom ethash engine.
ethash := NewTester([]string{server.URL}, false)
ethash.config.Log = testlog.Logger(t, log.LvlWarn)
defer ethash.Close()
// Stream a lot of work tasks and ensure all the notifications bubble out
// Stream a lot of work tasks and ensure all the notifications bubble out.
for i := 0; i < cap(sink); i++ {
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
ethash.Seal(nil, block, nil, nil)
}
for i := 0; i < cap(sink); i++ {
select {
case <-sink:
case <-time.After(3 * time.Second):
case <-time.After(10 * time.Second):
t.Fatalf("notification %d timed out", i)
}
}
@@ -206,10 +177,10 @@ func TestStaleSubmission(t *testing.T) {
select {
case res := <-results:
if res.Header().Nonce != fakeNonce {
t.Errorf("case %d block nonce mismatch, want %s, get %s", id+1, fakeNonce, res.Header().Nonce)
t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce)
}
if res.Header().MixDigest != fakeDigest {
t.Errorf("case %d block digest mismatch, want %s, get %s", id+1, fakeDigest, res.Header().MixDigest)
t.Errorf("case %d block digest mismatch, want %x, get %x", id+1, fakeDigest, res.Header().MixDigest)
}
if res.Header().Difficulty.Uint64() != c.headers[c.submitIndex].Difficulty.Uint64() {
t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty, res.Header().Difficulty)

View File

@@ -1260,16 +1260,16 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
}
// WriteBlockWithState writes the block and all associated state to the database.
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
return bc.writeBlockWithState(block, receipts, state)
return bc.writeBlockWithState(block, receipts, logs, state, emitHeadEvent)
}
// writeBlockWithState writes the block and all associated state to the database,
// but it expects the chain mutex to be held.
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state *state.StateDB) (status WriteStatus, err error) {
func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()
@@ -1394,6 +1394,23 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
bc.insert(block)
}
bc.futureBlocks.Remove(block.Hash())
if status == CanonStatTy {
bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
// In theory we should fire a ChainHeadEvent when we inject
// a canonical block, but sometimes we can insert a batch of
// canonical blocks. To avoid firing too many ChainHeadEvents,
// we fire a single accumulated event elsewhere and suppress the
// per-block event here (unless emitHeadEvent is set).
if emitHeadEvent {
bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
}
} else {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
return status, nil
}
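
With PostChainEvents gone, consumers observe these events only through the subscription feeds being fed above. A minimal sketch of the consumer side (watchHead is an illustrative helper name, and a constructed *core.BlockChain is assumed):

import (
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/log"
)

// watchHead drains ChainHeadEvents from the feed that writeBlockWithState
// and the accumulated InsertChain path now publish to directly.
func watchHead(chain *core.BlockChain) error {
	headCh := make(chan core.ChainHeadEvent, 16)
	sub := chain.SubscribeChainHeadEvent(headCh)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-headCh:
			log.Info("New chain head", "number", ev.Block.Number(), "hash", ev.Block.Hash())
		case err := <-sub.Err():
			return err
		}
	}
}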
@@ -1444,11 +1461,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
bc.chainmu.Lock()
n, events, logs, err := bc.insertChain(chain, true)
n, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()
bc.PostChainEvents(events, logs)
return n, err
}
@@ -1460,23 +1476,24 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
return 0, nil, nil, nil
return 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
// A queued approach to delivering events. This is generally
// faster than direct delivery and requires much less mutex
// acquiring.
var (
stats = insertStats{startTime: mclock.Now()}
events = make([]interface{}, 0, len(chain))
lastCanon *types.Block
coalescedLogs []*types.Log
stats = insertStats{startTime: mclock.Now()}
lastCanon *types.Block
)
// Fire a single chain head event if we've progressed the chain
defer func() {
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
}
}()
// Start the parallel header verifier
headers := make([]*types.Header, len(chain))
seals := make([]bool, len(chain))
@@ -1526,7 +1543,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && err == ErrKnownBlock {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
return it.index, nil, nil, err
return it.index, err
}
lastCanon = block
@@ -1545,7 +1562,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
block, err = it.next()
}
@@ -1553,14 +1570,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
stats.ignored += it.remaining()
// If there are any still remaining, mark as ignored
return it.index, events, coalescedLogs, err
return it.index, err
// Some other error occurred, abort
case err != nil:
bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
return it.index, events, coalescedLogs, err
return it.index, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
@@ -1572,7 +1589,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
return it.index, events, coalescedLogs, ErrBlacklistedHash
return it.index, ErrBlacklistedHash
}
// If the block is known (in the middle of the chain), it's a special case for
// Clique blocks where they can share state among each other, so importing an
@@ -1589,15 +1606,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"root", block.Root())
if err := bc.writeKnownBlock(block); err != nil {
return it.index, nil, nil, err
return it.index, err
}
stats.processed++
// We can assume that logs are empty here, since the only way for consecutive
// Clique blocks to have the same state is if there are no transactions.
events = append(events, ChainEvent{block, block.Hash(), nil})
lastCanon = block
continue
}
// Retrieve the parent block and its state to execute on top
@@ -1609,7 +1624,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
}
statedb, err := state.New(parent.Root, bc.stateCache)
if err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
@@ -1634,7 +1649,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
@@ -1653,7 +1668,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
proctime := time.Since(start)
@@ -1665,10 +1680,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Write the block to the chain and get the status.
substart = time.Now()
status, err := bc.writeBlockWithState(block, receipts, statedb)
status, err := bc.writeBlockWithState(block, receipts, logs, statedb, false)
if err != nil {
atomic.StoreUint32(&followupInterrupt, 1)
return it.index, events, coalescedLogs, err
return it.index, err
}
atomic.StoreUint32(&followupInterrupt, 1)
@@ -1686,8 +1701,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"elapsed", common.PrettyDuration(time.Since(start)),
"root", block.Root())
coalescedLogs = append(coalescedLogs, logs...)
events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
// Only count canonical blocks for GC processing time
@@ -1698,7 +1711,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
events = append(events, ChainSideEvent{block})
default:
// This in theory is impossible, but let's be nice to our future selves and leave
@@ -1717,24 +1729,20 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
block, err = it.next()
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
return it.index, events, coalescedLogs, err
return it.index, err
}
stats.queued++
}
}
stats.ignored += it.remaining()
// Append a single chain head event if we've progressed the chain
if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
events = append(events, ChainHeadEvent{lastCanon})
}
return it.index, events, coalescedLogs, err
return it.index, err
}
// insertSideChain is called when an import batch hits upon a pruned ancestor
@@ -1743,7 +1751,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
@@ -1779,7 +1787,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
return it.index, nil, nil, errors.New("sidechain ghost-state attack")
return it.index, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
@@ -1790,7 +1798,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil {
return it.index, nil, nil, err
return it.index, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@@ -1807,7 +1815,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
localTd := bc.GetTd(current.Hash(), current.NumberU64())
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
return it.index, nil, nil, err
return it.index, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
@@ -1822,7 +1830,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
return it.index, nil, nil, errors.New("missing parent")
return it.index, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@@ -1841,15 +1849,15 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
if _, _, _, err := bc.insertChain(blocks, false); err != nil {
return 0, nil, nil, err
if _, err := bc.insertChain(blocks, false); err != nil {
return 0, err
}
blocks, memory = blocks[:0], 0
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
return 0, nil, nil, nil
return 0, nil
}
}
}
@@ -1857,7 +1865,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
return 0, nil, nil, nil
return 0, nil
}
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
@@ -1872,11 +1880,11 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs types.Transactions
addedTxs types.Transactions
deletedLogs []*types.Log
rebirthLogs []*types.Log
deletedLogs [][]*types.Log
rebirthLogs [][]*types.Log
// collectLogs collects the logs that were generated during the
// processing of the block that corresponds with the given hash.
// collectLogs collects the logs that were generated or removed during
// the processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn
collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
@@ -1884,17 +1892,39 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
var logs []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
l := *log
if removed {
l.Removed = true
deletedLogs = append(deletedLogs, &l)
} else {
rebirthLogs = append(rebirthLogs, &l)
}
logs = append(logs, &l)
}
}
if len(logs) > 0 {
if removed {
deletedLogs = append(deletedLogs, logs)
} else {
rebirthLogs = append(rebirthLogs, logs)
}
}
}
// mergeLogs returns a merged log slice with specified sort order.
mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
var ret []*types.Log
if reverse {
for i := len(logs) - 1; i >= 0; i-- {
ret = append(ret, logs[i]...)
}
} else {
for i := 0; i < len(logs); i++ {
ret = append(ret, logs[i]...)
}
}
return ret
}
)
// Reduce the longer chain to the same number as the shorter one
@@ -1990,47 +2020,20 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// this goroutine if there are no events to fire, but realistically that only
// ever happens if we're reorging empty blocks, which will only happen on idle
// networks where performance is not an issue either way.
//
// TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
// event ordering?
go func() {
if len(deletedLogs) > 0 {
bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
if len(deletedLogs) > 0 {
bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
}
if len(rebirthLogs) > 0 {
bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
}
if len(oldChain) > 0 {
for i := len(oldChain) - 1; i >= 0; i-- {
bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
}
if len(rebirthLogs) > 0 {
bc.logsFeed.Send(rebirthLogs)
}
if len(oldChain) > 0 {
for _, block := range oldChain {
bc.chainSideFeed.Send(ChainSideEvent{Block: block})
}
}
}()
}
return nil
}
// PostChainEvents iterates over the events generated by a chain insertion and
// posts them into the event feed.
// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
// post event logs for further processing
if logs != nil {
bc.logsFeed.Send(logs)
}
for _, event := range events {
switch ev := event.(type) {
case ChainEvent:
bc.chainFeed.Send(ev)
case ChainHeadEvent:
bc.chainHeadFeed.Send(ev)
case ChainSideEvent:
bc.chainSideFeed.Send(ev)
}
}
}
func (bc *BlockChain) update() {
futureTimer := time.NewTicker(5 * time.Second)
defer futureTimer.Stop()


@@ -22,6 +22,7 @@ import (
"math/big"
"math/rand"
"os"
"reflect"
"sync"
"testing"
"time"
@@ -960,16 +961,20 @@ func TestLogReorgs(t *testing.T) {
}
chain, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
timeout := time.NewTimer(1 * time.Second)
select {
case ev := <-rmLogsCh:
done := make(chan struct{})
go func() {
ev := <-rmLogsCh
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
close(done)
}()
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
timeout := time.NewTimer(1 * time.Second)
select {
case <-done:
case <-timeout.C:
t.Fatal("Timeout. No RemovedLogsEvent has been sent.")
}
@@ -982,39 +987,47 @@ func TestLogRebirth(t *testing.T) {
db = rawdb.NewMemoryDatabase()
// this code generates a log
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
genesis = gspec.MustCommit(db)
signer = types.NewEIP155Signer(gspec.Config.ChainID)
newLogCh = make(chan bool)
removeLogCh = make(chan bool)
)
// listenNewLog checks whether the received logs number is equal with expected.
listenNewLog := func(sink chan []*types.Log, expect int) {
// validateLogEvent checks whether the number of received logs matches the expectation.
validateLogEvent := func(sink interface{}, result chan bool, expect int) {
chanval := reflect.ValueOf(sink)
chantyp := chanval.Type()
if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.RecvDir == 0 {
t.Fatalf("invalid channel, given type %v", chantyp)
}
cnt := 0
var recv []reflect.Value
timeout := time.After(1 * time.Second)
cases := []reflect.SelectCase{{Chan: chanval, Dir: reflect.SelectRecv}, {Chan: reflect.ValueOf(timeout), Dir: reflect.SelectRecv}}
for {
select {
case logs := <-sink:
cnt += len(logs)
case <-time.NewTimer(5 * time.Second).C:
// new logs timeout
newLogCh <- false
chose, v, _ := reflect.Select(cases)
if chose == 1 {
// Not enough events received
result <- false
return
}
cnt += 1
recv = append(recv, v)
if cnt == expect {
break
} else if cnt > expect {
// redundant logs received
newLogCh <- false
return
}
}
select {
case <-sink:
// redundant logs received
newLogCh <- false
case <-time.NewTimer(100 * time.Millisecond).C:
newLogCh <- true
done := time.After(50 * time.Millisecond)
cases = cases[:1]
cases = append(cases, reflect.SelectCase{Chan: reflect.ValueOf(done), Dir: reflect.SelectRecv})
chose, _, _ := reflect.Select(cases)
// If chose equals 0, a redundant event was received.
if chose == 1 {
result <- true
} else {
result <- false
}
}
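The helper above leans on reflect.Select to receive from a channel of arbitrary element type with a timeout; a self-contained sketch of that pattern (names assumed, not from the patch):

package example

import (
	"reflect"
	"time"
)

// recvWithTimeout receives one value from ch (which must be a channel) or
// gives up once the timeout channel fires first.
func recvWithTimeout(ch interface{}, d time.Duration) (interface{}, bool) {
	cases := []reflect.SelectCase{
		{Chan: reflect.ValueOf(ch), Dir: reflect.SelectRecv},
		{Chan: reflect.ValueOf(time.After(d)), Dir: reflect.SelectRecv},
	}
	if chose, v, _ := reflect.Select(cases); chose == 0 {
		return v.Interface(), true
	}
	return nil, false // the timeout case fired first
}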
@@ -1038,12 +1051,12 @@ func TestLogRebirth(t *testing.T) {
})
// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Generate long reorg chain
@@ -1060,40 +1073,31 @@ func TestLogRebirth(t *testing.T) {
})
// Spawn a goroutine to receive log events
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(forkChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
if !<-removeLogCh {
t.Fatal("failed to receive removed log event")
}
newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
go listenNewLog(logsCh, 1)
go validateLogEvent(logsCh, newLogCh, 1)
go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
// Ensure removedLog events received
select {
case ev := <-rmLogsCh:
if len(ev.Logs) == 0 {
t.Error("expected logs")
}
case <-time.NewTimer(1 * time.Second).C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}
// Rebirth logs should emit a newLogEvent
if !<-newLogCh {
t.Fatalf("failed to receive new log event")
t.Fatal("failed to receive new log event")
}
// Ensure removedLog events received
if !<-removeLogCh {
t.Fatal("failed to receive removed log event")
}
}
@@ -1145,7 +1149,6 @@ func TestSideLogRebirth(t *testing.T) {
logsCh := make(chan []*types.Log)
blockchain.SubscribeLogsEvent(logsCh)
chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
// Higher block difficulty


@@ -57,8 +57,10 @@ func TestCreation(t *testing.T) {
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Today Istanbul block
{10000000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Future Istanbul block
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
{9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
{9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // First Muir Glacier block
{10000000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // Future Muir Glacier block
},
},
// Ropsten test cases
@@ -76,8 +78,10 @@ func TestCreation(t *testing.T) {
{4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
{4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
{6485845, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // First Istanbul block
{7500000, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // Future Istanbul block
{6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block
{7117116, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block
{7117117, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // First Muir Glacier block
{7500000, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // Future
},
},
// Rinkeby test cases
@@ -181,11 +185,11 @@ func TestValidation(t *testing.T) {
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
// Local is mainnet Istanbul, far in the future. Remote announces Gopherium (non existing fork)
// Local is mainnet Muir Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future block 88888888 for itself, but a past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
{88888888, ID{Hash: checksumToBytes(0x879d6e30), Next: 88888888}, ErrLocalIncompatibleOrStale},
{88888888, ID{Hash: checksumToBytes(0xe029e991), Next: 88888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible.
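For context, these IDs follow EIP-2124: the hash is a CRC32 of the genesis hash with every passed fork block folded in. A sketch of that folding, assuming the standard encoding (the helper name is illustrative; geth's real code lives in core/forkid):

package example

import (
	"encoding/binary"
	"hash/crc32"
)

// forkHash folds each passed fork block number (big-endian uint64) into the
// CRC32 of the genesis hash; crossing Muir Glacier at 9,200,000 is what moves
// mainnet from 0x879d6e30 to 0xe029e991 in the table above.
func forkHash(genesis [32]byte, passedForks []uint64) uint32 {
	sum := crc32.ChecksumIEEE(genesis[:])
	for _, fork := range passedForks {
		var blob [8]byte
		binary.BigEndian.PutUint64(blob[:], fork)
		sum = crc32.Update(sum, crc32.IEEETable, blob[:])
	}
	return sum
}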


@@ -152,10 +152,10 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
return SetupGenesisBlockWithOverride(db, genesis, nil)
return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
}
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul *big.Int) (*params.ChainConfig, common.Hash, error) {
func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul, overrideMuirGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -207,6 +207,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if overrideIstanbul != nil {
newcfg.IstanbulBlock = overrideIstanbul
}
if overrideMuirGlacier != nil {
newcfg.MuirGlacierBlock = overrideMuirGlacier
}
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
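A minimal usage sketch of the extended override hook; the wrapper function is hypothetical, and a nil override leaves the corresponding fork block untouched:

package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

// setupWithOverride forces the Muir Glacier activation block while leaving
// the Istanbul block as configured in the genesis.
func setupWithOverride() error {
	db := rawdb.NewMemoryDatabase()
	_, _, err := core.SetupGenesisBlockWithOverride(db, core.DefaultGenesisBlock(), nil, big.NewInt(9200000))
	return err
}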


@@ -150,11 +150,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}
// Database contains only data older than the freezer; this happens if the
// state was wiped and reinited from an existing freezer.
} else {
// Key-value store continues where the freezer left off, all is fine. We might
// have duplicate blocks (crash after freezer write but before kay-value store
// deletion, but that's fine).
}
// Otherwise, key-value store continues where the freezer left off, all is fine.
// We might have duplicate blocks (crash after freezer write but before key-value
// store deletion, but that's fine).
} else {
// If the freezer is empty, ensure nothing was moved yet from the key-value
// store, otherwise we'll end up missing data. We check block #1 to decide
@@ -167,9 +166,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
}
// Block #1 is still in the database, we're allowed to init a new freezer
} else {
// The head header is still the genesis, we're allowed to init a new freezer
}
// Otherwise, the head header is still the genesis, we're allowed to init a new
// freezer.
}
}
// Freezer is consistent with the key-value database, permit combining the two


@@ -55,10 +55,10 @@ func InitDatabaseFromFreezer(db ethdb.Database) error {
if n >= frozen {
return
}
// Retrieve the block from the freezer (no need for the hash, we pull by
// number from the freezer). If successful, pre-cache the block hash and
// the individual transaction hashes for storing into the database.
block := ReadBlock(db, common.Hash{}, n)
// Retrieve the block from the freezer. If successful, pre-cache
// the block hash and the individual transaction hashes for storing
// into the database.
block := ReadBlock(db, ReadCanonicalHash(db, n), n)
if block != nil {
block.Hash()
for _, tx := range block.Transactions() {

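A sketch of the corrected lookup order, resolving the canonical hash before fetching the block so the reader's hash check can pass for frozen data (wrapper name is illustrative):

package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// readFrozenBlock resolves the canonical hash for the height first, then
// fetches the block by hash and number.
func readFrozenBlock(db ethdb.Database, number uint64) bool {
	hash := rawdb.ReadCanonicalHash(db, number)
	return rawdb.ReadBlock(db, hash, number) != nil
}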

@@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bn256"
"github.com/ethereum/go-ethereum/params"
//lint:ignore SA1019 Needed for precompile
"golang.org/x/crypto/ripemd160"
)


@@ -29,7 +29,6 @@ import (
// precompiledTest defines the input/output pairs for precompiled contract tests.
type precompiledTest struct {
input, expected string
gas uint64
name string
noBenchmark bool // Benchmark primarily the worst-cases
}
@@ -418,6 +417,24 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
})
}
func testPrecompiledOOG(addr string, test precompiledTest, t *testing.T) {
p := PrecompiledContractsIstanbul[common.HexToAddress(addr)]
in := common.Hex2Bytes(test.input)
contract := NewContract(AccountRef(common.HexToAddress("1337")),
nil, new(big.Int), p.RequiredGas(in)-1)
t.Run(fmt.Sprintf("%s-Gas=%d", test.name, contract.Gas), func(t *testing.T) {
_, err := RunPrecompiledContract(p, in, contract)
if err.Error() != "out of gas" {
t.Errorf("Expected error [out of gas], got [%v]", err)
}
// Verify that the precompile did not touch the input buffer
exp := common.Hex2Bytes(test.input)
if !bytes.Equal(in, exp) {
t.Errorf("Precompiled %v modified input data", addr)
}
})
}
func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing.T) {
p := PrecompiledContractsIstanbul[common.HexToAddress(addr)]
in := common.Hex2Bytes(test.input)
@@ -541,6 +558,13 @@ func BenchmarkPrecompiledBn256Add(bench *testing.B) {
}
}
// Tests OOG
func TestPrecompiledModExpOOG(t *testing.T) {
for _, test := range modexpTests {
testPrecompiledOOG("05", test, t)
}
}
// Tests the sample inputs from the elliptic curve scalar multiplication EIP 213.
func TestPrecompiledBn256ScalarMul(t *testing.T) {
for _, test := range bn256ScalarMulTests {


@@ -74,13 +74,6 @@ func (st *Stack) Back(n int) *big.Int {
return st.data[st.len()-n-1]
}
func (st *Stack) require(n int) error {
if st.len() < n {
return fmt.Errorf("stack underflow (%d <=> %d)", len(st.data), n)
}
return nil
}
// Print dumps the content of the stack
func (st *Stack) Print() {
fmt.Println("### stack ###")


@@ -135,7 +135,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul)
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul, config.OverrideMuirGlacier)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}


@@ -157,4 +157,7 @@ type Config struct {
// Istanbul block override (TODO: remove after the fork)
OverrideIstanbul *big.Int
// MuirGlacier block override (TODO: remove after the fork)
OverrideMuirGlacier *big.Int
}

go.mod

@@ -7,10 +7,11 @@ require (
github.com/Azure/azure-storage-blob-go v0.7.0
github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
github.com/VictoriaMetrics/fastcache v1.5.2
github.com/VictoriaMetrics/fastcache v1.5.3
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6
github.com/cespare/cp v0.1.0
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea

go.sum

@@ -25,8 +25,8 @@ github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.5.2 h1:Erd8iIuBAL9kke8JzM4+WxkKuFkHh3ktwLanJvDgR44=
github.com/VictoriaMetrics/fastcache v1.5.2/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
github.com/VictoriaMetrics/fastcache v1.5.3 h1:2odJnXLbFZcoV9KYtQ+7TH1UOq3dn3AssMgieaezkR4=
github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -42,6 +42,8 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/Kg3zkn7NxevNzILnZYWDCG7qbA1CJik0=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=


@@ -127,12 +127,12 @@ func (tt *TestCmd) matchExactOutput(want []byte) error {
// Find the mismatch position.
for i := 0; i < n; i++ {
if want[i] != buf[i] {
return fmt.Errorf("Output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
return fmt.Errorf("output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
buf[:i], buf[i:n], want)
}
}
if n < len(want) {
return fmt.Errorf("Not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
return fmt.Errorf("not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
buf, want[:n], want[n:])
}
}


@@ -486,7 +486,7 @@ func (s *PrivateAccountAPI) InitializeWallet(ctx context.Context, url string) (s
case *scwallet.Wallet:
return mnemonic, wallet.Initialize(seed)
default:
return "", fmt.Errorf("Specified wallet does not support initialization")
return "", fmt.Errorf("specified wallet does not support initialization")
}
}
@@ -501,7 +501,7 @@ func (s *PrivateAccountAPI) Unpair(ctx context.Context, url string, pin string)
case *scwallet.Wallet:
return wallet.Unpair([]byte(pin))
default:
return fmt.Errorf("Specified wallet does not support pairing")
return fmt.Errorf("specified wallet does not support pairing")
}
}
@@ -1389,7 +1389,7 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error {
args.Nonce = (*hexutil.Uint64)(&nonce)
}
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
return errors.New(`Both "data" and "input" are set and not equal. Please use "input" to pass transaction call data.`)
return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
}
if args.To == nil {
// Contract creation
@@ -1645,7 +1645,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr
}
}
return common.Hash{}, fmt.Errorf("Transaction %#x not found", matchTx.Hash())
return common.Hash{}, fmt.Errorf("transaction %#x not found", matchTx.Hash())
}
// PublicDebugAPI is the collection of Ethereum APIs exposed over the public


@@ -1,8 +1,7 @@
// Code generated by go-bindata. DO NOT EDIT.
// Package deps Code generated by go-bindata. (@generated) DO NOT EDIT.
// sources:
// bignumber.js
// web3.js
package deps
import (
@@ -20,7 +19,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
@@ -28,7 +27,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
@@ -49,21 +48,32 @@ type bindataFileInfo struct {
modTime time.Time
}
// Name returns the file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size returns the file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode returns the file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// ModTime returns the file modification time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir returns whether the file is a directory
func (fi bindataFileInfo) IsDir() bool {
return false
return fi.mode&os.ModeDir != 0
}
// Sys returns nil; the file has no underlying data source
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
@@ -161,8 +171,7 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"bignumber.js": bignumberJs,
"web3.js": web3Js,
"web3.js": web3Js,
}
// AssetDir returns the file names below a certain
@@ -228,7 +237,11 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively


@@ -227,6 +227,11 @@ const DebugJs = `
web3._extend({
property: 'debug',
methods: [
new web3._extend.Method({
name: 'accountRange',
call: 'debug_accountRange',
params: 2
}),
new web3._extend.Method({
name: 'printBlock',
call: 'debug_printBlock',


@@ -72,7 +72,8 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul)
chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis,
config.OverrideIstanbul, config.OverrideMuirGlacier)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}


@@ -119,7 +119,7 @@ func (rm *retrieveManager) retrieve(ctx context.Context, reqID uint64, req *dist
case <-ctx.Done():
sentReq.stop(ctx.Err())
case <-shutdown:
sentReq.stop(fmt.Errorf("Client is shutting down"))
sentReq.stop(fmt.Errorf("client is shutting down"))
}
return sentReq.getError()
}


@@ -54,51 +54,51 @@ func newLesTxRelay(ps *peerSet, retriever *retrieveManager) *lesTxRelay {
return r
}
func (self *lesTxRelay) Stop() {
close(self.stop)
func (ltrx *lesTxRelay) Stop() {
close(ltrx.stop)
}
func (self *lesTxRelay) registerPeer(p *peer) {
self.lock.Lock()
defer self.lock.Unlock()
func (ltrx *lesTxRelay) registerPeer(p *peer) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
self.peerList = self.ps.AllPeers()
ltrx.peerList = ltrx.ps.AllPeers()
}
func (self *lesTxRelay) unregisterPeer(p *peer) {
self.lock.Lock()
defer self.lock.Unlock()
func (ltrx *lesTxRelay) unregisterPeer(p *peer) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
self.peerList = self.ps.AllPeers()
ltrx.peerList = ltrx.ps.AllPeers()
}
// send sends a list of transactions to at most a given number of peers at
// once, never resending any particular transaction to the same peer twice
func (self *lesTxRelay) send(txs types.Transactions, count int) {
func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
sendTo := make(map[*peer]types.Transactions)
self.peerStartPos++ // rotate the starting position of the peer list
if self.peerStartPos >= len(self.peerList) {
self.peerStartPos = 0
ltrx.peerStartPos++ // rotate the starting position of the peer list
if ltrx.peerStartPos >= len(ltrx.peerList) {
ltrx.peerStartPos = 0
}
for _, tx := range txs {
hash := tx.Hash()
ltr, ok := self.txSent[hash]
ltr, ok := ltrx.txSent[hash]
if !ok {
ltr = &ltrInfo{
tx: tx,
sentTo: make(map[*peer]struct{}),
}
self.txSent[hash] = ltr
self.txPending[hash] = struct{}{}
ltrx.txSent[hash] = ltr
ltrx.txPending[hash] = struct{}{}
}
if len(self.peerList) > 0 {
if len(ltrx.peerList) > 0 {
cnt := count
pos := self.peerStartPos
pos := ltrx.peerStartPos
for {
peer := self.peerList[pos]
peer := ltrx.peerList[pos]
if _, ok := ltr.sentTo[peer]; !ok {
sendTo[peer] = append(sendTo[peer], tx)
ltr.sentTo[peer] = struct{}{}
@@ -108,10 +108,10 @@ func (self *lesTxRelay) send(txs types.Transactions, count int) {
break // sent it to the desired number of peers
}
pos++
if pos == len(self.peerList) {
if pos == len(ltrx.peerList) {
pos = 0
}
if pos == self.peerStartPos {
if pos == ltrx.peerStartPos {
break // tried all available peers
}
}
@@ -139,46 +139,46 @@ func (self *lesTxRelay) send(txs types.Transactions, count int) {
return func() { peer.SendTxs(reqID, cost, enc) }
},
}
go self.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, self.stop)
go ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop)
}
}
func (self *lesTxRelay) Send(txs types.Transactions) {
self.lock.Lock()
defer self.lock.Unlock()
func (ltrx *lesTxRelay) Send(txs types.Transactions) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
self.send(txs, 3)
ltrx.send(txs, 3)
}
func (self *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
self.lock.Lock()
defer self.lock.Unlock()
func (ltrx *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
for _, hash := range mined {
delete(self.txPending, hash)
delete(ltrx.txPending, hash)
}
for _, hash := range rollback {
self.txPending[hash] = struct{}{}
ltrx.txPending[hash] = struct{}{}
}
if len(self.txPending) > 0 {
txs := make(types.Transactions, len(self.txPending))
if len(ltrx.txPending) > 0 {
txs := make(types.Transactions, len(ltrx.txPending))
i := 0
for hash := range self.txPending {
txs[i] = self.txSent[hash].tx
for hash := range ltrx.txPending {
txs[i] = ltrx.txSent[hash].tx
i++
}
self.send(txs, 1)
ltrx.send(txs, 1)
}
}
func (self *lesTxRelay) Discard(hashes []common.Hash) {
self.lock.Lock()
defer self.lock.Unlock()
func (ltrx *lesTxRelay) Discard(hashes []common.Hash) {
ltrx.lock.Lock()
defer ltrx.lock.Unlock()
for _, hash := range hashes {
delete(self.txSent, hash)
delete(self.txPending, hash)
delete(ltrx.txSent, hash)
delete(ltrx.txPending, hash)
}
}


@@ -207,7 +207,7 @@ func (h *GlogHandler) Log(r *Record) error {
}
// Check callsite cache for previously calculated log levels
h.lock.RLock()
lvl, ok := h.siteCache[r.Call.PC()]
lvl, ok := h.siteCache[r.Call.Frame().PC]
h.lock.RUnlock()
// If we didn't cache the callsite yet, calculate it
@@ -215,13 +215,13 @@ func (h *GlogHandler) Log(r *Record) error {
h.lock.Lock()
for _, rule := range h.patterns {
if rule.pattern.MatchString(fmt.Sprintf("%+s", r.Call)) {
h.siteCache[r.Call.PC()], lvl, ok = rule.level, rule.level, true
h.siteCache[r.Call.Frame().PC], lvl, ok = rule.level, rule.level, true
break
}
}
// If no rule matched, remember to drop log the next time
if !ok {
h.siteCache[r.Call.PC()] = 0
h.siteCache[r.Call.Frame().PC] = 0
}
h.lock.Unlock()
}


@@ -83,7 +83,7 @@ func LvlFromString(lvlString string) (Lvl, error) {
case "crit":
return LvlCrit, nil
default:
return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
return LvlDebug, fmt.Errorf("unknown level: %v", lvlString)
}
}


@@ -590,7 +590,7 @@ func (w *worker) resultLoop() {
logs = append(logs, receipt.Logs...)
}
// Commit block and state to database.
stat, err := w.chain.WriteBlockWithState(block, receipts, task.state)
_, err := w.chain.WriteBlockWithState(block, receipts, logs, task.state, true)
if err != nil {
log.Error("Failed writing block to chain", "err", err)
continue
@@ -601,16 +601,6 @@ func (w *worker) resultLoop() {
// Broadcast the block and announce chain insertion event
w.mux.Post(core.NewMinedBlockEvent{Block: block})
var events []interface{}
switch stat {
case core.CanonStatTy:
events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
events = append(events, core.ChainHeadEvent{Block: block})
case core.SideStatTy:
events = append(events, core.ChainSideEvent{Block: block})
}
w.chain.PostChainEvents(events, logs)
// Insert the block into the set of pending ones to resultLoop for confirmations
w.unconfirmed.Insert(block.NumberU64(), block.Hash())
@@ -996,3 +986,11 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
}
return nil
}
// postSideBlock fires a side chain event, only use it for testing.
func (w *worker) postSideBlock(event core.ChainSideEvent) {
select {
case w.chainSideCh <- event:
case <-w.exitCh:
}
}
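With PostChainEvents gone, consumers rely on the typed subscription feeds instead; a minimal sketch of the consumer side (function name assumed):

package example

import "github.com/ethereum/go-ethereum/core"

// watchHead subscribes to canonical head updates and reacts to each one in
// the order the blockchain emits them.
func watchHead(bc *core.BlockChain) {
	ch := make(chan core.ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(ch)
	defer sub.Unsubscribe()
	for ev := range ch {
		_ = ev.Block // handle the new canonical head
	}
}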


@@ -149,9 +149,6 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool }
func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
b.chain.PostChainEvents(events, nil)
}
func (b *testWorkerBackend) newRandomUncle() *types.Block {
var parent *types.Block
@@ -243,8 +240,8 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
for i := 0; i < 5; i++ {
b.txPool.AddLocal(b.newRandomTx(true))
b.txPool.AddLocal(b.newRandomTx(false))
b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.newRandomUncle()}})
b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.newRandomUncle()}})
w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()})
w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()})
select {
case e := <-loopErr:
t.Fatal(e)
@@ -295,7 +292,7 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens
}
w.skipSealHook = func(task *task) bool { return true }
w.fullTaskHook = func() {
// Aarch64 unit tests are running in a VM on travis, they must
// Arch64 unit tests are running in a VM on travis, they must
// be given more time to execute.
time.Sleep(time.Second)
}
@@ -351,7 +348,8 @@ func TestStreamUncleBlock(t *testing.T) {
}
}
b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.uncleBlock}})
w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock})
select {
case <-taskCh:
case <-time.NewTimer(time.Second).C:


@@ -88,6 +88,8 @@ func (it *sliceIter) Next() bool {
}
func (it *sliceIter) Node() *Node {
it.mu.Lock()
defer it.mu.Unlock()
if len(it.nodes) == 0 {
return nil
}


@@ -66,15 +66,16 @@ var (
ConstantinopleBlock: big.NewInt(7280000),
PetersburgBlock: big.NewInt(7280000),
IstanbulBlock: big.NewInt(9069000),
MuirGlacierBlock: big.NewInt(9200000),
Ethash: new(EthashConfig),
}
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 270,
SectionHead: common.HexToHash("0xb67c33d838a60c282c2fb49b188fbbac1ef8565ffb4a1c4909b0a05885e72e40"),
CHTRoot: common.HexToHash("0x781daa4607782300da85d440df3813ba38a1262585231e35e9480726de81dbfc"),
BloomRoot: common.HexToHash("0xfd8951fa6d779cbc981df40dc31056ed1a549db529349d7dfae016f9d96cae72"),
SectionIndex: 275,
SectionHead: common.HexToHash("0x03159234a3699e31d27e5d83a55cbcf8ceb1f2d90855c219c55d79089b61abd4"),
CHTRoot: common.HexToHash("0xd0c1f3828a4dcb2ee76625fdbea85afeabfb61c04adf07439d2fc1cf00469f76"),
BloomRoot: common.HexToHash("0xab8ea2be8aa24703208fee3fc0afdbb536301013f412a7282b2692d6d68f92c5"),
}
// MainnetCheckpointOracle contains a set of configs for the main network oracle.
@@ -104,15 +105,16 @@ var (
ConstantinopleBlock: big.NewInt(4230000),
PetersburgBlock: big.NewInt(4939394),
IstanbulBlock: big.NewInt(6485846),
MuirGlacierBlock: big.NewInt(7117117),
Ethash: new(EthashConfig),
}
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 204,
SectionHead: common.HexToHash("0xa39168b51c3205456f30ce6a91f3590a43295b15a1c8c2ab86bb8c06b8ad1808"),
CHTRoot: common.HexToHash("0x9a3654147b79882bfc4e16fbd3421512aa7e4dfadc6c511923980e0877bdf3b4"),
BloomRoot: common.HexToHash("0xe72b979522d94fa45c1331639316da234a9bb85062d64d72e13afe1d3f5c17d5"),
SectionIndex: 209,
SectionHead: common.HexToHash("0x8037eb6872b69397d434121424ed8d6ab74be32bf3cb3f12dc5d9657fc146860"),
CHTRoot: common.HexToHash("0xe64b7d6324e5cbdcbbc250adf4cf24a639a665aa83ccfd6a0b84a80faaaa0d41"),
BloomRoot: common.HexToHash("0x80fedbef680cd70d3dc4b50b14480fba82c74361a35e8dc7be9f11e03077c840"),
}
// TestnetCheckpointOracle contains a set of configs for the Ropsten test network oracle.
@@ -150,10 +152,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 163,
SectionHead: common.HexToHash("0x36e5deaa46f258bece94b05d8e10f1ef68f422fb62ed47a2b6e616aa26e84997"),
CHTRoot: common.HexToHash("0x829b9feca1c2cdf5a4cf3efac554889e438ee4df8718c2ce3e02555a02d9e9e5"),
BloomRoot: common.HexToHash("0x58c01de24fdae7c082ebbe7665f189d0aa4d90ee10e72086bf56651c63269e54"),
SectionIndex: 168,
SectionHead: common.HexToHash("0x87301279595b16ac59360c839ef86b159e21fedbfcc8847d727ef446a14cf334"),
CHTRoot: common.HexToHash("0x00f522dd0705ff647cebdd36707d6779caaf77f5fe8f958aae85f36aa88e3f9c"),
BloomRoot: common.HexToHash("0xc908547a6b01c47c65a4581c68090e5602308d39e893f7c0ae3e16c52ce2abf2"),
}
// RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle.
@@ -189,10 +191,10 @@ var (
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{
SectionIndex: 47,
SectionHead: common.HexToHash("0x00c5b54c6c9a73660501fd9273ccdb4c5bbdbe5d7b8b650e28f881ec9d2337f6"),
CHTRoot: common.HexToHash("0xef35caa155fd659f57167e7d507de2f8132cbb31f771526481211d8a977d704c"),
BloomRoot: common.HexToHash("0xbda330402f66008d52e7adc748da28535b1212a7912a21244acd2ba77ff0ff06"),
SectionIndex: 52,
SectionHead: common.HexToHash("0x64c3bbc896578cbf782e343db48e334177e87fb8b16106b75e1dcebf59ca59dc"),
CHTRoot: common.HexToHash("0x5d092e644f3815de40b8c4196698d3e34a9097cf3066a499c96e83e3927d8b8d"),
BloomRoot: common.HexToHash("0xb2ceb966b499dd9e6e5bf6adbf35440a0e15cbccc0f527f89a1c522a9f36250a"),
}
// GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle.
@@ -213,16 +215,16 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil}
TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil}
TestRules = TestChainConfig.Rules(new(big.Int))
)
@@ -292,6 +294,7 @@ type ChainConfig struct {
ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople)
IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul)
MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // EIP-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated)
// Various consensus engines
@@ -329,7 +332,7 @@ func (c *ChainConfig) String() string {
default:
engine = "unknown"
}
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v Engine: %v}",
return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
@@ -341,6 +344,7 @@ func (c *ChainConfig) String() string {
c.ConstantinopleBlock,
c.PetersburgBlock,
c.IstanbulBlock,
c.MuirGlacierBlock,
engine,
)
}
@@ -380,6 +384,11 @@ func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
return isForked(c.ConstantinopleBlock, num)
}
// IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool {
return isForked(c.MuirGlacierBlock, num)
}
// IsPetersburg returns whether num is either
// - equal to or greater than the PetersburgBlock fork block,
// - OR is nil, and Constantinople is active
@@ -432,6 +441,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{"constantinopleBlock", c.ConstantinopleBlock},
{"petersburgBlock", c.PetersburgBlock},
{"istanbulBlock", c.IstanbulBlock},
{"muirGlacierBlock", c.MuirGlacierBlock},
} {
if lastFork.name != "" {
// Next one must be higher number
@@ -485,6 +495,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi
if isForkIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, head) {
return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock)
}
if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) {
return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
}
if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
}
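For reference, a sketch mirroring the semantics the Is* accessors rely on; the actual helper in params may differ in detail, but a nil fork block is never considered active:

package example

import "math/big"

// isForkedSketch reports whether head is at or past the fork block s.
func isForkedSketch(s, head *big.Int) bool {
	if s == nil || head == nil {
		return false
	}
	return s.Cmp(head) <= 0
}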


@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 9 // Minor version component of the current release
VersionPatch = 8 // Patch version component of the current release
VersionPatch = 9 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
)


@@ -17,7 +17,6 @@
package trie
import (
"encoding/binary"
"errors"
"fmt"
"io"
@@ -39,6 +38,11 @@ var (
memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)
memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
@@ -272,19 +276,6 @@ func expandNode(hash hashNode, n node) node {
}
}
// trienodeHasher is a struct to be used with BigCache, which uses a Hasher to
// determine which shard to place an entry into. It's not a cryptographic hash,
// just to provide a bit of anti-collision (default is FNV64a).
//
// Since trie keys are already hashes, we can just use the key directly to
// map shard id.
type trienodeHasher struct{}
// Sum64 implements the bigcache.Hasher interface.
func (t trienodeHasher) Sum64(key string) uint64 {
return binary.BigEndian.Uint64([]byte(key))
}
// NewDatabase creates a new trie database to store ephemeral trie content before
// it's written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
@@ -335,6 +326,8 @@ func (db *Database) insert(hash common.Hash, blob []byte, node node) {
if _, ok := db.dirties[hash]; ok {
return
}
memcacheDirtyWriteMeter.Mark(int64(len(blob)))
// Create the cached entry for this node
entry := &cachedNode{
node: simplifyNode(node),
@@ -386,8 +379,12 @@ func (db *Database) node(hash common.Hash) node {
db.lock.RUnlock()
if dirty != nil {
memcacheDirtyHitMeter.Mark(1)
memcacheDirtyReadMeter.Mark(int64(dirty.size))
return dirty.obj(hash)
}
memcacheDirtyMissMeter.Mark(1)
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
if err != nil || enc == nil {
@@ -422,8 +419,12 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
db.lock.RUnlock()
if dirty != nil {
memcacheDirtyHitMeter.Mark(1)
memcacheDirtyReadMeter.Mark(int64(dirty.size))
return dirty.rlp(), nil
}
memcacheDirtyMissMeter.Mark(1)
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
if err == nil && enc != nil {
@@ -826,6 +827,7 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
// Move the flushed node into the clean cache to prevent insta-reloads
if c.db.cleans != nil {
c.db.cleans.Set(hash[:], rlp)
memcacheCleanWriteMeter.Mark(int64(len(rlp)))
}
return nil
}
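A minimal sketch of how such meters are created and fed; the meter name below is made up, and a nil registry falls back to the default one:

package example

import "github.com/ethereum/go-ethereum/metrics"

// exampleWriteMeter accumulates byte counts which the metrics subsystem
// then exports alongside the trie/memcache/* meters above.
var exampleWriteMeter = metrics.NewRegisteredMeter("example/cache/write", nil)

func recordWrite(blob []byte) {
	exampleWriteMeter.Mark(int64(len(blob)))
}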