Compare commits

176 commits:

f0328f241b, 9f7bcb9d76, 86216189a5, ca298a2821, 9c82c646e4, d4d288e3f1, eb69f490ed, 195c979168,
c40943a167, 59f0e8ae60, 40b736463a, 6c3fea0fc9, c1b69bd121, 8d066f1f42, bf5cacfb8f, 92e3c56e7b,
9fd8825d5a, f6891ba40d, 65825cd134, 111a1b73cf, fb3a081c7e, 7e2bbb9cbb, 0654014652, aa123939c2,
28ec26094b, 1e973a96b4, 3fd16af5a9, da16d089c0, 127dc5982e, 8cacb42278, 9f75994b5e, 67c070c379,
b5a129ea24, 763b3f8d1f, 25bd17d725, 33022c2e7d, 8ec8b81b29, 25c9b49fdb, de6a113f84, b502b6ac97,
1027cb52c4, b06e8c4a8a, b45d82e94a, 0fffd3acbd, eb3ebceaa1, d1c243f841, 19b9cf714f, 6a44bf6826,
a8040bc2c5, 535f25d65f, f252154599, fd4f60f49b, e0e8bf31c5, 7ae6c4a790, 34501ed235, 6afb717be5,
51de2bc9dc, afe9558bba, 667e1c038e, 4f4622bc8b, 830231c1c4, 7a80cf6543, 8d99fedeae, 2352c72229,
6b8718c374, be7eb8ae17, dbfd397262, 6cd72660d0, 85042b7090, a6bf2487d1, fb2ae8e995, c3701b265e,
70da74e73a, 279409a98e, 496f05cf52, 8f66ea3786, 1b58e42802, 7d3ecca451, 57cec89253, 658415960e,
538a868384, 8c8a9e5ca1, 5079e3c6e5, 65ed1a6871, d1f6a9f544, 19c2c60bbe, 8401e4277a, ec64358ac9,
48605b5f61, 0a4ec1dde5, 870b4505a0, a79afd9ac3, 4860e50e05, 37f9d25ba0, 8fddf27a98, f4ff4268f7,
7307d97ae1, 6662c78ec0, 7033724522, 03b7de28b2, 687e4dc855, 0cb4d65f8d, 862f8e98bc, d6f49bf764,
06aaeed1a6, 9b93564e21, 4335bbbf0a, fc8ad1b70d, 4d086430bd, 2056e596f2, 20356e57b1, e98114da4f,
f01e2fab07, 55430b6ea2, 6c3513c077, 51e7968b8b, fb3a6528cf, 5a0d487c3b, 2d20fed893, 6ce4670bc0,
aaca58a7a1, 1a7e345af4, d99e759e76, afe344bcf3, c5436c8eb7, b868ca1790, 9da25c5db7, a5c0cfb451,
cac09a3823, 0c1bd22ec0, 64c53edf83, abd49a6c48, a9885505ca, e282246a4b, 015fde9a2c, 29cb5deea3,
78f13a3a57, 0e35192797, f39f068161, f9ce40bb84, 4230f5f08f, 78636ee568, bd615e0e5f, 683854255c,
06e16de894, 2dfa4bcf6c, eef7a33135, ae45c97d3d, c029cdc90b, 5bcbb2980b, 514ae7cfa3, 03aaea11d1,
7dec26db2a, 51eb5f8ca8, 4aab440ee2, f80ce141a1, b1f09596e6, 045e90c897, 2c58e6b62d, 52448e9585,
c006261758, e6b61edd57, acd7b36999, b1e72f7ea9, 1884f37f2c, 23471288c8, adc0a6adca, 0dec47b5c0,
127ce93db4, 9aa2e98191, 7403a38ab7, af2ca5a654, 0f893109c9, 4bd2d0eccf, 062d910b26, 893502e561
Files changed:

.gitmodules, .golangci.yml, .travis.yml, Dockerfile, Dockerfile.alltools, README.md, SECURITY.md, appveyor.yml
accounts/: abi, accounts.go, errors.go, keystore, scwallet, usbwallet
build/
cmd/: clef, devp2p, ethkey, evm, faucet, geth (accountcmd_test.go, config.go, consolecmd_test.go, dao_test.go, dbcmd.go, genesis_test.go, main.go, run_test.go, snapshot.go, usage.go, version_check_test.go), utils
common/compiler/
consensus/
console/
contracts/checkpointoracle/
core/: beacon, bench_test.go, blockchain.go, blockchain_insert.go, blockchain_repair_test.go, blockchain_sethead_test.go, blockchain_snapshot_test.go, blockchain_test.go, evm.go, genesis.go, genesis_alloc.go, genesis_test.go, rawdb/ (accessors_chain.go, accessors_chain_test.go, accessors_metadata.go, accessors_snapshot.go, accessors_state.go, accessors_sync.go, database.go, freezer.go, freezer_batch.go, freezer_meta.go, freezer_meta_test.go, freezer_table.go, freezer_table_test.go, freezer_test.go, freezer_utils.go, freezer_utils_test.go, key_length_iterator.go, key_length_iterator_test.go, schema.go, table.go)
.gitmodules (vendored, 4 additions):

@@ -2,3 +2,7 @@
 	path = tests/testdata
 	url = https://github.com/ethereum/tests
 	shallow = true
+[submodule "evm-benchmarks"]
+	path = tests/evm-benchmarks
+	url = https://github.com/ipsilon/evm-benchmarks
+	shallow = true
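With the new submodule registered, a fresh checkout wants its submodules initialized, e.g. `git submodule update --init --depth 1 --recursive` (the same command the appveyor.yml change further down adopts for CI).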
.golangci.yml:

@@ -1,7 +1,7 @@
 # This file configures github.com/golangci/golangci-lint.
 
 run:
-  timeout: 5m
+  timeout: 20m
   tests: true
   # default is true. Enables skipping of directories:
   #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
.travis.yml (24 changed lines, bumping CI to Go 1.18 and the legacy job to 1.17):

@@ -16,7 +16,7 @@ jobs:
   - stage: lint
     os: linux
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - lint
     git:
@@ -31,7 +31,7 @@ jobs:
     os: linux
     arch: amd64
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - docker
     services:
@@ -48,7 +48,7 @@ jobs:
     os: linux
     arch: arm64
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - docker
     services:
@@ -65,7 +65,7 @@ jobs:
     if: type = push
     os: linux
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - ubuntu-ppa
       - GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
     os: linux
     dist: bionic
     sudo: required
-    go: 1.17.x
+    go: 1.18.x
     env:
       - azure-linux
       - GO111MODULE=on
@@ -148,7 +148,7 @@ jobs:
       - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
 
       # Install Go to allow building with
-      - curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz
+      - curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
       - export PATH=`pwd`/go/bin:$PATH
       - export GOROOT=`pwd`/go
       - export GOPATH=$HOME/go
@@ -162,7 +162,7 @@ jobs:
   - stage: build
     if: type = push
     os: osx
-    go: 1.17.x
+    go: 1.18.x
     env:
       - azure-osx
       - azure-ios
@@ -194,7 +194,7 @@ jobs:
     os: linux
     arch: amd64
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - GO111MODULE=on
     script:
@@ -205,7 +205,7 @@ jobs:
     os: linux
     arch: arm64
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - GO111MODULE=on
     script:
@@ -214,7 +214,7 @@ jobs:
   - stage: build
     os: linux
     dist: bionic
-    go: 1.16.x
+    go: 1.17.x
    env:
       - GO111MODULE=on
     script:
@@ -225,7 +225,7 @@ jobs:
     if: type = cron
     os: linux
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - azure-purge
       - GO111MODULE=on
@@ -239,7 +239,7 @@ jobs:
     if: type = cron
     os: linux
     dist: bionic
-    go: 1.17.x
+    go: 1.18.x
     env:
       - GO111MODULE=on
     script:
Dockerfile:

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 
Dockerfile.alltools:

@@ -4,7 +4,7 @@ ARG VERSION=""
 ARG BUILDNUM=""
 
 # Build Geth in a stock Go builder container
-FROM golang:1.17-alpine as builder
+FROM golang:1.18-alpine as builder
 
 RUN apk add --no-cache gcc musl-dev linux-headers git
 
README.md (18 changed lines):

@@ -52,6 +52,22 @@ Going through all the possible command line flags is out of scope here (please c
 but we've enumerated a few common parameter combos to get you up to speed quickly
 on how you can run your own `geth` instance.
 
+### Hardware Requirements
+
+Minimum:
+
+* CPU with 2+ cores
+* 4GB RAM
+* 1TB free storage space to sync the Mainnet
+* 8 MBit/sec download Internet service
+
+Recommended:
+
+* Fast CPU with 4+ cores
+* 16GB+ RAM
+* High Performance SSD with at least 1TB free space
+* 25+ MBit/sec download Internet service
+
 ### Full node on the main Ethereum network
 
 By far the most common scenario is people wanting to simply interact with the Ethereum
@@ -165,7 +181,7 @@ saving your blockchain as well as map the default ports. There is also an `alpin
 available for a slim version of the image.
 
 Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
-and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
+and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
 accessible from the outside.
 
 ### Programmatically interfacing `geth` nodes
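Tying the two Docker-related notes together, a hedged one-liner for publishing the HTTP-RPC port from a container (using the image name this repo publishes) would be `docker run -p 8545:8545 ethereum/client-go --http --http.addr 0.0.0.0`; without `--http.addr 0.0.0.0`, geth binds the endpoint to the container's loopback interface and it stays unreachable from the host.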
SECURITY.md:

@@ -19,7 +19,7 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
 
 **Please do not file a public ticket** mentioning the vulnerability.
 
-To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.
+To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
 
 Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
 
accounts/abi, Arguments.Unpack:

@@ -81,13 +81,7 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
 		if len(arguments) != 0 {
 			return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
 		}
-		// Nothing to unmarshal, return default variables
-		nonIndexedArgs := arguments.NonIndexed()
-		defaultVars := make([]interface{}, len(nonIndexedArgs))
-		for index, arg := range nonIndexedArgs {
-			defaultVars[index] = reflect.New(arg.Type.GetType())
-		}
-		return defaultVars, nil
+		return make([]interface{}, 0), nil
 	}
 	return arguments.UnpackValues(data)
 }
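The observable effect, as a minimal sketch from a caller's side (assuming an empty `abi.Arguments` value, i.e. a method with no outputs): unpacking empty return data now yields a plain empty slice instead of a list of `reflect.New` placeholder values.

	var args abi.Arguments        // no outputs declared
	vals, err := args.Unpack(nil) // empty input data
	// after this change: err == nil and len(vals) == 0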
SimulatedBackend:

@@ -230,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
 	defer b.mu.Unlock()
 
 	receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
+	if receipt == nil {
+		return nil, ethereum.NotFound
+	}
 	return receipt, nil
 }
 
@@ -639,7 +642,6 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
 }
 
 // SendTransaction updates the pending block to include the given transaction.
-// It panics if the transaction is invalid.
 func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
 	b.mu.Lock()
 	defer b.mu.Unlock()
@@ -647,17 +649,17 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 	// Get the last block
 	block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
 	if err != nil {
-		panic("could not fetch parent")
+		return fmt.Errorf("could not fetch parent")
 	}
 	// Check transaction validity
 	signer := types.MakeSigner(b.blockchain.Config(), block.Number())
 	sender, err := types.Sender(signer, tx)
 	if err != nil {
-		panic(fmt.Errorf("invalid transaction: %v", err))
+		return fmt.Errorf("invalid transaction: %v", err)
 	}
 	nonce := b.pendingState.GetNonce(sender)
 	if tx.Nonce() != nonce {
-		panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
+		return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
 	}
 	// Include tx in chain
 	blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
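For callers of the simulated backend, a minimal sketch of the new contract (assuming `backend` is a `*backends.SimulatedBackend` and `tx` a signed transaction): invalid submissions now come back as ordinary errors instead of panics, and a missing receipt is reported as `ethereum.NotFound` rather than a nil receipt with a nil error.

	ctx := context.Background()
	if err := backend.SendTransaction(ctx, tx); err != nil {
		log.Fatalf("send failed: %v", err) // e.g. "invalid transaction nonce: got 5, want 3"
	}
	backend.Commit() // mine the pending block

	receipt, err := backend.TransactionReceipt(ctx, tx.Hash())
	switch {
	case errors.Is(err, ethereum.NotFound):
		log.Println("receipt not available yet")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println("status:", receipt.Status)
	}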
BoundContract.Call:

@@ -171,7 +171,10 @@ func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method stri
 			return ErrNoPendingState
 		}
 		output, err = pb.PendingCallContract(ctx, msg)
-		if err == nil && len(output) == 0 {
+		if err != nil {
+			return err
+		}
+		if len(output) == 0 {
 			// Make sure we have a contract to operate on, and bail out otherwise.
 			if code, err = pb.PendingCodeAt(ctx, c.address); err != nil {
 				return err
bind package tests:

@@ -18,6 +18,7 @@ package bind_test
 
 import (
 	"context"
+	"errors"
 	"math/big"
 	"reflect"
 	"strings"
@@ -75,34 +76,51 @@ func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transac
 }
 
 type mockCaller struct {
 	codeAtBlockNumber       *big.Int
 	callContractBlockNumber *big.Int
-	pendingCodeAtCalled     bool
-	pendingCallContractCalled bool
+	callContractBytes       []byte
+	callContractErr         error
+	codeAtBytes             []byte
+	codeAtErr               error
 }
 
 func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
 	mc.codeAtBlockNumber = blockNumber
-	return []byte{1, 2, 3}, nil
+	return mc.codeAtBytes, mc.codeAtErr
 }
 
 func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {
 	mc.callContractBlockNumber = blockNumber
-	return nil, nil
+	return mc.callContractBytes, mc.callContractErr
 }
 
-func (mc *mockCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
+type mockPendingCaller struct {
+	*mockCaller
+	pendingCodeAtBytes        []byte
+	pendingCodeAtErr          error
+	pendingCodeAtCalled       bool
+	pendingCallContractCalled bool
+	pendingCallContractBytes  []byte
+	pendingCallContractErr    error
+}
+
+func (mc *mockPendingCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
 	mc.pendingCodeAtCalled = true
-	return nil, nil
+	return mc.pendingCodeAtBytes, mc.pendingCodeAtErr
 }
 
-func (mc *mockCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
+func (mc *mockPendingCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) {
 	mc.pendingCallContractCalled = true
-	return nil, nil
+	return mc.pendingCallContractBytes, mc.pendingCallContractErr
 }
 
 func TestPassingBlockNumber(t *testing.T) {
 
-	mc := &mockCaller{}
+	mc := &mockPendingCaller{
+		mockCaller: &mockCaller{
+			codeAtBytes: []byte{1, 2, 3},
+		},
+	}
 
 	bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
 		Methods: map[string]abi.Method{
@@ -341,3 +359,132 @@ func newMockLog(topics []common.Hash, txHash common.Hash) types.Log {
 		Removed: false,
 	}
 }
+
+func TestCall(t *testing.T) {
+	var method, methodWithArg = "something", "somethingArrrrg"
+	tests := []struct {
+		name, method string
+		opts         *bind.CallOpts
+		mc           bind.ContractCaller
+		results      *[]interface{}
+		wantErr      bool
+		wantErrExact error
+	}{{
+		name: "ok not pending",
+		mc: &mockCaller{
+			codeAtBytes: []byte{0},
+		},
+		method: method,
+	}, {
+		name: "ok pending",
+		mc: &mockPendingCaller{
+			pendingCodeAtBytes: []byte{0},
+		},
+		opts: &bind.CallOpts{
+			Pending: true,
+		},
+		method: method,
+	}, {
+		name:    "pack error, no method",
+		mc:      new(mockCaller),
+		method:  "else",
+		wantErr: true,
+	}, {
+		name: "interface error, pending but not a PendingContractCaller",
+		mc:   new(mockCaller),
+		opts: &bind.CallOpts{
+			Pending: true,
+		},
+		method:       method,
+		wantErrExact: bind.ErrNoPendingState,
+	}, {
+		name: "pending call canceled",
+		mc: &mockPendingCaller{
+			pendingCallContractErr: context.DeadlineExceeded,
+		},
+		opts: &bind.CallOpts{
+			Pending: true,
+		},
+		method:       method,
+		wantErrExact: context.DeadlineExceeded,
+	}, {
+		name: "pending code at error",
+		mc: &mockPendingCaller{
+			pendingCodeAtErr: errors.New(""),
+		},
+		opts: &bind.CallOpts{
+			Pending: true,
+		},
+		method:  method,
+		wantErr: true,
+	}, {
+		name: "no pending code at",
+		mc:   new(mockPendingCaller),
+		opts: &bind.CallOpts{
+			Pending: true,
+		},
+		method:       method,
+		wantErrExact: bind.ErrNoCode,
+	}, {
+		name: "call contract error",
+		mc: &mockCaller{
+			callContractErr: context.DeadlineExceeded,
+		},
+		method:       method,
+		wantErrExact: context.DeadlineExceeded,
+	}, {
+		name: "code at error",
+		mc: &mockCaller{
+			codeAtErr: errors.New(""),
+		},
+		method:  method,
+		wantErr: true,
+	}, {
+		name:         "no code at",
+		mc:           new(mockCaller),
+		method:       method,
+		wantErrExact: bind.ErrNoCode,
+	}, {
+		name: "unpack error missing arg",
+		mc: &mockCaller{
+			codeAtBytes: []byte{0},
+		},
+		method:  methodWithArg,
+		wantErr: true,
+	}, {
+		name: "interface unpack error",
+		mc: &mockCaller{
+			codeAtBytes: []byte{0},
+		},
+		method:  method,
+		results: &[]interface{}{0},
+		wantErr: true,
+	}}
+	for _, test := range tests {
+		bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{
+			Methods: map[string]abi.Method{
+				method: {
+					Name:    method,
+					Outputs: abi.Arguments{},
+				},
+				methodWithArg: {
+					Name:    methodWithArg,
+					Outputs: abi.Arguments{abi.Argument{}},
+				},
+			},
+		}, test.mc, nil, nil)
+		err := bc.Call(test.opts, test.results, test.method)
+		if test.wantErr || test.wantErrExact != nil {
+			if err == nil {
+				t.Fatalf("%q expected error", test.name)
+			}
+			if test.wantErrExact != nil && !errors.Is(err, test.wantErrExact) {
+				t.Fatalf("%q expected error %q but got %q", test.name, test.wantErrExact, err)
+			}
+			continue
+		}
+		if err != nil {
+			t.Fatalf("%q unexpected error: %v", test.name, err)
+		}
+	}
+}
TestGolangBindings:

@@ -1966,14 +1966,10 @@ func TestGolangBindings(t *testing.T) {
 		t.Skip("go sdk not found for testing")
 	}
 	// Create a temporary workspace for the test suite
-	ws, err := ioutil.TempDir("", "binding-test")
-	if err != nil {
-		t.Fatalf("failed to create temporary workspace: %v", err)
-	}
-	//defer os.RemoveAll(ws)
+	ws := t.TempDir()
 
 	pkg := filepath.Join(ws, "bindtest")
-	if err = os.MkdirAll(pkg, 0700); err != nil {
+	if err := os.MkdirAll(pkg, 0700); err != nil {
 		t.Fatalf("failed to create package: %v", err)
 	}
 	// Generate the test suite for all the contracts
WaitMined:

@@ -21,6 +21,7 @@ import (
 	"errors"
 	"time"
 
+	"github.com/ethereum/go-ethereum"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/log"
@@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
 	logger := log.New("hash", tx.Hash())
 	for {
 		receipt, err := b.TransactionReceipt(ctx, tx.Hash())
-		if receipt != nil {
+		if err == nil {
 			return receipt, nil
 		}
-		if err != nil {
-			logger.Trace("Receipt retrieval failed", "err", err)
-		} else {
+
+		if errors.Is(err, ethereum.NotFound) {
 			logger.Trace("Transaction not yet mined")
+		} else {
+			logger.Trace("Receipt retrieval failed", "err", err)
 		}
 
 		// Wait for the next round.
 		select {
 		case <-ctx.Done():
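A short usage sketch of the updated helper (assuming `client` is any `bind.DeployBackend`, for instance an `*ethclient.Client`): the loop now returns on the first nil error and keeps polling only while the backend reports `ethereum.NotFound`, so a context deadline is the natural way to bound the wait.

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	receipt, err := bind.WaitMined(ctx, client, tx)
	if err != nil {
		log.Fatalf("not mined in time: %v", err) // ctx.Err() once the deadline expires
	}
	fmt.Println("mined in block", receipt.BlockNumber)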
accounts/abi/selector_parser.go (new file, 160 lines):

package abi

import (
	"fmt"
)

type SelectorMarshaling struct {
	Name   string               `json:"name"`
	Type   string               `json:"type"`
	Inputs []ArgumentMarshaling `json:"inputs"`
}

func isDigit(c byte) bool {
	return c >= '0' && c <= '9'
}

func isAlpha(c byte) bool {
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

func isIdentifierSymbol(c byte) bool {
	return c == '$' || c == '_'
}

func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
	if len(unescapedSelector) == 0 {
		return "", "", fmt.Errorf("empty token")
	}
	firstChar := unescapedSelector[0]
	position := 1
	if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
		return "", "", fmt.Errorf("invalid token start: %c", firstChar)
	}
	for position < len(unescapedSelector) {
		char := unescapedSelector[position]
		if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
			break
		}
		position++
	}
	return unescapedSelector[:position], unescapedSelector[position:], nil
}

func parseIdentifier(unescapedSelector string) (string, string, error) {
	return parseToken(unescapedSelector, true)
}

func parseElementaryType(unescapedSelector string) (string, string, error) {
	parsedType, rest, err := parseToken(unescapedSelector, false)
	if err != nil {
		return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
	}
	// handle arrays
	for len(rest) > 0 && rest[0] == '[' {
		parsedType = parsedType + string(rest[0])
		rest = rest[1:]
		for len(rest) > 0 && isDigit(rest[0]) {
			parsedType = parsedType + string(rest[0])
			rest = rest[1:]
		}
		if len(rest) == 0 || rest[0] != ']' {
			return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0])
		}
		parsedType = parsedType + string(rest[0])
		rest = rest[1:]
	}
	return parsedType, rest, nil
}

func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
	if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
		return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0])
	}
	parsedType, rest, err := parseType(unescapedSelector[1:])
	if err != nil {
		return nil, "", fmt.Errorf("failed to parse type: %v", err)
	}
	result := []interface{}{parsedType}
	for len(rest) > 0 && rest[0] != ')' {
		parsedType, rest, err = parseType(rest[1:])
		if err != nil {
			return nil, "", fmt.Errorf("failed to parse type: %v", err)
		}
		result = append(result, parsedType)
	}
	if len(rest) == 0 || rest[0] != ')' {
		return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
	}
	if len(rest) >= 3 && rest[1] == '[' && rest[2] == ']' {
		return append(result, "[]"), rest[3:], nil
	}
	return result, rest[1:], nil
}

func parseType(unescapedSelector string) (interface{}, string, error) {
	if len(unescapedSelector) == 0 {
		return nil, "", fmt.Errorf("empty type")
	}
	if unescapedSelector[0] == '(' {
		return parseCompositeType(unescapedSelector)
	} else {
		return parseElementaryType(unescapedSelector)
	}
}

func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
	arguments := make([]ArgumentMarshaling, 0)
	for i, arg := range args {
		// generate dummy name to avoid unmarshal issues
		name := fmt.Sprintf("name%d", i)
		if s, ok := arg.(string); ok {
			arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
		} else if components, ok := arg.([]interface{}); ok {
			subArgs, err := assembleArgs(components)
			if err != nil {
				return nil, fmt.Errorf("failed to assemble components: %v", err)
			}
			tupleType := "tuple"
			if len(subArgs) != 0 && subArgs[len(subArgs)-1].Type == "[]" {
				subArgs = subArgs[:len(subArgs)-1]
				tupleType = "tuple[]"
			}
			arguments = append(arguments, ArgumentMarshaling{name, tupleType, tupleType, subArgs, false})
		} else {
			return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
		}
	}
	return arguments, nil
}

// ParseSelector converts a method selector into a struct that can be JSON encoded
// and consumed by other functions in this package.
// Note, although uppercase letters are not part of the ABI spec, this function
// still accepts it as the general format is valid.
func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
	name, rest, err := parseIdentifier(unescapedSelector)
	if err != nil {
		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
	}
	args := []interface{}{}
	if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
		rest = rest[2:]
	} else {
		args, rest, err = parseCompositeType(rest)
		if err != nil {
			return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
		}
	}
	if len(rest) > 0 {
		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
	}

	// Reassemble the fake ABI and constuct the JSON
	fakeArgs, err := assembleArgs(args)
	if err != nil {
		return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
	}

	return SelectorMarshaling{name, "function", fakeArgs}, nil
}
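A usage sketch for the new parser: handing it a human-readable selector yields a `SelectorMarshaling` that can be JSON-encoded into a single-method "fake" ABI, as the doc comment above describes; input names are the auto-generated `name0`, `name1`, ... from `assembleArgs`.

	sel, err := abi.ParseSelector("transfer(address,uint256)")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sel.Name, sel.Type) // transfer function
	blob, _ := json.Marshal([]abi.SelectorMarshaling{sel})
	fmt.Println(string(blob)) // a one-element ABI whose inputs are name0/address and name1/uint256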
accounts/abi/selector_parser_test.go (new file, 63 lines):

package abi

import (
	"fmt"
	"log"
	"reflect"
	"testing"
)

func TestParseSelector(t *testing.T) {
	mkType := func(types ...interface{}) []ArgumentMarshaling {
		var result []ArgumentMarshaling
		for i, typeOrComponents := range types {
			name := fmt.Sprintf("name%d", i)
			if typeName, ok := typeOrComponents.(string); ok {
				result = append(result, ArgumentMarshaling{name, typeName, typeName, nil, false})
			} else if components, ok := typeOrComponents.([]ArgumentMarshaling); ok {
				result = append(result, ArgumentMarshaling{name, "tuple", "tuple", components, false})
			} else if components, ok := typeOrComponents.([][]ArgumentMarshaling); ok {
				result = append(result, ArgumentMarshaling{name, "tuple[]", "tuple[]", components[0], false})
			} else {
				log.Fatalf("unexpected type %T", typeOrComponents)
			}
		}
		return result
	}
	tests := []struct {
		input string
		name  string
		args  []ArgumentMarshaling
	}{
		{"noargs()", "noargs", []ArgumentMarshaling{}},
		{"simple(uint256,uint256,uint256)", "simple", mkType("uint256", "uint256", "uint256")},
		{"other(uint256,address)", "other", mkType("uint256", "address")},
		{"withArray(uint256[],address[2],uint8[4][][5])", "withArray", mkType("uint256[]", "address[2]", "uint8[4][][5]")},
		{"singleNest(bytes32,uint8,(uint256,uint256),address)", "singleNest", mkType("bytes32", "uint8", mkType("uint256", "uint256"), "address")},
		{"multiNest(address,(uint256[],uint256),((address,bytes32),uint256))", "multiNest",
			mkType("address", mkType("uint256[]", "uint256"), mkType(mkType("address", "bytes32"), "uint256"))},
		{"arrayNest((uint256,uint256)[],bytes32)", "arrayNest", mkType([][]ArgumentMarshaling{mkType("uint256", "uint256")}, "bytes32")},
		{"multiArrayNest((uint256,uint256)[],(uint256,uint256)[])", "multiArrayNest",
			mkType([][]ArgumentMarshaling{mkType("uint256", "uint256")}, [][]ArgumentMarshaling{mkType("uint256", "uint256")})},
		{"singleArrayNestAndArray((uint256,uint256)[],bytes32[])", "singleArrayNestAndArray",
			mkType([][]ArgumentMarshaling{mkType("uint256", "uint256")}, "bytes32[]")},
		{"singleArrayNestWithArrayAndArray((uint256[],address[2],uint8[4][][5])[],bytes32[])", "singleArrayNestWithArrayAndArray",
			mkType([][]ArgumentMarshaling{mkType("uint256[]", "address[2]", "uint8[4][][5]")}, "bytes32[]")},
	}
	for i, tt := range tests {
		selector, err := ParseSelector(tt.input)
		if err != nil {
			t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err)
		}
		if selector.Name != tt.name {
			t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name)
		}
		if selector.Type != "function" {
			t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function")
		}
		if !reflect.DeepEqual(selector.Inputs, tt.args) {
			t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args)
		}
	}
}
accounts/accounts.go (doc-comment typo fixes):

@@ -46,7 +46,7 @@ const (
 // accounts (derived from the same seed).
 type Wallet interface {
 	// URL retrieves the canonical path under which this wallet is reachable. It is
-	// user by upper layers to define a sorting order over all wallets from multiple
+	// used by upper layers to define a sorting order over all wallets from multiple
 	// backends.
 	URL() URL
 
@@ -89,7 +89,7 @@ type Wallet interface {
 	// accounts.
 	//
 	// Note, self derivation will increment the last component of the specified path
-	// opposed to decending into a child path to allow discovering accounts starting
+	// opposed to descending into a child path to allow discovering accounts starting
 	// from non zero components.
 	//
 	// Some hardware wallets switched derivation paths through their evolution, so
@@ -105,7 +105,7 @@ type Wallet interface {
 	// or optionally with the aid of any location metadata from the embedded URL field.
 	//
 	// If the wallet requires additional authentication to sign the request (e.g.
-	// a password to decrypt the account, or a PIN code o verify the transaction),
+	// a password to decrypt the account, or a PIN code to verify the transaction),
 	// an AuthNeededError instance will be returned, containing infos for the user
 	// about which fields or actions are needed. The user may retry by providing
 	// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
@@ -124,13 +124,13 @@ type Wallet interface {
 	// or optionally with the aid of any location metadata from the embedded URL field.
 	//
 	// If the wallet requires additional authentication to sign the request (e.g.
-	// a password to decrypt the account, or a PIN code o verify the transaction),
+	// a password to decrypt the account, or a PIN code to verify the transaction),
 	// an AuthNeededError instance will be returned, containing infos for the user
 	// about which fields or actions are needed. The user may retry by providing
 	// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
 	// the account in a keystore).
 	//
-	// This method should return the signature in 'canonical' format, with v 0 or 1
+	// This method should return the signature in 'canonical' format, with v 0 or 1.
 	SignText(account Account, text []byte) ([]byte, error)
 
 	// SignTextWithPassphrase is identical to Signtext, but also takes a password
accounts/errors.go:

@@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
 var ErrWalletAlreadyOpen = errors.New("wallet already open")
 
 // ErrWalletClosed is returned if a wallet is attempted to be opened the
-// secodn time.
+// second time.
 var ErrWalletClosed = errors.New("wallet closed")
 
 // AuthNeededError is returned by backends for signing requests where the user
keystore watcher test:

@@ -55,7 +55,6 @@ func TestWatchNewFile(t *testing.T) {
 	t.Parallel()
 
 	dir, ks := tmpKeyStore(t, false)
-	defer os.RemoveAll(dir)
 
 	// Ensure the watcher is started before adding any files.
 	ks.Accounts()
keystore tests:

@@ -17,7 +17,6 @@
 package keystore
 
 import (
-	"io/ioutil"
 	"math/rand"
 	"os"
 	"runtime"
@@ -38,7 +37,6 @@ var testSigData = make([]byte, 32)
 
 func TestKeyStore(t *testing.T) {
 	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
 
 	a, err := ks.NewAccount("foo")
 	if err != nil {
@@ -72,8 +70,7 @@ func TestKeyStore(t *testing.T) {
 }
 
 func TestSign(t *testing.T) {
-	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, true)
 
 	pass := "" // not used but required by API
 	a1, err := ks.NewAccount(pass)
@@ -89,8 +86,7 @@ func TestSign(t *testing.T) {
 }
 
 func TestSignWithPassphrase(t *testing.T) {
-	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, true)
 
 	pass := "passwd"
 	acc, err := ks.NewAccount(pass)
@@ -117,8 +113,7 @@ func TestSignWithPassphrase(t *testing.T) {
 }
 
 func TestTimedUnlock(t *testing.T) {
-	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, true)
 
 	pass := "foo"
 	a1, err := ks.NewAccount(pass)
@@ -152,8 +147,7 @@ func TestTimedUnlock(t *testing.T) {
 }
 
 func TestOverrideUnlock(t *testing.T) {
-	dir, ks := tmpKeyStore(t, false)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, false)
 
 	pass := "foo"
 	a1, err := ks.NewAccount(pass)
@@ -193,8 +187,7 @@ func TestOverrideUnlock(t *testing.T) {
 
 // This test should fail under -race if signing races the expiration goroutine.
 func TestSignRace(t *testing.T) {
-	dir, ks := tmpKeyStore(t, false)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, false)
 
 	// Create a test account.
 	a1, err := ks.NewAccount("")
@@ -222,8 +215,7 @@ func TestSignRace(t *testing.T) {
 // addition and removal of wallet event subscriptions.
 func TestWalletNotifierLifecycle(t *testing.T) {
 	// Create a temporary kesytore to test with
-	dir, ks := tmpKeyStore(t, false)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, false)
 
 	// Ensure that the notification updater is not running yet
 	time.Sleep(250 * time.Millisecond)
@@ -283,8 +275,7 @@ type walletEvent struct {
 // Tests that wallet notifications and correctly fired when accounts are added
 // or deleted from the keystore.
 func TestWalletNotifications(t *testing.T) {
-	dir, ks := tmpKeyStore(t, false)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, false)
 
 	// Subscribe to the wallet feed and collect events.
 	var (
@@ -345,8 +336,7 @@ func TestWalletNotifications(t *testing.T) {
 
 // TestImportExport tests the import functionality of a keystore.
 func TestImportECDSA(t *testing.T) {
-	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, true)
 	key, err := crypto.GenerateKey()
 	if err != nil {
 		t.Fatalf("failed to generate key: %v", key)
@@ -364,8 +354,7 @@ func TestImportECDSA(t *testing.T) {
 
 // TestImportECDSA tests the import and export functionality of a keystore.
 func TestImportExport(t *testing.T) {
-	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, true)
 	acc, err := ks.NewAccount("old")
 	if err != nil {
 		t.Fatalf("failed to create account: %v", acc)
@@ -374,8 +363,7 @@ func TestImportExport(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to export account: %v", acc)
 	}
-	dir2, ks2 := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir2)
+	_, ks2 := tmpKeyStore(t, true)
 	if _, err = ks2.Import(json, "old", "old"); err == nil {
 		t.Errorf("importing with invalid password succeeded")
 	}
@@ -395,8 +383,7 @@ func TestImportExport(t *testing.T) {
 // TestImportRace tests the keystore on races.
 // This test should fail under -race if importing races.
 func TestImportRace(t *testing.T) {
-	dir, ks := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStore(t, true)
 	acc, err := ks.NewAccount("old")
 	if err != nil {
 		t.Fatalf("failed to create account: %v", acc)
@@ -405,8 +392,7 @@ func TestImportRace(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to export account: %v", acc)
 	}
-	dir2, ks2 := tmpKeyStore(t, true)
-	defer os.RemoveAll(dir2)
+	_, ks2 := tmpKeyStore(t, true)
 	var atom uint32
 	var wg sync.WaitGroup
 	wg.Add(2)
@@ -462,10 +448,7 @@ func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
 }
 
 func tmpKeyStore(t *testing.T, encrypted bool) (string, *KeyStore) {
-	d, err := ioutil.TempDir("", "eth-keystore-test")
-	if err != nil {
-		t.Fatal(err)
-	}
+	d := t.TempDir()
 	newKs := NewPlaintextKeyStore
 	if encrypted {
 		newKs = func(kd string) *KeyStore { return NewKeyStore(kd, veryLightScryptN, veryLightScryptP) }
keystore passphrase/plain tests:

@@ -20,8 +20,6 @@ import (
 	"crypto/rand"
 	"encoding/hex"
 	"fmt"
-	"io/ioutil"
-	"os"
 	"path/filepath"
 	"reflect"
 	"strings"
@@ -32,10 +30,7 @@ import (
 )
 
 func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
-	d, err := ioutil.TempDir("", "geth-keystore-test")
-	if err != nil {
-		t.Fatal(err)
-	}
+	d := t.TempDir()
 	if encrypted {
 		ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
 	} else {
@@ -45,8 +40,7 @@ func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
 }
 
 func TestKeyStorePlain(t *testing.T) {
-	dir, ks := tmpKeyStoreIface(t, false)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStoreIface(t, false)
 
 	pass := "" // not used but required by API
 	k1, account, err := storeNewKey(ks, rand.Reader, pass)
@@ -66,8 +60,7 @@ func TestKeyStorePlain(t *testing.T) {
 }
 
 func TestKeyStorePassphrase(t *testing.T) {
-	dir, ks := tmpKeyStoreIface(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStoreIface(t, true)
 
 	pass := "foo"
 	k1, account, err := storeNewKey(ks, rand.Reader, pass)
@@ -87,8 +80,7 @@ func TestKeyStorePassphrase(t *testing.T) {
 }
 
 func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
-	dir, ks := tmpKeyStoreIface(t, true)
-	defer os.RemoveAll(dir)
+	_, ks := tmpKeyStoreIface(t, true)
 
 	pass := "foo"
 	k1, account, err := storeNewKey(ks, rand.Reader, pass)
@@ -102,7 +94,6 @@ func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
 
 func TestImportPreSaleKey(t *testing.T) {
 	dir, ks := tmpKeyStoreIface(t, true)
-	defer os.RemoveAll(dir)
 
 	// file content of a presale key file generated with:
 	// python pyethsaletool.py genwallet
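The motivation behind the repeated `dir, ks :=` to `_, ks :=` edits above: `testing.T.TempDir` (standard library, Go 1.15+) registers its own cleanup, so the `ioutil.TempDir` plus `defer os.RemoveAll(dir)` pairs become redundant. A minimal sketch:

	func TestExample(t *testing.T) {
		dir := t.TempDir() // unique per test, removed automatically when the test ends
		path := filepath.Join(dir, "key.json")
		if err := os.WriteFile(path, []byte("{}"), 0600); err != nil { // os.WriteFile needs Go 1.16+
			t.Fatal(err)
		}
		// no explicit cleanup required
	}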
accounts/scwallet:

@@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
 // accounts.
 //
 // Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
 // from non zero components.
 //
 // Some hardware wallets switched derivation paths through their evolution, so
accounts/usbwallet:

@@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
 // accounts.
 //
 // Note, self derivation will increment the last component of the specified path
-// opposed to decending into a child path to allow discovering accounts starting
+// opposed to descending into a child path to allow discovering accounts starting
 // from non zero components.
 //
 // Some hardware wallets switched derivation paths through their evolution, so
appveyor.yml:

@@ -13,7 +13,7 @@ environment:
   GETH_MINGW: 'C:\msys64\mingw32'
 
 install:
-  - git submodule update --init --depth 1
+  - git submodule update --init --depth 1 --recursive
   - go version
 
 for:
@@ -1,37 +1,58 @@
 # This file contains sha256 checksums of optional build dependencies.

-3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d  go1.17.5.src.tar.gz
-2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264  go1.17.5.darwin-amd64.tar.gz
-111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0  go1.17.5.darwin-arm64.tar.gz
-443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c  go1.17.5.freebsd-386.tar.gz
-17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3  go1.17.5.freebsd-amd64.tar.gz
-4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f  go1.17.5.linux-386.tar.gz
-bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e  go1.17.5.linux-amd64.tar.gz
-6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e  go1.17.5.linux-arm64.tar.gz
-aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de  go1.17.5.linux-armv6l.tar.gz
-3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70  go1.17.5.linux-ppc64le.tar.gz
-8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b  go1.17.5.linux-s390x.tar.gz
-6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe  go1.17.5.windows-386.zip
-671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9  go1.17.5.windows-amd64.zip
-45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030  go1.17.5.windows-arm64.zip
+efd43e0f1402e083b73a03d444b7b6576bb4c539ac46208b63a916b69aca4088  go1.18.1.src.tar.gz
+3703e9a0db1000f18c0c7b524f3d378aac71219b4715a6a4c5683eb639f41a4d  go1.18.1.darwin-amd64.tar.gz
+6d5641a06edba8cd6d425fb0adad06bad80e2afe0fa91b4aa0e5aed1bc78f58e  go1.18.1.darwin-arm64.tar.gz
+b9a9063d4265d8ccc046c9b314194d6eadc47e56d0d637db81e98e68aad45035  go1.18.1.freebsd-386.tar.gz
+2bc1c138d645e37dbbc63517dd1cf1bf33fc4cb95f442a6384df0418b5134e9f  go1.18.1.freebsd-amd64.tar.gz
+9a8df5dde9058f08ac01ecfaae42534610db398e487138788c01da26a0d41ff9  go1.18.1.linux-386.tar.gz
+b3b815f47ababac13810fc6021eb73d65478e0b2db4b09d348eefad9581a2334  go1.18.1.linux-amd64.tar.gz
+56a91851c97fb4697077abbca38860f735c32b38993ff79b088dac46e4735633  go1.18.1.linux-arm64.tar.gz
+9edc01c8e7db64e9ceeffc8258359e027812886ceca3444e83c4eb96ddb068ee  go1.18.1.linux-armv6l.tar.gz
+33db623d1eecf362fe365107c12efc90eff0b9609e0b3345e258388019cb552a  go1.18.1.linux-ppc64le.tar.gz
+5d9301324148ed4dbfaa0800da43a843ffd65c834ee73fcf087255697c925f74  go1.18.1.linux-s390x.tar.gz
+49ae65551acbfaa57b52fbefa0350b2072512ae3103b8cf1a919a02626dbc743  go1.18.1.windows-386.zip
+c30bc3f1f7314a953fe208bd9cd5e24bd9403392a6c556ced3677f9f70f71fe1  go1.18.1.windows-amd64.zip
+2c4a8265030eac37f906634f5c13c22c3d0ea725f2488e1bca005c6b981653d7  go1.18.1.windows-arm64.zip

-d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa  golangci-lint-1.42.0-darwin-amd64.tar.gz
-e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea  golangci-lint-1.42.0-darwin-arm64.tar.gz
-14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809  golangci-lint-1.42.0-freebsd-386.tar.gz
-337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80  golangci-lint-1.42.0-freebsd-amd64.tar.gz
-6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff  golangci-lint-1.42.0-freebsd-armv6.tar.gz
-878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e  golangci-lint-1.42.0-freebsd-armv7.tar.gz
-42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece  golangci-lint-1.42.0-linux-386.tar.gz
-6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159  golangci-lint-1.42.0-linux-amd64.tar.gz
-2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246  golangci-lint-1.42.0-linux-arm64.tar.gz
-08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1  golangci-lint-1.42.0-linux-armv6.tar.gz
-c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf  golangci-lint-1.42.0-linux-armv7.tar.gz
-3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40  golangci-lint-1.42.0-linux-mips64.tar.gz
-f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420  golangci-lint-1.42.0-linux-mips64le.tar.gz
-1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6  golangci-lint-1.42.0-linux-ppc64le.tar.gz
-8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d  golangci-lint-1.42.0-linux-riscv64.tar.gz
-5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc  golangci-lint-1.42.0-linux-s390x.tar.gz
-e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0  golangci-lint-1.42.0-windows-386.zip
-7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883  golangci-lint-1.42.0-windows-amd64.zip
-59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab  golangci-lint-1.42.0-windows-armv6.zip
-65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5  golangci-lint-1.42.0-windows-armv7.zip
+03c181fc1bb29ea3e73cbb23399c43b081063833a7cf7554b94e5a98308df53e  golangci-lint-1.45.2-linux-riscv64.deb
+08a50bbbf451ede6d5354179eb3e14a5634e156dfa92cb9a2606f855a637e35b  golangci-lint-1.45.2-linux-ppc64le.rpm
+0d12f6ec1296b5a70e392aa88cd2295cceef266165eb7028e675f455515dd1c9  golangci-lint-1.45.2-linux-armv7.deb
+10f2846e2e50e4ea8ae426ee62dcd2227b23adddd8e991aa3c065927ac948735  golangci-lint-1.45.2-linux-ppc64le.deb
+1463049b744871168095e3e8f687247d6040eeb895955b869889ea151e0603ab  golangci-lint-1.45.2-linux-arm64.tar.gz
+15720f9c4c6f9324af695f081dc189adc7751b255759e78d7b2df1d7e9192533  golangci-lint-1.45.2-linux-amd64.deb
+166d922e4d3cfe3d47786c590154a9c8ea689dff0aa92b73d2f5fc74fc570c29  golangci-lint-1.45.2-linux-arm64.rpm
+1a3754c69f7cc19ab89cbdcc2550da4cf9abb3120383c6b3bd440c1ec22da2e6  golangci-lint-1.45.2-freebsd-386.tar.gz
+1dec0aa46d4f0d241863b573f70129bdf1de9c595cf51172a840a588a4cd9fc5  golangci-lint-1.45.2-windows-amd64.zip
+3198453806517c1ad988229f5e758ef850e671203f46d6905509df5bdf4dc24b  golangci-lint-1.45.2-freebsd-armv7.tar.gz
+46a3cd1749d7b98adc2dc01510ddbe21abe42689c8a53fb0e81662713629f215  golangci-lint-1.45.2-linux-386.deb
+4e28bfb593d464b9e160f2acd5b71993836a183270bf8299b78ad31f7a168c0d  golangci-lint-1.45.2-linux-arm64.deb
+5157a58c8f9ab85c33af2e46f0d7c57a3b1e8953b81d61130e292e09f545cfab  golangci-lint-1.45.2-linux-mips64le.tar.gz
+518cd027644129fbf8ec4f02bd6f9ad7278aae826f92b63c80d4d0819ddde49a  golangci-lint-1.45.2-linux-armv6.rpm
+595ad6c6dade4c064351bc309f411703e457f8ffbb7a1806b3d8ee713333427f  golangci-lint-1.45.2-linux-amd64.tar.gz
+6994d6c80f0730751090986184a3481b4be2e6b6e84416238a2b857910045a4f  golangci-lint-1.45.2-windows-arm64.zip
+6c81652fc340118811b487f713c441fc6f527800bf5fd11b8929d08124efa015  golangci-lint-1.45.2-linux-armv7.tar.gz
+726cb045559b7518bafdd3459de70a0647c087eb1b4634627a4b2e95b1258580  golangci-lint-1.45.2-freebsd-amd64.tar.gz
+77df3774cdfda49b956d4a0e676da9a9b883f496ee37293c530770fef6b1d24e  golangci-lint-1.45.2-linux-mips64.deb
+7a9840f279a7d5d405bb434e101c2290964b3729630ac2add29280b962b7b9a5  golangci-lint-1.45.2-windows-armv6.zip
+7d4bf9a5d80ec467aaaf66e78dbdcab567bbc6ba8151334c714eee58766aae32  golangci-lint-1.45.2-windows-armv7.zip
+7e5f8821d39bb11d273b0841b34355f56bd5a45a2d5179f0d09e614e0efc0482  golangci-lint-1.45.2-linux-s390x.rpm
+828de1bde796b23d8656b17a8885fbd879ef612795d62d1e4618126b419728b5  golangci-lint-1.45.2-linux-mips64.rpm
+879a52107a797678a03c175cc7cf441411a14a01f66dc87f70bdfa304a4129a6  golangci-lint-1.45.2-windows-386.zip
+87b6c7e3a3769f7d9abeb3bb82119b3c91e3c975300f6834fdeef8b2e37c98ff  golangci-lint-1.45.2-linux-amd64.rpm
+8b605c6d686c8af53ecc4ef39544541eeb1644d34cc10f9ffc5087808210c4ff  golangci-lint-1.45.2-linux-s390x.deb
+9427dbf51d0ac6f73a0f992838bd40c817470cc5bf6c8e2e2bea6fac46d7af6e  golangci-lint-1.45.2-linux-ppc64le.tar.gz
+995e509e895ca6a64ffc7395ac884d5961bdec98423cb896b17f345a9b4a19cf  golangci-lint-1.45.2-darwin-amd64.tar.gz
+a3f36278f2ea5516341e9071a2df6e65df272be80230b5406a12b72c6d425bee  golangci-lint-1.45.2-linux-armv7.rpm
+a5e12c50c23e87ac1deffc872f92ae85427b1198604969399805ae47cfe43f08  golangci-lint-1.45.2-linux-riscv64.tar.gz
+aa8fa1be0729dbc2fbc4e01e82027097613eee74bd686ebef20f860b01fff8b3  golangci-lint-1.45.2-freebsd-armv6.tar.gz
+c2b9669decc1b638cf2ee9060571af4e255f6dfcbb225c293e3a7ee4bb2c7217  golangci-lint-1.45.2-darwin-arm64.tar.gz
+dfa8bdaf0387aec1cd5c1aa8857f67b2bbdfc2e42efce540c8fb9bbe3e8af302  golangci-lint-1.45.2-linux-armv6.tar.gz
+eb8b8539dd017eee5c131ea9b875893ab2cebeeca41e8c6624907fb02224d643  golangci-lint-1.45.2-linux-386.rpm
+ed6c7e17a857f30d715c5302fa250d95936936b277024bffea201187a257d7a7  golangci-lint-1.45.2-linux-armv6.deb
+ef4d0154ace4001f01b288baeb118176242efb4fd163e178763e3213b77ef30b  golangci-lint-1.45.2-linux-mips64le.deb
+ef7002a2229f5ff5ba201a715fcf877664ea88decbe58e69d163293913024955  golangci-lint-1.45.2-linux-s390x.tar.gz
+f13ecbd09228632e6bbe91a8324bd675c406eed22eb6d2c1e8192eed9ec4f914  golangci-lint-1.45.2-linux-386.tar.gz
+f4cd9cfb09252f51699407277512263cae8409b665dd764f55a34738d0e89edc  golangci-lint-1.45.2-linux-riscv64.rpm
+fb1945dc59d37c9d14bf0a4aea11ea8651fa0e1d582ea80c4c44d0a536c08893  golangci-lint-1.45.2-linux-mips64.tar.gz
+fe542c22738010f453c735a3c410decfd3784d1bd394b395c298ee298fc4c606  golangci-lint-1.45.2-linux-mips64le.rpm
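The pinned hashes above are what ci.go's checksum database verifies downloads against: an archive is only accepted if its sha256 matches the corresponding line in build/checksums.txt. A minimal sketch of that verification step as a standalone program (verifySHA256 is an illustrative helper, not the internal/build API):

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "os"
)

// verifySHA256 hashes the file at path and compares it against the
// hex checksum pinned in build/checksums.txt.
func verifySHA256(path, wantHex string) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()

    h := sha256.New()
    if _, err := io.Copy(h, f); err != nil {
        return err
    }
    if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
        return fmt.Errorf("checksum mismatch: got %s, want %s", got, wantHex)
    }
    return nil
}

func main() {
    // Example: check the linux-amd64 Go archive against its pinned hash.
    err := verifySHA256("go1.18.1.linux-amd64.tar.gz",
        "b3b815f47ababac13810fc6021eb73d65478e0b2db4b09d348eefad9581a2334")
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}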
build/ci.go (44 lines changed)
@@ -130,13 +130,14 @@ var (
 	// Distros for which packages are created.
 	// Note: vivid is unsupported because there is no golang-1.6 package for it.
 	// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
-	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
+	//   wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsuite
 	debDistroGoBoots = map[string]string{
-		"trusty":  "golang-1.11",
-		"xenial":  "golang-go",
-		"bionic":  "golang-go",
-		"focal":   "golang-go",
-		"hirsute": "golang-go",
+		"trusty":  "golang-1.11", // EOL: 04/2024
+		"xenial":  "golang-go",   // EOL: 04/2026
+		"bionic":  "golang-go",   // EOL: 04/2028
+		"focal":   "golang-go",   // EOL: 04/2030
+		"impish":  "golang-go",   // EOL: 07/2022
+		// "jammy": "golang-go",  // EOL: 04/2027
 	}

 	debGoBootPaths = map[string]string{
@@ -147,7 +148,7 @@ var (
 	// This is the version of go that will be downloaded by
 	//
 	//   go run ci.go install -dlgo
-	dlgoVersion = "1.17.5"
+	dlgoVersion = "1.18.1"
 )

 var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -331,12 +332,21 @@ func doLint(cmdline []string) {

 // downloadLinter downloads and unpacks golangci-lint.
 func downloadLinter(cachedir string) string {
-	const version = "1.42.0"
+	const version = "1.45.2"

 	csdb := build.MustLoadChecksums("build/checksums.txt")
-	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
-	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
-	archivePath := filepath.Join(cachedir, base+".tar.gz")
+	arch := runtime.GOARCH
+	ext := ".tar.gz"
+
+	if runtime.GOOS == "windows" {
+		ext = ".zip"
+	}
+	if arch == "arm" {
+		arch += "v" + os.Getenv("GOARM")
+	}
+	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
+	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
+	archivePath := filepath.Join(cachedir, base+ext)
 	if err := csdb.DownloadFile(url, archivePath); err != nil {
 		log.Fatal(err)
 	}
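The rewritten downloadLinter follows golangci-lint's release naming: .zip archives on Windows, .tar.gz elsewhere, and a v6/v7 suffix appended for 32-bit ARM from the GOARM environment variable. A standalone sketch of just the name construction (linterArtifact is a hypothetical helper; the logic mirrors the hunk above):

package main

import (
    "fmt"
    "os"
    "runtime"
)

// linterArtifact reproduces the release-asset name that downloadLinter requests.
func linterArtifact(version string) string {
    arch := runtime.GOARCH
    if arch == "arm" {
        arch += "v" + os.Getenv("GOARM") // e.g. "armv6" or "armv7"
    }
    ext := ".tar.gz"
    if runtime.GOOS == "windows" {
        ext = ".zip"
    }
    return fmt.Sprintf("golangci-lint-%s-%s-%s%s", version, runtime.GOOS, arch, ext)
}

func main() {
    // Prints e.g. "golangci-lint-1.45.2-linux-amd64.tar.gz".
    fmt.Println(linterArtifact("1.45.2"))
}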
@@ -1123,11 +1133,7 @@ func doXCodeFramework(cmdline []string) {
 	tc := new(build.GoToolchain)

 	// Build gomobile.
-	build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile@latest", "golang.org/x/mobile/cmd/gobind@latest"))
+	build.MustRun(tc.Install(GOBIN, "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))

-	// Ensure all dependencies are available. This is required to make
-	// gomobile bind work because it expects go.sum to contain all checksums.
-	build.MustRun(tc.Go("mod", "download"))
-
 	// Build the iOS XCode framework
 	bind := gomobileTool("bind", "-ldflags", "-s -w", "--target", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")
@@ -1233,21 +1239,21 @@ func doPurge(cmdline []string) {

 	// Iterate over the blobs, collect and sort all unstable builds
 	for i := 0; i < len(blobs); i++ {
-		if !strings.Contains(blobs[i].Name, "unstable") {
+		if !strings.Contains(*blobs[i].Name, "unstable") {
 			blobs = append(blobs[:i], blobs[i+1:]...)
 			i--
 		}
 	}
 	for i := 0; i < len(blobs); i++ {
 		for j := i + 1; j < len(blobs); j++ {
-			if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
+			if blobs[i].Properties.LastModified.After(*blobs[j].Properties.LastModified) {
 				blobs[i], blobs[j] = blobs[j], blobs[i]
 			}
 		}
 	}
 	// Filter out all archives more recent that the given threshold
 	for i, blob := range blobs {
-		if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
+		if time.Since(*blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
 			blobs = blobs[:i]
 			break
 		}
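The added dereferences (*blobs[i].Name, *blob.Properties.LastModified) suggest the Azure blob listing types now expose these fields through pointers, where nil means the service omitted the value. A hedged sketch of the defensive pattern such APIs sometimes call for (strOrEmpty is illustrative and not part of this change):

// strOrEmpty guards against nil *string fields returned by list calls.
func strOrEmpty(s *string) string {
    if s == nil {
        return ""
    }
    return *s
}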
@@ -5,7 +5,7 @@ Maintainer: {{.Author}}
 Build-Depends: debhelper (>= 8.0.0), {{.GoBootPackage}}
 Standards-Version: 3.9.5
 Homepage: https://ethereum.org
-Vcs-Git: git://github.com/ethereum/go-ethereum.git
+Vcs-Git: https://github.com/ethereum/go-ethereum.git
 Vcs-Browser: https://github.com/ethereum/go-ethereum

 Package: {{.Name}}
build/tools/tools.go (new file, 32 lines)
@@ -0,0 +1,32 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

//go:build tools
// +build tools

package tools

import (
	// Tool imports for go:generate.
	_ "github.com/fjl/gencodec"
	_ "github.com/golang/protobuf/protoc-gen-go"
	_ "github.com/kevinburke/go-bindata/go-bindata"
	_ "golang.org/x/tools/cmd/stringer"

	// Tool imports for mobile build.
	_ "golang.org/x/mobile/cmd/gobind"
	_ "golang.org/x/mobile/cmd/gomobile"
)
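This is the common "tools package" pattern: the tools build tag keeps the file out of ordinary builds, while the blank imports force go.mod and go.sum to track the generators, so go generate runs version-pinned tools. A hedged illustration of how such a pinned tool is typically driven (the Color example is invented; the repository's real go:generate directives sit next to the generated types):

//go:generate go run golang.org/x/tools/cmd/stringer -type=Color

package example

// Color is a toy enum; running `go generate` invokes the stringer
// version pinned through the tools package to emit color_string.go.
type Color int

const (
    Red Color = iota
    Green
    Blue
)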
@@ -661,7 +661,7 @@ func signer(c *cli.Context) error {
 		if err != nil {
 			utils.Fatalf("Could not register API: %w", err)
 		}
-		handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
+		handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)

 		// set port
 		port := c.Int(rpcPortFlag.Name)
@@ -26,6 +26,7 @@ import (
 	"os"
 	"strings"

+	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
 	"github.com/ethereum/go-ethereum/core/forkid"
 	"github.com/ethereum/go-ethereum/core/types"
@@ -67,6 +68,13 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int {
 	return sum
 }

+func (c *Chain) RootAt(height int) common.Hash {
+	if height < c.Len() {
+		return c.blocks[height].Root()
+	}
+	return common.Hash{}
+}
+
 // ForkID gets the fork id of the chain.
 func (c *Chain) ForkID() forkid.ID {
 	return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
@@ -96,6 +96,19 @@ func (s *Suite) dial66() (*Conn, error) {
 	return conn, nil
 }

+// dial66 attempts to dial the given node and perform a handshake,
+// returning the created Conn with additional snap/1 capabilities if
+// successful.
+func (s *Suite) dialSnap() (*Conn, error) {
+	conn, err := s.dial66()
+	if err != nil {
+		return nil, fmt.Errorf("dial failed: %v", err)
+	}
+	conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
+	conn.ourHighestSnapProtoVersion = 1
+	return conn, nil
+}
+
 // peer performs both the protocol handshake and the status message
 // exchange with the node in order to peer with it.
 func (c *Conn) peer(chain *Chain, status *Status) error {
@@ -131,7 +144,11 @@ func (c *Conn) handshake() error {
 		}
 		c.negotiateEthProtocol(msg.Caps)
 		if c.negotiatedProtoVersion == 0 {
-			return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+			return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
+		}
+		// If we require snap, verify that it was negotiated
+		if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
+			return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
 		}
 		return nil
 	default:
@@ -143,15 +160,21 @@ func (c *Conn) handshake() error {
 // advertised capability from peer.
 func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
 	var highestEthVersion uint
+	var highestSnapVersion uint
 	for _, capability := range caps {
-		if capability.Name != "eth" {
-			continue
-		}
-		if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
-			highestEthVersion = capability.Version
+		switch capability.Name {
+		case "eth":
+			if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
+				highestEthVersion = capability.Version
+			}
+		case "snap":
+			if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
+				highestSnapVersion = capability.Version
+			}
 		}
 	}
 	c.negotiatedProtoVersion = highestEthVersion
+	c.negotiatedSnapProtoVersion = highestSnapVersion
 }

 // statusExchange performs a `Status` message exchange with the given node.
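With the switch in place, eth and snap versions are negotiated independently from the same advertised capability list. A test-style sketch of the expected outcome, assuming the ethtest package context and the Conn fields added in this diff:

func TestNegotiateEthAndSnap(t *testing.T) {
    c := &Conn{
        ourHighestProtoVersion:     66,
        ourHighestSnapProtoVersion: 1,
    }
    c.negotiateEthProtocol([]p2p.Cap{
        {Name: "eth", Version: 66},
        {Name: "eth", Version: 67}, // above our ceiling, ignored
        {Name: "snap", Version: 1},
    })
    if c.negotiatedProtoVersion != 66 || c.negotiatedSnapProtoVersion != 1 {
        t.Fatalf("unexpected negotiation: eth=%d snap=%d",
            c.negotiatedProtoVersion, c.negotiatedSnapProtoVersion)
    }
}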
@@ -325,6 +348,15 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo
 	}
 }

+func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
+	defer c.SetReadDeadline(time.Time{})
+	c.SetReadDeadline(time.Now().Add(5 * time.Second))
+	if err := c.Write(msg); err != nil {
+		return nil, fmt.Errorf("could not write to connection: %v", err)
+	}
+	return c.ReadSnap(id)
+}
+
 // getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
 func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
 	// write request
cmd/devp2p/internal/ethtest/snap.go (new file, 675 lines)
@@ -0,0 +1,675 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package ethtest

import (
	"bytes"
	"errors"
	"fmt"
	"math/rand"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/protocols/snap"
	"github.com/ethereum/go-ethereum/internal/utesting"
	"github.com/ethereum/go-ethereum/light"
	"github.com/ethereum/go-ethereum/trie"
	"golang.org/x/crypto/sha3"
)

func (s *Suite) TestSnapStatus(t *utesting.T) {
	conn, err := s.dialSnap()
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	if err := conn.peer(s.chain, nil); err != nil {
		t.Fatalf("peering failed: %v", err)
	}
}

type accRangeTest struct {
	nBytes uint64
	root   common.Hash
	origin common.Hash
	limit  common.Hash

	expAccounts int
	expFirst    common.Hash
	expLast     common.Hash
}

// TestSnapGetAccountRange various forms of GetAccountRange requests.
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
	var (
		root           = s.chain.RootAt(999)
		ffHash         = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		zero           = common.Hash{}
		firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
		firstKey       = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
		firstKeyPlus1  = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b")
		secondKey      = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
		storageRoot    = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790")
	)
	for i, tc := range []accRangeTest{
		// Tests decreasing the number of bytes
		{4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
		{3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")},
		{2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")},
		{1, root, zero, ffHash, 1, firstKey, firstKey},

		// Tests variations of the range
		//
		// [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds
		{4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey},
		// [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds)
		{4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey},
		{4000, root, zero, zero, 1, firstKey, firstKey},
		{4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
		{4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")},

		// Test different root hashes
		//
		// A stateroot that does not exist
		{4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero},
		// The genesis stateroot (we expect it to not be served)
		{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
		// A 127 block old stateroot, expected to be served
		{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
		// A root which is not actually an account root, but a storage orot
		{4000, storageRoot, zero, ffHash, 0, zero, zero},

		// And some non-sensical requests
		//
		// range from [0xFF to 0x00], wrong order. Expect not to be serviced
		{4000, root, ffHash, zero, 0, zero, zero},
		// range from [firstkey, firstkey-1], wrong order. Expect to get first key.
		{4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey},
		// range from [firstkey, 0], wrong order. Expect to get first key.
		{4000, root, firstKey, zero, 1, firstKey, firstKey},
		// Max bytes: 0. Expect to deliver one account.
		{0, root, zero, ffHash, 1, firstKey, firstKey},
	} {
		if err := s.snapGetAccountRange(t, &tc); err != nil {
			t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
		}
	}
}

type stRangesTest struct {
	root     common.Hash
	accounts []common.Hash
	origin   []byte
	limit    []byte
	nBytes   uint64

	expSlots int
}

// TestSnapGetStorageRange various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
	var (
		ffHash    = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
		zero      = common.Hash{}
		firstKey  = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
		secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
	)
	for i, tc := range []stRangesTest{
		{
			root:     s.chain.RootAt(999),
			accounts: []common.Hash{secondKey, firstKey},
			origin:   zero[:],
			limit:    ffHash[:],
			nBytes:   500,
			expSlots: 0,
		},

		/*
			Some tests against this account:
			{
			  "balance": "0",
			  "nonce": 1,
			  "root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
			  "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
			  "storage": {
			    "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
			    "0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
			    "0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
			  },
			  "key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
			}
		*/
		{ // [:] -> [slot1, slot2, slot3]
			root:     s.chain.RootAt(999),
			accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
			origin:   zero[:],
			limit:    ffHash[:],
			nBytes:   500,
			expSlots: 3,
		},
		{ // [slot1:] -> [slot1, slot2, slot3]
			root:     s.chain.RootAt(999),
			accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
			origin:   common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
			limit:    ffHash[:],
			nBytes:   500,
			expSlots: 3,
		},
		{ // [slot1+ :] -> [slot2, slot3]
			root:     s.chain.RootAt(999),
			accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
			origin:   common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"),
			limit:    ffHash[:],
			nBytes:   500,
			expSlots: 2,
		},
		{ // [slot1:slot2] -> [slot1, slot2]
			root:     s.chain.RootAt(999),
			accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
			origin:   common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
			limit:    common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
			nBytes:   500,
			expSlots: 2,
		},
		{ // [slot1+:slot2+] -> [slot2, slot3]
			root:     s.chain.RootAt(999),
			accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
			origin:   common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
			limit:    common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"),
			nBytes:   500,
			expSlots: 2,
		},
	} {
		if err := s.snapGetStorageRanges(t, &tc); err != nil {
			t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
				i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
		}
	}
}

type byteCodesTest struct {
	nBytes uint64
	hashes []common.Hash

	expHashes int
}

var (
	// emptyRoot is the known root hash of an empty trie.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
	// emptyCode is the known hash of the empty EVM bytecode.
	emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
)

// TestSnapGetByteCodes various forms of GetByteCodes requests.
func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
	// The halfchain import should yield these bytecodes
	var hcBytecodes []common.Hash
	for _, s := range []string{
		"0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317",
		"0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3",
		"0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44",
		"0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6",
		"0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c",
		"0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04",
		"0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49",
		"0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142",
		"0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1",
		"0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb",
		"0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d",
		"0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732",
		"0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77",
		"0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f",
		"0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373",
		"0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb",
		"0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e",
		"0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6",
		"0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35",
		"0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b",
		"0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f",
		"0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce",
		"0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b",
		"0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a",
		"0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49",
	} {
		hcBytecodes = append(hcBytecodes, common.HexToHash(s))
	}

	for i, tc := range []byteCodesTest{
		// A few stateroots
		{
			nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)},
			expHashes: 0,
		},
		{
			nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)},
			expHashes: 0,
		},
		// Empties
		{
			nBytes: 10000, hashes: []common.Hash{emptyRoot},
			expHashes: 0,
		},
		{
			nBytes: 10000, hashes: []common.Hash{emptyCode},
			expHashes: 1,
		},
		{
			nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
			expHashes: 3,
		},
		// The existing bytecodes
		{
			nBytes: 10000, hashes: hcBytecodes,
			expHashes: len(hcBytecodes),
		},
		// The existing, with limited byte arg
		{
			nBytes: 1, hashes: hcBytecodes,
			expHashes: 1,
		},
		{
			nBytes: 0, hashes: hcBytecodes,
			expHashes: 1,
		},
		{
			nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]},
			expHashes: 4,
		},
	} {
		if err := s.snapGetByteCodes(t, &tc); err != nil {
			t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
		}
	}
}

type trieNodesTest struct {
	root   common.Hash
	paths  []snap.TrieNodePathSet
	nBytes uint64

	expHashes []common.Hash
	expReject bool
}

func decodeNibbles(nibbles []byte, bytes []byte) {
	for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
		bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
	}
}

// hasTerm returns whether a hex key has the terminator flag.
func hasTerm(s []byte) bool {
	return len(s) > 0 && s[len(s)-1] == 16
}

func keybytesToHex(str []byte) []byte {
	l := len(str)*2 + 1
	var nibbles = make([]byte, l)
	for i, b := range str {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[l-1] = 16
	return nibbles
}

func hexToCompact(hex []byte) []byte {
	terminator := byte(0)
	if hasTerm(hex) {
		terminator = 1
		hex = hex[:len(hex)-1]
	}
	buf := make([]byte, len(hex)/2+1)
	buf[0] = terminator << 5 // the flag byte
	if len(hex)&1 == 1 {
		buf[0] |= 1 << 4 // odd flag
		buf[0] |= hex[0] // first nibble is contained in the first byte
		hex = hex[1:]
	}
	decodeNibbles(hex, buf[1:])
	return buf
}

// TestSnapTrieNodes various forms of GetTrieNodes requests.
func (s *Suite) TestSnapTrieNodes(t *utesting.T) {

	key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
	// helper function to iterate the key, and generate the compact-encoded
	// trie paths along the way.
	pathTo := func(length int) snap.TrieNodePathSet {
		hex := keybytesToHex(key)[:length]
		hex[len(hex)-1] = 0 // remove term flag
		hKey := hexToCompact(hex)
		return snap.TrieNodePathSet{hKey}
	}
	var accPaths []snap.TrieNodePathSet
	for i := 1; i <= 65; i++ {
		accPaths = append(accPaths, pathTo(i))
	}
	empty := emptyCode
	for i, tc := range []trieNodesTest{
		{
			root:      s.chain.RootAt(999),
			paths:     nil,
			nBytes:    500,
			expHashes: nil,
		},
		{
			root: s.chain.RootAt(999),
			paths: []snap.TrieNodePathSet{
				snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off
				snap.TrieNodePathSet{[]byte{0}},
			},
			nBytes:    5000,
			expHashes: []common.Hash{},
			expReject: true,
		},
		{
			root: s.chain.RootAt(999),
			paths: []snap.TrieNodePathSet{
				snap.TrieNodePathSet{[]byte{0}},
				snap.TrieNodePathSet{[]byte{1}, []byte{0}},
			},
			nBytes: 5000,
			//0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf
			expHashes: []common.Hash{s.chain.RootAt(999)},
		},
		{ // nonsensically long path
			root: s.chain.RootAt(999),
			paths: []snap.TrieNodePathSet{
				snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
					0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
			},
			nBytes:    5000,
			expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")},
		},
		{
			root: s.chain.RootAt(0),
			paths: []snap.TrieNodePathSet{
				snap.TrieNodePathSet{[]byte{0}},
				snap.TrieNodePathSet{[]byte{1}, []byte{0}},
			},
			nBytes:    5000,
			expHashes: []common.Hash{},
		},
		{
			// The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
			root:   s.chain.RootAt(999),
			paths:  accPaths,
			nBytes: 5000,
			expHashes: []common.Hash{
				common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
				common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
				empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
				empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
				empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
				empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
				empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
				empty, empty, empty},
		},
		{
			// Basically the same as above, with different ordering
			root: s.chain.RootAt(999),
			paths: []snap.TrieNodePathSet{
				accPaths[10], accPaths[1], accPaths[0],
			},
			nBytes: 5000,
			expHashes: []common.Hash{
				empty,
				common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
				common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
			},
		},
	} {
		if err := s.snapGetTrieNodes(t, &tc); err != nil {
			t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
		}
	}
}

func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
	conn, err := s.dialSnap()
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	if err = conn.peer(s.chain, nil); err != nil {
		t.Fatalf("peering failed: %v", err)
	}
	// write request
	req := &GetAccountRange{
		ID:     uint64(rand.Int63()),
		Root:   tc.root,
		Origin: tc.origin,
		Limit:  tc.limit,
		Bytes:  tc.nBytes,
	}
	resp, err := conn.snapRequest(req, req.ID, s.chain)
	if err != nil {
		return fmt.Errorf("account range request failed: %v", err)
	}
	var res *snap.AccountRangePacket
	if r, ok := resp.(*AccountRange); !ok {
		return fmt.Errorf("account range response wrong: %T %v", resp, resp)
	} else {
		res = (*snap.AccountRangePacket)(r)
	}
	if exp, got := tc.expAccounts, len(res.Accounts); exp != got {
		return fmt.Errorf("expected %d accounts, got %d", exp, got)
	}
	// Check that the encoding order is correct
	for i := 1; i < len(res.Accounts); i++ {
		if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
			return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
		}
	}
	var (
		hashes   []common.Hash
		accounts [][]byte
		proof    = res.Proof
	)
	hashes, accounts, err = res.Unpack()
	if err != nil {
		return err
	}
	if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
		return nil
	}
	if len(hashes) > 0 {
		if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
			return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
		}
		if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
			return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
		}
	}
	// Reconstruct a partial trie from the response and verify it
	keys := make([][]byte, len(hashes))
	for i, key := range hashes {
		keys[i] = common.CopyBytes(key[:])
	}
	nodes := make(light.NodeList, len(proof))
	for i, node := range proof {
		nodes[i] = node
	}
	proofdb := nodes.NodeSet()

	var end []byte
	if len(keys) > 0 {
		end = keys[len(keys)-1]
	}
	_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
	return err
}

func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error {
	conn, err := s.dialSnap()
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	if err = conn.peer(s.chain, nil); err != nil {
		t.Fatalf("peering failed: %v", err)
	}
	// write request
	req := &GetStorageRanges{
		ID:       uint64(rand.Int63()),
		Root:     tc.root,
		Accounts: tc.accounts,
		Origin:   tc.origin,
		Limit:    tc.limit,
		Bytes:    tc.nBytes,
	}
	resp, err := conn.snapRequest(req, req.ID, s.chain)
	if err != nil {
		return fmt.Errorf("account range request failed: %v", err)
	}
	var res *snap.StorageRangesPacket
	if r, ok := resp.(*StorageRanges); !ok {
		return fmt.Errorf("account range response wrong: %T %v", resp, resp)
	} else {
		res = (*snap.StorageRangesPacket)(r)
	}
	gotSlots := 0
	// Ensure the ranges are monotonically increasing
	for i, slots := range res.Slots {
		gotSlots += len(slots)
		for j := 1; j < len(slots); j++ {
			if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
				return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
			}
		}
	}
	if exp, got := tc.expSlots, gotSlots; exp != got {
		return fmt.Errorf("expected %d slots, got %d", exp, got)
	}
	return nil
}

func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
	conn, err := s.dialSnap()
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	if err = conn.peer(s.chain, nil); err != nil {
		t.Fatalf("peering failed: %v", err)
	}
	// write request
	req := &GetByteCodes{
		ID:     uint64(rand.Int63()),
		Hashes: tc.hashes,
		Bytes:  tc.nBytes,
	}
	resp, err := conn.snapRequest(req, req.ID, s.chain)
	if err != nil {
		return fmt.Errorf("getBytecodes request failed: %v", err)
	}
	var res *snap.ByteCodesPacket
	if r, ok := resp.(*ByteCodes); !ok {
		return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp)
	} else {
		res = (*snap.ByteCodesPacket)(r)
	}
	if exp, got := tc.expHashes, len(res.Codes); exp != got {
		for i, c := range res.Codes {
			fmt.Printf("%d. %#x\n", i, c)
		}
		return fmt.Errorf("expected %d bytecodes, got %d", exp, got)
	}
	// Cross reference the requested bytecodes with the response to find gaps
	// that the serving node is missing
	var (
		bytecodes = res.Codes
		hasher    = sha3.NewLegacyKeccak256().(crypto.KeccakState)
		hash      = make([]byte, 32)
		codes     = make([][]byte, len(req.Hashes))
	)

	for i, j := 0, 0; i < len(bytecodes); i++ {
		// Find the next hash that we've been served, leaving misses with nils
		hasher.Reset()
		hasher.Write(bytecodes[i])
		hasher.Read(hash)

		for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) {
			j++
		}
		if j < len(req.Hashes) {
			codes[j] = bytecodes[i]
			j++
			continue
		}
		// We've either ran out of hashes, or got unrequested data
		return errors.New("unexpected bytecode")
	}

	return nil
}

func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
	conn, err := s.dialSnap()
	if err != nil {
		t.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
	if err = conn.peer(s.chain, nil); err != nil {
		t.Fatalf("peering failed: %v", err)
	}
	// write request
	req := &GetTrieNodes{
		ID:    uint64(rand.Int63()),
		Root:  tc.root,
		Paths: tc.paths,
		Bytes: tc.nBytes,
	}
	resp, err := conn.snapRequest(req, req.ID, s.chain)
	if err != nil {
		if tc.expReject {
			return nil
		}
		return fmt.Errorf("trienodes request failed: %v", err)
	}
	var res *snap.TrieNodesPacket
	if r, ok := resp.(*TrieNodes); !ok {
		return fmt.Errorf("trienodes response wrong: %T %v", resp, resp)
	} else {
		res = (*snap.TrieNodesPacket)(r)
	}

	// Check the correctness

	// Cross reference the requested trienodes with the response to find gaps
	// that the serving node is missing
	hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
	hash := make([]byte, 32)
	trienodes := res.Nodes
	if got, want := len(trienodes), len(tc.expHashes); got != want {
		return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
	}
	for i, trienode := range trienodes {
		hasher.Reset()
		hasher.Write(trienode)
		hasher.Read(hash)
		if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) {
			fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want)
			err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want)
		}
	}
	return err
}
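decodeNibbles, keybytesToHex, and hexToCompact above are local copies of the trie package's hex-prefix ("compact") encoding, so the paths built by pathTo can be checked by hand. A short illustration derived by tracing the code (assumes the helpers above are in scope):

// keybytesToHex expands each key byte into two nibbles and appends the
// terminator nibble 16:
//
//   keybytesToHex([]byte{0x12, 0x34})  ->  []byte{0x1, 0x2, 0x3, 0x4, 16}
//
// hexToCompact packs nibbles back into bytes behind a flag byte
// (bit 5: terminator present, bit 4: odd nibble count):
fmt.Printf("%x\n", hexToCompact([]byte{0x1, 0x2, 0x3, 0x4, 16})) // prints "201234"
fmt.Printf("%x\n", hexToCompact([]byte{0x1, 0x2, 0x3}))          // prints "1123"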
cmd/devp2p/internal/ethtest/snapTypes.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package ethtest

import "github.com/ethereum/go-ethereum/eth/protocols/snap"

// GetAccountRange represents an account range query.
type GetAccountRange snap.GetAccountRangePacket

func (g GetAccountRange) Code() int { return 33 }

type AccountRange snap.AccountRangePacket

func (g AccountRange) Code() int { return 34 }

type GetStorageRanges snap.GetStorageRangesPacket

func (g GetStorageRanges) Code() int { return 35 }

type StorageRanges snap.StorageRangesPacket

func (g StorageRanges) Code() int { return 36 }

type GetByteCodes snap.GetByteCodesPacket

func (g GetByteCodes) Code() int { return 37 }

type ByteCodes snap.ByteCodesPacket

func (g ByteCodes) Code() int { return 38 }

type GetTrieNodes snap.GetTrieNodesPacket

func (g GetTrieNodes) Code() int { return 39 }

type TrieNodes snap.TrieNodesPacket

func (g TrieNodes) Code() int { return 40 }
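The absolute Code values 33-40 appear to follow devp2p's capability multiplexing: the base protocol reserves message codes 0-15, eth/66 occupies the next 17 codes, and the eight snap/1 messages therefore land at 33-40. A hedged sketch of that offset arithmetic (the constant names are illustrative):

const (
    baseProtocolLength = 16 // devp2p reserved codes 0-15
    ethProtocolLength  = 17 // eth/66 message codes
)

// snapMsgCode maps a snap/1 message offset (0-7) to its absolute code on a
// connection that negotiated eth/66 first: snapMsgCode(0) == 33 is
// GetAccountRange, snapMsgCode(7) == 40 is TrieNodes.
func snapMsgCode(offset uint64) uint64 {
    return baseProtocolLength + ethProtocolLength + offset
}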
@@ -125,6 +125,16 @@ func (s *Suite) Eth66Tests() []utesting.Test {
 	}
 }

+func (s *Suite) SnapTests() []utesting.Test {
+	return []utesting.Test{
+		{Name: "TestSnapStatus", Fn: s.TestSnapStatus},
+		{Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange},
+		{Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes},
+		{Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes},
+		{Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges},
+	}
+}
+
 var (
 	eth66 = true  // indicates whether suite should negotiate eth66 connection
 	eth65 = false // indicates whether suite should negotiate eth65 connection or below.
@@ -55,6 +55,27 @@ func TestEthSuite(t *testing.T) {
 	}
 }

+func TestSnapSuite(t *testing.T) {
+	geth, err := runGeth()
+	if err != nil {
+		t.Fatalf("could not run geth: %v", err)
+	}
+	defer geth.Close()
+
+	suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
+	if err != nil {
+		t.Fatalf("could not create new test suite: %v", err)
+	}
+	for _, test := range suite.SnapTests() {
+		t.Run(test.Name, func(t *testing.T) {
+			result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
+			if result[0].Failed {
+				t.Fatal()
+			}
+		})
+	}
+}
+
 // runGeth creates and starts a geth node
 func runGeth() (*node.Node, error) {
 	stack, err := node.New(&node.Config{
@ -19,6 +19,7 @@ package ethtest
|
|||||||
import (
|
import (
|
||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
@ -126,10 +127,12 @@ func (pt PooledTransactions) Code() int { return 26 }
|
|||||||
// Conn represents an individual connection with a peer
|
// Conn represents an individual connection with a peer
|
||||||
type Conn struct {
|
type Conn struct {
|
||||||
*rlpx.Conn
|
*rlpx.Conn
|
||||||
ourKey *ecdsa.PrivateKey
|
ourKey *ecdsa.PrivateKey
|
||||||
negotiatedProtoVersion uint
|
negotiatedProtoVersion uint
|
||||||
ourHighestProtoVersion uint
|
negotiatedSnapProtoVersion uint
|
||||||
caps []p2p.Cap
|
ourHighestProtoVersion uint
|
||||||
|
ourHighestSnapProtoVersion uint
|
||||||
|
caps []p2p.Cap
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read reads an eth packet from the connection.
|
// Read reads an eth packet from the connection.
|
||||||
@@ -259,12 +262,7 @@ func (c *Conn) Read66() (uint64, Message) {

 // Write writes a eth packet to the connection.
 func (c *Conn) Write(msg Message) error {
-	// check if message is eth protocol message
-	var (
-		payload []byte
-		err     error
-	)
-	payload, err = rlp.EncodeToBytes(msg)
+	payload, err := rlp.EncodeToBytes(msg)
 	if err != nil {
 		return err
 	}
@@ -281,3 +279,43 @@ func (c *Conn) Write66(req eth.Packet, code int) error {
 	_, err = c.Conn.Write(uint64(code), payload)
 	return err
 }
+
+// ReadSnap reads a snap/1 response with the given id from the connection.
+func (c *Conn) ReadSnap(id uint64) (Message, error) {
+	respId := id + 1
+	start := time.Now()
+	for respId != id && time.Since(start) < timeout {
+		code, rawData, _, err := c.Conn.Read()
+		if err != nil {
+			return nil, fmt.Errorf("could not read from connection: %v", err)
+		}
+		var snpMsg interface{}
+		switch int(code) {
+		case (GetAccountRange{}).Code():
+			snpMsg = new(GetAccountRange)
+		case (AccountRange{}).Code():
+			snpMsg = new(AccountRange)
+		case (GetStorageRanges{}).Code():
+			snpMsg = new(GetStorageRanges)
+		case (StorageRanges{}).Code():
+			snpMsg = new(StorageRanges)
+		case (GetByteCodes{}).Code():
+			snpMsg = new(GetByteCodes)
+		case (ByteCodes{}).Code():
+			snpMsg = new(ByteCodes)
+		case (GetTrieNodes{}).Code():
+			snpMsg = new(GetTrieNodes)
+		case (TrieNodes{}).Code():
+			snpMsg = new(TrieNodes)
+		default:
+			//return nil, fmt.Errorf("invalid message code: %d", code)
+			continue
+		}
+		if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
+			return nil, fmt.Errorf("could not rlp decode message: %v", err)
+		}
+		return snpMsg.(Message), nil
+	}
+	return nil, fmt.Errorf("request timed out")
+}
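For context (not part of the diff): each snap request code pairs with a response code exactly one higher, e.g. GetTrieNodes (39) and TrieNodes (40) above, which is why ReadSnap can dispatch purely on message code. A minimal, hypothetical caller-side sketch, assuming an established *Conn with snap/1 negotiated and that the suite's Write helper accepts these messages; `stateRoot` is an assumed known root:

    // Hypothetical usage sketch; c is a *Conn, t a *testing.T.
    req := GetTrieNodes{ID: 7, Root: stateRoot}
    if err := c.Write(req); err != nil {
        t.Fatalf("could not write request: %v", err)
    }
    msg, err := c.ReadSnap(7)
    if err != nil {
        t.Fatalf("snap read failed: %v", err)
    }
    res, ok := msg.(*TrieNodes)
    if !ok {
        t.Fatalf("unexpected snap response: %T", msg)
    }
    _ = res // a real test would inspect res.Nodes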
@@ -36,6 +36,7 @@ var (
 		Subcommands: []cli.Command{
 			rlpxPingCommand,
 			rlpxEthTestCommand,
+			rlpxSnapTestCommand,
 		},
 	}
 	rlpxPingCommand = cli.Command{
@@ -53,6 +54,16 @@ var (
 			testTAPFlag,
 		},
 	}
+	rlpxSnapTestCommand = cli.Command{
+		Name:      "snap-test",
+		Usage:     "Runs tests against a node",
+		ArgsUsage: "<node> <chain.rlp> <genesis.json>",
+		Action:    rlpxSnapTest,
+		Flags: []cli.Flag{
+			testPatternFlag,
+			testTAPFlag,
+		},
+	}
 )

 func rlpxPing(ctx *cli.Context) error {
@@ -106,3 +117,15 @@ func rlpxEthTest(ctx *cli.Context) error {
 	}
 	return runTests(ctx, suite.AllEthTests())
 }
+
+// rlpxSnapTest runs the snap protocol test suite.
+func rlpxSnapTest(ctx *cli.Context) error {
+	if ctx.NArg() < 3 {
+		exit("missing path to chain.rlp as command-line argument")
+	}
+	suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
+	if err != nil {
+		exit(err)
+	}
+	return runTests(ctx, suite.SnapTests())
+}
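Usage note: with this wiring the snap suite is invoked as `devp2p rlpx snap-test <node> <chain.rlp> <genesis.json>`, and it honors the same test-filter and TAP-output flags (testPatternFlag, testTAPFlag above) as the existing eth-test subcommand.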
@@ -17,18 +17,12 @@
 package main

 import (
-	"io/ioutil"
-	"os"
 	"path/filepath"
 	"testing"
 )

 func TestMessageSignVerify(t *testing.T) {
-	tmpdir, err := ioutil.TempDir("", "ethkey-test")
-	if err != nil {
-		t.Fatal("Can't create temporary directory:", err)
-	}
-	defer os.RemoveAll(tmpdir)
-
+	tmpdir := t.TempDir()
 	keyfile := filepath.Join(tmpdir, "the-keyfile")
 	message := "test message"
@@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
 // signHash is a helper function that calculates a hash for the given message
 // that can be safely used to calculate a signature from.
 //
-// The hash is calulcated as
+// The hash is calculated as
 //   keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
 //
 // This gives context to the signed message and prevents signing of transactions.
@@ -36,7 +36,7 @@ import (
 	"gopkg.in/urfave/cli.v1"
 )

-//go:generate gencodec -type header -field-override headerMarshaling -out gen_header.go
+//go:generate go run github.com/fjl/gencodec -type header -field-override headerMarshaling -out gen_header.go
 type header struct {
 	ParentHash common.Hash  `json:"parentHash"`
 	OmmerHash  *common.Hash `json:"sha3Uncles"`
@@ -63,10 +63,11 @@ type ommer struct {
 	Address common.Address `json:"address"`
 }

-//go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
+//go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go
 type stEnv struct {
 	Coinbase         common.Address `json:"currentCoinbase" gencodec:"required"`
 	Difficulty       *big.Int       `json:"currentDifficulty"`
+	Random           *big.Int       `json:"currentRandom"`
 	ParentDifficulty *big.Int       `json:"parentDifficulty"`
 	GasLimit         uint64         `json:"currentGasLimit" gencodec:"required"`
 	Number           uint64         `json:"currentNumber" gencodec:"required"`
@@ -81,6 +82,7 @@ type stEnv struct {
 type stEnvMarshaling struct {
 	Coinbase         common.UnprefixedAddress
 	Difficulty       *math.HexOrDecimal256
+	Random           *math.HexOrDecimal256
 	ParentDifficulty *math.HexOrDecimal256
 	GasLimit         math.HexOrDecimal64
 	Number           math.HexOrDecimal64
@@ -139,6 +141,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
 	if pre.Env.BaseFee != nil {
 		vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
 	}
+	// If random is defined, add it to the vmContext.
+	if pre.Env.Random != nil {
+		rnd := common.BigToHash(pre.Env.Random)
+		vmContext.Random = &rnd
+	}
 	// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
 	// done in StateProcessor.Process(block, ...), right before transactions are applied.
 	if chainConfig.DAOForkSupport &&
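For reference, common.BigToHash left-pads the value's big-endian bytes to 32 bytes, which is the shape the post-merge RANDOM opcode exposes to contracts. A minimal sketch (illustrative, not part of the diff):

    // common.BigToHash pads to a 32-byte hash; e.g. 0x20000 becomes
    // 0x0000…0000020000 (left-padded).
    rnd := common.BigToHash(big.NewInt(0x20000))
    fmt.Println(rnd.Hex())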
@@ -18,6 +18,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
 	type stEnv struct {
 		Coinbase         common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
 		Difficulty       *math.HexOrDecimal256    `json:"currentDifficulty"`
+		Random           *math.HexOrDecimal256    `json:"currentRandom"`
 		ParentDifficulty *math.HexOrDecimal256    `json:"parentDifficulty"`
 		GasLimit         math.HexOrDecimal64      `json:"currentGasLimit" gencodec:"required"`
 		Number           math.HexOrDecimal64      `json:"currentNumber" gencodec:"required"`
@@ -31,6 +32,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
 	var enc stEnv
 	enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
 	enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
+	enc.Random = (*math.HexOrDecimal256)(s.Random)
 	enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
 	enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
 	enc.Number = math.HexOrDecimal64(s.Number)
@@ -48,6 +50,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 	type stEnv struct {
 		Coinbase         *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
 		Difficulty       *math.HexOrDecimal256     `json:"currentDifficulty"`
+		Random           *math.HexOrDecimal256     `json:"currentRandom"`
 		ParentDifficulty *math.HexOrDecimal256     `json:"parentDifficulty"`
 		GasLimit         *math.HexOrDecimal64      `json:"currentGasLimit" gencodec:"required"`
 		Number           *math.HexOrDecimal64      `json:"currentNumber" gencodec:"required"`
@@ -69,6 +72,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
 	if dec.Difficulty != nil {
 		s.Difficulty = (*big.Int)(dec.Difficulty)
 	}
+	if dec.Random != nil {
+		s.Random = (*big.Int)(dec.Random)
+	}
 	if dec.ParentDifficulty != nil {
 		s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
 	}
@@ -252,6 +252,10 @@ func Transition(ctx *cli.Context) error {
 			return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
 		}
 	}
+	// Sanity check, to not `panic` in state_transition
+	if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
+		return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules"))
+	}
 	if env := prestate.Env; env.Difficulty == nil {
 		// If difficulty was not provided by caller, we need to calculate it.
 		switch {
cmd/evm/testdata/15/exp3.json (vendored, 10 changed lines)
@@ -21,19 +21,19 @@
 	"error": "transaction type not supported"
 },
 {
-	"error": "rlp: expected List"
+	"error": "typed transaction too short"
 },
 {
-	"error": "rlp: expected List"
+	"error": "typed transaction too short"
 },
 {
-	"error": "rlp: expected List"
+	"error": "typed transaction too short"
 },
 {
-	"error": "rlp: expected List"
+	"error": "typed transaction too short"
 },
 {
-	"error": "rlp: expected List"
+	"error": "typed transaction too short"
 },
 {
 	"error": "rlp: expected input list for types.AccessListTx"
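Context for the new expectation strings, as a sketch under the assumption that go-ethereum's core/types decoder from this release is in scope: an EIP-2718 typed-transaction envelope that is too short to contain a payload now fails with a dedicated error instead of the generic RLP one.

    // Illustrative: decoding a truncated typed-transaction envelope.
    var tx types.Transaction
    err := tx.UnmarshalBinary([]byte{0x01}) // type byte with no payload
    fmt.Println(err) // typed transaction too short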
@@ -17,7 +17,7 @@
 // faucet is an Ether faucet backed by a light client.
 package main

-//go:generate go-bindata -nometadata -o website.go faucet.html
+//go:generate go run github.com/kevinburke/go-bindata/go-bindata -nometadata -o website.go faucet.html
 //go:generate gofmt -w -s website.go

 import (
@@ -20,7 +20,7 @@ import (
 func bindataRead(data []byte, name string) ([]byte, error) {
 	gz, err := gzip.NewReader(bytes.NewBuffer(data))
 	if err != nil {
-		return nil, fmt.Errorf("read %q: %v", name, err)
+		return nil, fmt.Errorf("read %q: %w", name, err)
 	}

 	var buf bytes.Buffer
@@ -28,7 +28,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
 	clErr := gz.Close()

 	if err != nil {
-		return nil, fmt.Errorf("read %q: %v", name, err)
+		return nil, fmt.Errorf("read %q: %w", name, err)
 	}
 	if clErr != nil {
 		return nil, err
@@ -183,6 +183,9 @@ var _bindata = map[string]func() (*asset, error){
 	"faucet.html": faucetHtml,
 }

+// AssetDebug is true if the assets were built with the debug flag enabled.
+const AssetDebug = false
+
 // AssetDir returns the file names below a certain
 // directory embedded in the file by go-bindata.
 // For example if you run go-bindata on data/... and data contains the
@@ -33,7 +33,7 @@ import (
 // are copied into a temporary keystore directory.

 func tmpDatadirWithKeystore(t *testing.T) string {
-	datadir := tmpdir(t)
+	datadir := t.TempDir()
 	keystore := filepath.Join(datadir, "keystore")
 	source := filepath.Join("..", "..", "accounts", "keystore", "testdata", "keystore")
 	if err := cp.CopyAll(keystore, source); err != nil {
@@ -111,7 +111,7 @@ func TestAccountImport(t *testing.T) {
 }

 func importAccountWithExpect(t *testing.T, key string, expected string) {
-	dir := tmpdir(t)
+	dir := t.TempDir()
 	keyfile := filepath.Join(dir, "key.prv")
 	if err := ioutil.WriteFile(keyfile, []byte(key), 0600); err != nil {
 		t.Error(err)
@@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
 	if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
 		t.Error(err)
 	}
-	geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile)
+	geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile)
 	defer geth.ExpectExit()
 	geth.Expect(expected)
 }
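The added --lightkdf flag keeps this import test fast by switching the keystore to the light scrypt parameters; without it, each import pays the full production key-derivation cost.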
@@ -32,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/accounts/scwallet"
 	"github.com/ethereum/go-ethereum/accounts/usbwallet"
 	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/log"
@@ -159,9 +160,25 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
 		cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
 	}
 	if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
-		cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
+		cfg.Eth.OverrideTerminalTotalDifficulty = utils.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name)
+	}
+	backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
+	// Warn users to migrate if they have a legacy freezer format.
+	if eth != nil {
+		firstIdx := uint64(0)
+		// Hack to speed up check for mainnet because we know
+		// the first non-empty block.
+		ghash := rawdb.ReadCanonicalHash(eth.ChainDb(), 0)
+		if cfg.Eth.NetworkId == 1 && ghash == params.MainnetGenesisHash {
+			firstIdx = 46147
+		}
+		isLegacy, _, err := dbHasLegacyReceipts(eth.ChainDb(), firstIdx)
+		if err != nil {
+			log.Error("Failed to check db for legacy receipts", "err", err)
+		} else if isLegacy {
+			log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
+		}
 	}
-	backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))

 	// Configure GraphQL if requested
 	if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
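Note how this warning path ties into the new `geth db freezer-migrate` command introduced later in this diff: makeFullNode only detects legacy receipts (probing from block 46147 on mainnet, the first block containing a transaction, as a shortcut) and leaves the actual migration to the explicit db command.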
@@ -19,7 +19,6 @@ package main
 import (
 	"crypto/rand"
 	"math/big"
-	"os"
 	"path/filepath"
 	"runtime"
 	"strconv"
@@ -92,9 +91,7 @@ func TestAttachWelcome(t *testing.T) {
 	if runtime.GOOS == "windows" {
 		ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999))
 	} else {
-		ws := tmpdir(t)
-		defer os.RemoveAll(ws)
-		ipc = filepath.Join(ws, "geth.ipc")
+		ipc = filepath.Join(t.TempDir(), "geth.ipc")
 	}
 	// And HTTP + WS attachment
 	p := trulyRandInt(1024, 65533) // Yeah, sometimes this will fail, sorry :P
@@ -118,6 +115,7 @@ func TestAttachWelcome(t *testing.T) {
 			waitForEndpoint(t, endpoint, 3*time.Second)
 			testAttachWelcome(t, geth, endpoint, httpAPIs)
 		})
+		geth.ExpectExit()
 	}
 }

 func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
@@ -19,7 +19,6 @@ package main
 import (
 	"io/ioutil"
 	"math/big"
-	"os"
 	"path/filepath"
 	"testing"

@@ -106,8 +105,7 @@ func TestDAOForkBlockNewChain(t *testing.T) {

 func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBlock *big.Int, expectVote bool) {
 	// Create a temporary data directory to use and inspect later
-	datadir := tmpdir(t)
-	defer os.RemoveAll(datadir)
+	datadir := t.TempDir()

 	// Start a Geth instance with the requested flags set and immediately terminate
 	if genesis != "" {
@@ -34,9 +34,12 @@ import (
 	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/console/prompt"
 	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state/snapshot"
+	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/olekukonko/tablewriter"
 	"gopkg.in/urfave/cli.v1"
 )

@@ -69,6 +72,8 @@ Remove blockchain and state databases`,
 			dbDumpFreezerIndex,
 			dbImportCmd,
 			dbExportCmd,
+			dbMetadataCmd,
+			dbMigrateFreezerCmd,
 		},
 	}
 	dbInspectCmd = cli.Command{
@@ -233,6 +238,38 @@ WARNING: This is a low-level operation which may cause database corruption!`,
 		},
 		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
 	}
+	dbMetadataCmd = cli.Command{
+		Action: utils.MigrateFlags(showMetaData),
+		Name:   "metadata",
+		Usage:  "Shows metadata about the chain status.",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.SyncModeFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.SepoliaFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+		},
+		Description: "Shows metadata about the chain status.",
+	}
+	dbMigrateFreezerCmd = cli.Command{
+		Action:    utils.MigrateFlags(freezerMigrate),
+		Name:      "freezer-migrate",
+		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
+		ArgsUsage: "",
+		Flags: []cli.Flag{
+			utils.DataDirFlag,
+			utils.SyncModeFlag,
+			utils.MainnetFlag,
+			utils.RopstenFlag,
+			utils.SepoliaFlag,
+			utils.RinkebyFlag,
+			utils.GoerliFlag,
+		},
+		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
+WARNING: please back-up the receipt files in your ancients before running this command.`,
+	}
 )

 func removeDB(ctx *cli.Context) error {
@@ -539,7 +576,7 @@ func freezerInspect(ctx *cli.Context) error {
 	defer stack.Close()
 	path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
 	log.Info("Opening freezer", "location", path, "name", kind)
-	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
+	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
 		return err
 	} else {
 		f.DumpIndex(start, end)
@@ -685,3 +722,138 @@ func exportChaindata(ctx *cli.Context) error {
 	db := utils.MakeChainDatabase(ctx, stack, true)
 	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
 }
+
+func showMetaData(ctx *cli.Context) error {
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+	db := utils.MakeChainDatabase(ctx, stack, true)
+	ancients, err := db.Ancients()
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
+	}
+	pp := func(val *uint64) string {
+		if val == nil {
+			return "<nil>"
+		}
+		return fmt.Sprintf("%d (0x%x)", *val, *val)
+	}
+	data := [][]string{
+		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
+		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
+		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
+		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
+	if b := rawdb.ReadHeadBlock(db); b != nil {
+		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
+		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
+		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
+	}
+	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
+		data = append(data, []string{"SkeletonSyncStatus", string(b)})
+	}
+	if h := rawdb.ReadHeadHeader(db); h != nil {
+		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
+		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
+		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
+	}
+	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
+		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
+		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
+		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
+		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
+		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
+		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
+		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
+		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
+		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
+	}...)
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetHeader([]string{"Field", "Value"})
+	table.AppendBulk(data)
+	table.Render()
+	return nil
+}
+
+func freezerMigrate(ctx *cli.Context) error {
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+
+	db := utils.MakeChainDatabase(ctx, stack, false)
+	defer db.Close()
+
+	// Check first block for legacy receipt format
+	numAncients, err := db.Ancients()
+	if err != nil {
+		return err
+	}
+	if numAncients < 1 {
+		log.Info("No receipts in freezer to migrate")
+		return nil
+	}
+
+	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
+	if err != nil {
+		return err
+	}
+	if !isFirstLegacy {
+		log.Info("No legacy receipts to migrate")
+		return nil
+	}
+
+	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
+	start := time.Now()
+	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
+		return err
+	}
+	if err := db.Close(); err != nil {
+		return err
+	}
+	log.Info("Migration finished", "duration", time.Since(start))
+
+	return nil
+}
+
+// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
+// non-empty receipt and checks its format. The index of this first non-empty element is
+// the second return parameter.
+func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
+	// Check first block for legacy receipt format
+	numAncients, err := db.Ancients()
+	if err != nil {
+		return false, 0, err
+	}
+	if numAncients < 1 {
+		return false, 0, nil
+	}
+	if firstIdx >= numAncients {
+		return false, firstIdx, nil
+	}
+	var (
+		legacy       bool
+		blob         []byte
+		emptyRLPList = []byte{192}
+	)
+	// Find first block with non-empty receipt, only if
+	// the index is not already provided.
+	if firstIdx == 0 {
+		for i := uint64(0); i < numAncients; i++ {
+			blob, err = db.Ancient("receipts", i)
+			if err != nil {
+				return false, 0, err
+			}
+			if len(blob) == 0 {
+				continue
+			}
+			if !bytes.Equal(blob, emptyRLPList) {
+				firstIdx = i
+				break
+			}
+		}
+	}
+	// Is first non-empty receipt legacy?
+	first, err := db.Ancient("receipts", firstIdx)
+	if err != nil {
+		return false, 0, err
+	}
+	legacy, err = types.IsLegacyStoredReceipts(first)
+	return legacy, firstIdx, err
+}
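One detail worth calling out: the emptyRLPList sentinel is []byte{192} because 0xC0 (decimal 192) is the RLP encoding of an empty list, so a receipts blob equal to that single byte means the block simply had no receipts. A quick check (illustrative only):

    // rlp.EncodeToBytes of an empty list yields the single byte 0xc0.
    b, _ := rlp.EncodeToBytes([]interface{}{})
    fmt.Printf("%x\n", b) // c0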
@@ -18,7 +18,6 @@ package main
 import (
 	"io/ioutil"
-	"os"
 	"path/filepath"
 	"testing"
 )
@@ -73,8 +72,7 @@ var customGenesisTests = []struct {
 func TestCustomGenesis(t *testing.T) {
 	for i, tt := range customGenesisTests {
 		// Create a temporary data directory to use and inspect later
-		datadir := tmpdir(t)
-		defer os.RemoveAll(datadir)
+		datadir := t.TempDir()

 		// Initialize the data directory with the custom genesis block
 		json := filepath.Join(datadir, "genesis.json")
@@ -107,7 +107,8 @@ var (
 		utils.UltraLightFractionFlag,
 		utils.UltraLightOnlyAnnounceFlag,
 		utils.LightNoSyncServeFlag,
-		utils.WhitelistFlag,
+		utils.EthPeerRequiredBlocksFlag,
+		utils.LegacyWhitelistFlag,
 		utils.BloomFilterSizeFlag,
 		utils.CacheFlag,
 		utils.CacheDatabaseFlag,
@@ -118,6 +119,7 @@ var (
 		utils.CacheSnapshotFlag,
 		utils.CacheNoPrefetchFlag,
 		utils.CachePreimagesFlag,
+		utils.FDLimitFlag,
 		utils.ListenPortFlag,
 		utils.MaxPeersFlag,
 		utils.MaxPendingPeersFlag,
@@ -146,6 +148,7 @@ var (
 		utils.SepoliaFlag,
 		utils.RinkebyFlag,
 		utils.GoerliFlag,
+		utils.KilnFlag,
 		utils.VMEnableDebugFlag,
 		utils.NetworkIdFlag,
 		utils.EthStatsURLFlag,
@@ -157,7 +160,6 @@ var (
 		utils.GpoIgnoreGasPriceFlag,
 		utils.MinerNotifyFullFlag,
 		configFileFlag,
-		utils.CatalystFlag,
 	}

 	rpcFlags = []cli.Flag{
@@ -165,6 +167,10 @@ var (
 		utils.HTTPListenAddrFlag,
 		utils.HTTPPortFlag,
 		utils.HTTPCORSDomainFlag,
+		utils.AuthListenFlag,
+		utils.AuthPortFlag,
+		utils.AuthVirtualHostsFlag,
+		utils.JWTSecretFlag,
 		utils.HTTPVirtualHostsFlag,
 		utils.GraphQLEnabledFlag,
 		utils.GraphQLCORSDomainFlag,
@@ -208,7 +214,7 @@ func init() {
 	// Initialize the CLI app and start Geth
 	app.Action = geth
 	app.HideVersion = true // we have a command to print the version
-	app.Copyright = "Copyright 2013-2021 The go-ethereum Authors"
+	app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
 	app.Commands = []cli.Command{
 		// See chaincmd.go:
 		initCommand,
@@ -19,7 +19,6 @@ package main
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -29,14 +28,6 @@ import (
 	"github.com/ethereum/go-ethereum/rpc"
 )

-func tmpdir(t *testing.T) string {
-	dir, err := ioutil.TempDir("", "geth-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	return dir
-}
-
 type testgeth struct {
 	*cmdtest.TestCmd

@@ -82,15 +73,9 @@ func runGeth(t *testing.T, args ...string) *testgeth {
 		}
 	}
 	if tt.Datadir == "" {
-		tt.Datadir = tmpdir(t)
-		tt.Cleanup = func() { os.RemoveAll(tt.Datadir) }
+		// The temporary datadir will be removed automatically if something fails below.
+		tt.Datadir = t.TempDir()
 		args = append([]string{"--datadir", tt.Datadir}, args...)
-		// Remove the temporary datadir if something fails below.
-		defer func() {
-			if t.Failed() {
-				tt.Cleanup()
-			}
-		}()
 	}

 	// Boot "geth". This actually runs the test binary but the TestMain
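The pattern adopted across these test files is Go 1.15's t.TempDir, which registers its own cleanup, so the tmpdir helper, the os.RemoveAll calls, and the t.Failed() bookkeeping all become redundant. A minimal sketch:

    // t.TempDir creates a fresh directory per test and removes it
    // automatically when the test (and its subtests) complete.
    func TestExample(t *testing.T) {
        datadir := t.TempDir()
        _ = filepath.Join(datadir, "geth.ipc")
    }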
@@ -20,6 +20,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"os"
 	"time"

@@ -31,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state/snapshot"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -102,6 +104,25 @@ geth snapshot verify-state <state-root>
 will traverse the whole accounts and storages set based on the specified
 snapshot and recalculate the root hash of state for verification.
 In other words, this command does the snapshot to trie conversion.
+`,
+			},
+			{
+				Name:      "check-dangling-storage",
+				Usage:     "Check that there is no 'dangling' snap storage",
+				ArgsUsage: "<root>",
+				Action:    utils.MigrateFlags(checkDanglingStorage),
+				Category:  "MISCELLANEOUS COMMANDS",
+				Flags: []cli.Flag{
+					utils.DataDirFlag,
+					utils.AncientFlag,
+					utils.RopstenFlag,
+					utils.SepoliaFlag,
+					utils.RinkebyFlag,
+					utils.GoerliFlag,
+				},
+				Description: `
+geth snapshot check-dangling-storage <state-root> traverses the snap storage
+data, and verifies that all snapshot storage data has a corresponding account.
 `,
 			},
 			{
@@ -242,6 +263,77 @@ func verifyState(ctx *cli.Context) error {
 		return err
 	}
 	log.Info("Verified the state", "root", root)
+	if err := checkDangling(chaindb, snaptree.Snapshot(root)); err != nil {
+		log.Error("Dangling snap storage check failed", "root", root, "err", err)
+		return err
+	}
+	return nil
+}
+
+// checkDanglingStorage iterates the snap storage data, and verifies that all
+// storage also has corresponding account data.
+func checkDanglingStorage(ctx *cli.Context) error {
+	stack, _ := makeConfigNode(ctx)
+	defer stack.Close()
+
+	chaindb := utils.MakeChainDatabase(ctx, stack, true)
+	headBlock := rawdb.ReadHeadBlock(chaindb)
+	if headBlock == nil {
+		log.Error("Failed to load head block")
+		return errors.New("no head block")
+	}
+	snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
+	if err != nil {
+		log.Error("Failed to open snapshot tree", "err", err)
+		return err
+	}
+	if ctx.NArg() > 1 {
+		log.Error("Too many arguments given")
+		return errors.New("too many arguments")
+	}
+	var root = headBlock.Root()
+	if ctx.NArg() == 1 {
+		root, err = parseRoot(ctx.Args()[0])
+		if err != nil {
+			log.Error("Failed to resolve state root", "err", err)
+			return err
+		}
+	}
+	return checkDangling(chaindb, snaptree.Snapshot(root))
+}
+
+func checkDangling(chaindb ethdb.Database, snap snapshot.Snapshot) error {
+	log.Info("Checking dangling snapshot storage")
+	var (
+		lastReport = time.Now()
+		start      = time.Now()
+		lastKey    []byte
+		it         = rawdb.NewKeyLengthIterator(chaindb.NewIterator(rawdb.SnapshotStoragePrefix, nil), 1+2*common.HashLength)
+	)
+	defer it.Release()
+	for it.Next() {
+		k := it.Key()
+		accKey := k[1:33]
+		if bytes.Equal(accKey, lastKey) {
+			// No need to look up for every slot
+			continue
+		}
+		lastKey = common.CopyBytes(accKey)
+		if time.Since(lastReport) > time.Second*8 {
+			log.Info("Iterating snap storage", "at", fmt.Sprintf("%#x", accKey), "elapsed", common.PrettyDuration(time.Since(start)))
+			lastReport = time.Now()
+		}
+		data, err := snap.AccountRLP(common.BytesToHash(accKey))
+		if err != nil {
+			log.Error("Error loading snap storage data", "account", fmt.Sprintf("%#x", accKey), "err", err)
+			return err
+		}
+		if len(data) == 0 {
+			log.Error("Dangling storage - missing account", "account", fmt.Sprintf("%#x", accKey), "storagekey", fmt.Sprintf("%#x", k))
+			return fmt.Errorf("dangling snapshot storage account %#x", accKey)
+		}
+	}
+	log.Info("Verified the snapshot storage", "root", snap.Root(), "time", common.PrettyDuration(time.Since(start)), "err", it.Error())
 	return nil
 }

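Reading checkDangling requires knowing the snapshot storage key layout the iterator encodes: one prefix byte, then the 32-byte account hash, then the 32-byte storage-slot hash. That is where the 1+2*common.HashLength length filter comes from, and why the k[1:33] slice recovers the account hash for the lookup.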
@@ -314,8 +406,7 @@ func traverseState(ctx *cli.Context) error {
 			}
 		}
 		if !bytes.Equal(acc.CodeHash, emptyCode) {
-			code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
-			if len(code) == 0 {
+			if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
 				log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
 				return errors.New("missing code")
 			}
@@ -386,11 +477,10 @@ func traverseRawState(ctx *cli.Context) error {
 		nodes += 1
 		node := accIter.Hash()

+		// Check the present for non-empty hash node(embedded node doesn't
+		// have their own hash).
 		if node != (common.Hash{}) {
-			// Check the present for non-empty hash node(embedded node doesn't
-			// have their own hash).
-			blob := rawdb.ReadTrieNode(chaindb, node)
-			if len(blob) == 0 {
+			if !rawdb.HasTrieNode(chaindb, node) {
 				log.Error("Missing trie node(account)", "hash", node)
 				return errors.New("missing account")
 			}
@@ -418,8 +508,7 @@ func traverseRawState(ctx *cli.Context) error {
 			// Check the present for non-empty hash node(embedded node doesn't
 			// have their own hash).
 			if node != (common.Hash{}) {
-				blob := rawdb.ReadTrieNode(chaindb, node)
-				if len(blob) == 0 {
+				if !rawdb.HasTrieNode(chaindb, node) {
 					log.Error("Missing trie node(storage)", "hash", node)
 					return errors.New("missing storage")
 				}
@@ -435,8 +524,7 @@ func traverseRawState(ctx *cli.Context) error {
 			}
 		}
 		if !bytes.Equal(acc.CodeHash, emptyCode) {
-			code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
-			if len(code) == 0 {
+			if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
 				log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
 				return errors.New("missing code")
 			}
@@ -46,6 +46,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.RinkebyFlag,
 			utils.RopstenFlag,
 			utils.SepoliaFlag,
+			utils.KilnFlag,
 			utils.SyncModeFlag,
 			utils.ExitWhenSyncedFlag,
 			utils.GCModeFlag,
@@ -53,7 +54,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.EthStatsURLFlag,
 			utils.IdentityFlag,
 			utils.LightKDFFlag,
-			utils.WhitelistFlag,
+			utils.EthPeerRequiredBlocksFlag,
 		},
 	},
@@ -119,6 +120,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.CacheSnapshotFlag,
 			utils.CacheNoPrefetchFlag,
 			utils.CachePreimagesFlag,
+			utils.FDLimitFlag,
 		},
 	},
 	{
@@ -148,6 +150,10 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.WSApiFlag,
 			utils.WSPathPrefixFlag,
 			utils.WSAllowedOriginsFlag,
+			utils.JWTSecretFlag,
+			utils.AuthListenFlag,
+			utils.AuthPortFlag,
+			utils.AuthVirtualHostsFlag,
 			utils.GraphQLEnabledFlag,
 			utils.GraphQLCORSDomainFlag,
 			utils.GraphQLVirtualHostsFlag,
@@ -221,6 +227,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 		Name: "ALIASED (deprecated)",
 		Flags: []cli.Flag{
 			utils.NoUSBFlag,
+			utils.LegacyWhitelistFlag,
 		},
 	},
 	{
@@ -229,7 +236,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
 			utils.SnapshotFlag,
 			utils.BloomFilterSizeFlag,
 			cli.HelpFlag,
-			utils.CatalystFlag,
 		},
 	},
 }
@@ -25,6 +25,8 @@ import (
 	"strconv"
 	"strings"
 	"testing"
+
+	"github.com/jedisct1/go-minisign"
 )

 func TestVerification(t *testing.T) {
@@ -128,3 +130,39 @@ func TestMatching(t *testing.T) {
 		}
 	}
 }
+
+func TestGethPubKeysParseable(t *testing.T) {
+	for _, pubkey := range gethPubKeys {
+		_, err := minisign.NewPublicKey(pubkey)
+		if err != nil {
+			t.Errorf("Should be parseable")
+		}
+	}
+}
+
+func TestKeyID(t *testing.T) {
+	type args struct {
+		id [8]byte
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"},
+		{"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"},
+		{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := keyID(tt.args.id); got != tt.want {
+				t.Errorf("keyID() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func extractKeyId(pubkey string) [8]byte {
+	p, _ := minisign.NewPublicKey(pubkey)
+	return p.KeyId
+}
@@ -154,11 +154,11 @@ func (b *bigValue) String() string {
 }

 func (b *bigValue) Set(s string) error {
-	int, ok := math.ParseBig256(s)
+	intVal, ok := math.ParseBig256(s)
 	if !ok {
 		return errors.New("invalid integer syntax")
 	}
-	*b = (bigValue)(*int)
+	*b = (bigValue)(*intVal)
 	return nil
 }

@@ -172,6 +172,7 @@ func (f BigFlag) String() string {

 func (f BigFlag) Apply(set *flag.FlagSet) {
 	eachName(f.Name, func(name string) {
+		f.Value = new(big.Int)
 		set.Var((*bigValue)(f.Value), f.Name, f.Usage)
 	})
 }
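For completeness, the reason BigFlag now allocates f.Value inside Apply is that a nil *big.Int would leave the flag with nothing to parse into; callers then read the parsed value back with the existing GlobalBig helper, exactly as the cmd/geth/config.go change earlier in this diff does:

    // Sketch of the consuming side (see the makeFullNode hunk above); ctx is
    // the cli context of a running command, and the caller guards with
    // ctx.GlobalIsSet before trusting the value.
    ttd := utils.GlobalBig(ctx, utils.OverrideTerminalTotalDifficulty.Name)
    _ = ttd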
@@ -45,7 +45,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/eth"
-	"github.com/ethereum/go-ethereum/eth/catalyst"
+	ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
 	"github.com/ethereum/go-ethereum/eth/downloader"
 	"github.com/ethereum/go-ethereum/eth/ethconfig"
 	"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -56,6 +56,7 @@ import (
 	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/internal/flags"
 	"github.com/ethereum/go-ethereum/les"
+	lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/metrics/exp"
@@ -160,6 +161,10 @@ var (
 		Name:  "sepolia",
 		Usage: "Sepolia network: pre-configured proof-of-work test network",
 	}
+	KilnFlag = cli.BoolFlag{
+		Name:  "kiln",
+		Usage: "Kiln network: pre-configured proof-of-work to proof-of-stake test network",
+	}
 	DeveloperFlag = cli.BoolFlag{
 		Name:  "dev",
 		Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -236,9 +241,13 @@ var (
 		Name:  "lightkdf",
 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
 	}
-	WhitelistFlag = cli.StringFlag{
+	EthPeerRequiredBlocksFlag = cli.StringFlag{
+		Name:  "eth.requiredblocks",
+		Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
+	}
+	LegacyWhitelistFlag = cli.StringFlag{
 		Name: "whitelist",
-		Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
+		Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --peer.requiredblocks)",
 	}
 	BloomFilterSizeFlag = cli.Uint64Flag{
 		Name: "bloomfilter.size",
@@ -249,7 +258,7 @@ var (
 		Name:  "override.arrowglacier",
 		Usage: "Manually specify Arrow Glacier fork-block, overriding the bundled setting",
 	}
-	OverrideTerminalTotalDifficulty = cli.Uint64Flag{
+	OverrideTerminalTotalDifficulty = BigFlag{
 		Name:  "override.terminaltotaldifficulty",
 		Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting",
 	}
@@ -432,6 +441,10 @@ var (
 		Name:  "cache.preimages",
 		Usage: "Enable recording the SHA3/keccak preimages of trie keys",
 	}
+	FDLimitFlag = cli.IntFlag{
+		Name:  "fdlimit",
+		Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
+	}
 	// Miner settings
 	MiningEnabledFlag = cli.BoolFlag{
 		Name: "mine",
@@ -517,6 +530,26 @@ var (
 		Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
 		Value: ethconfig.Defaults.RPCTxFeeCap,
 	}
+	// Authenticated RPC HTTP settings
+	AuthListenFlag = cli.StringFlag{
+		Name:  "authrpc.addr",
+		Usage: "Listening address for authenticated APIs",
+		Value: node.DefaultConfig.AuthAddr,
+	}
+	AuthPortFlag = cli.IntFlag{
+		Name:  "authrpc.port",
+		Usage: "Listening port for authenticated APIs",
+		Value: node.DefaultConfig.AuthPort,
+	}
+	AuthVirtualHostsFlag = cli.StringFlag{
+		Name:  "authrpc.vhosts",
+		Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
+		Value: strings.Join(node.DefaultConfig.AuthVirtualHosts, ","),
+	}
+	JWTSecretFlag = cli.StringFlag{
+		Name:  "authrpc.jwtsecret",
+		Usage: "Path to a JWT secret to use for authenticated RPC endpoints",
+	}
 	// Logging and debug settings
 	EthStatsURLFlag = cli.StringFlag{
 		Name: "ethstats",
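Background note (an inference from the flag names, not stated in the diff): these authrpc.* and JWT settings back the authenticated HTTP endpoint introduced for the post-merge Engine API, which consensus-layer clients connect to; the JWT secret is enforced on that port only, separate from the ordinary --http endpoint.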
@@ -789,11 +822,6 @@ var (
 		Usage: "InfluxDB organization name (v2 only)",
 		Value: metrics.DefaultConfig.InfluxDBOrganization,
 	}
-
-	CatalystFlag = cli.BoolFlag{
-		Name:  "catalyst",
-		Usage: "Catalyst mode (eth2 integration testing)",
-	}
 )

 // MakeDataDir retrieves the currently requested data directory, terminating
@@ -815,6 +843,9 @@ func MakeDataDir(ctx *cli.Context) string {
 		if ctx.GlobalBool(SepoliaFlag.Name) {
 			return filepath.Join(path, "sepolia")
 		}
+		if ctx.GlobalBool(KilnFlag.Name) {
+			return filepath.Join(path, "kiln")
+		}
 		return path
 	}
 	Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@@ -869,6 +900,8 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
 		urls = params.RinkebyBootnodes
 	case ctx.GlobalBool(GoerliFlag.Name):
 		urls = params.GoerliBootnodes
+	case ctx.GlobalBool(KilnFlag.Name):
+		urls = params.KilnBootnodes
 	case cfg.BootstrapNodes != nil:
 		return // already set, don't apply defaults.
 	}
@ -955,6 +988,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
|
|||||||
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
|
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ctx.GlobalIsSet(AuthListenFlag.Name) {
|
||||||
|
cfg.AuthAddr = ctx.GlobalString(AuthListenFlag.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.GlobalIsSet(AuthPortFlag.Name) {
|
||||||
|
cfg.AuthPort = ctx.GlobalInt(AuthPortFlag.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.GlobalIsSet(AuthVirtualHostsFlag.Name) {
|
||||||
|
cfg.AuthVirtualHosts = SplitAndTrim(ctx.GlobalString(AuthVirtualHostsFlag.Name))
|
||||||
|
}
|
||||||
|
|
||||||
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
|
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
|
||||||
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
|
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
|
||||||
}
|
}
|
||||||
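The `authrpc.jwtsecret` flag wired in above expects a file holding the shared secret for the authenticated endpoint. A minimal sketch of producing such a file; the 32-byte hex-encoded format is an assumption based on the engine-API convention, and the `jwt.hex` name is arbitrary:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"os"
)

func main() {
	var secret [32]byte
	if _, err := rand.Read(secret[:]); err != nil {
		panic(err)
	}
	// Write the secret hex-encoded, readable only by the owner.
	if err := os.WriteFile("jwt.hex", []byte(hex.EncodeToString(secret[:])), 0600); err != nil {
		panic(err)
	}
	fmt.Println("wrote jwt.hex; point --authrpc.jwtsecret at it")
}
```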
@@ -1061,11 +1106,24 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {

 // MakeDatabaseHandles raises out the number of allowed file handles per process
 // for Geth and returns half of the allowance to assign to the database.
-func MakeDatabaseHandles() int {
+func MakeDatabaseHandles(max int) int {
 	limit, err := fdlimit.Maximum()
 	if err != nil {
 		Fatalf("Failed to retrieve file descriptor allowance: %v", err)
 	}
+	switch {
+	case max == 0:
+		// User didn't specify a meaningful value, use system limits
+	case max < 128:
+		// User specified something unhealthy, just use system defaults
+		log.Error("File descriptor limit invalid (<128)", "had", max, "updated", limit)
+	case max > limit:
+		// User requested more than the OS allows, notify that we can't allocate it
+		log.Warn("Requested file descriptors denied by OS", "req", max, "limit", limit)
+	default:
+		// User limit is meaningful and within allowed range, use that
+		limit = max
+	}
 	raised, err := fdlimit.Raise(uint64(limit))
 	if err != nil {
 		Fatalf("Failed to raise file descriptor allowance: %v", err)
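To make the new clamping behavior of `MakeDatabaseHandles(max)` easy to eyeball, here is the same decision table extracted into a standalone sketch, with the fdlimit syscalls replaced by a plain `system` parameter:

```go
package main

import "fmt"

// clampFDLimit mirrors the switch added to MakeDatabaseHandles:
// 0 keeps the system limit, <128 is rejected as unhealthy, and a
// request above the system limit is left for fdlimit.Raise to cap.
func clampFDLimit(max, system int) int {
	limit := system
	switch {
	case max == 0:
		// no user preference: keep the system limit
	case max < 128:
		// unhealthy request: ignored, system default kept
	case max > system:
		// over the OS allowance: the later Raise call caps it
	default:
		limit = max
	}
	return limit
}

func main() {
	for _, req := range []int{0, 64, 8192, 1 << 20} {
		fmt.Printf("--fdlimit %-8d => %d\n", req, clampFDLimit(req, 65536))
	}
}
```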
@@ -1222,6 +1280,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
 	setDataDir(ctx, cfg)
 	setSmartCard(ctx, cfg)

+	if ctx.GlobalIsSet(JWTSecretFlag.Name) {
+		cfg.JWTSecret = ctx.GlobalString(JWTSecretFlag.Name)
+	}
+
 	if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
 		cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
 	}
@@ -1290,6 +1352,8 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
 	case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
 		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
+	case ctx.GlobalBool(KilnFlag.Name) && cfg.DataDir == node.DefaultDataDir():
+		cfg.DataDir = filepath.Join(node.DefaultDataDir(), "kiln")
 	}
 }

@@ -1408,26 +1472,33 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
 	}
 }

-func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
-	whitelist := ctx.GlobalString(WhitelistFlag.Name)
-	if whitelist == "" {
-		return
+func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
+	peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
+	if peerRequiredBlocks == "" {
+		if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
+			log.Warn("The flag --rpc is deprecated and will be removed, please use --peer.requiredblocks")
+			peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
+		} else {
+			return
+		}
 	}
-	cfg.Whitelist = make(map[uint64]common.Hash)
-	for _, entry := range strings.Split(whitelist, ",") {
+	cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
+	for _, entry := range strings.Split(peerRequiredBlocks, ",") {
 		parts := strings.Split(entry, "=")
 		if len(parts) != 2 {
-			Fatalf("Invalid whitelist entry: %s", entry)
+			Fatalf("Invalid peer required block entry: %s", entry)
 		}
 		number, err := strconv.ParseUint(parts[0], 0, 64)
 		if err != nil {
-			Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
+			Fatalf("Invalid peer required block number %s: %v", parts[0], err)
 		}
 		var hash common.Hash
 		if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
-			Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
+			Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
 		}
-		cfg.Whitelist[number] = hash
+		cfg.PeerRequiredBlocks[number] = hash
 	}
 }

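The value format consumed by `setPeerRequiredBlocks` is a comma-separated list of `<number>=<hash>` pairs. A minimal sketch of the same parsing, using a hypothetical block/hash pair:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Hypothetical mapping: peers must serve block 12345 with exactly this hash.
	value := "12345=0x86e56d3cac7ddf67fb2394a4e6464a0e218d2f3ec87d37e3372eba4f764f2425"
	for _, entry := range strings.Split(value, ",") {
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			panic("malformed entry: " + entry)
		}
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			panic(err)
		}
		var hash common.Hash
		if err := hash.UnmarshalText([]byte(parts[1])); err != nil {
			panic(err)
		}
		fmt.Printf("require block %d = %s\n", number, hash.Hex())
	}
}
```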
@@ -1475,7 +1546,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
 // SetEthConfig applies eth-related command line flags to the config.
 func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	// Avoid conflicting network flags
-	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
+	CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag, KilnFlag)
 	CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
 	CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
 	if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@@ -1494,7 +1565,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	setTxPool(ctx, &cfg.TxPool)
 	setEthash(ctx, cfg)
 	setMiner(ctx, &cfg.Miner)
-	setWhitelist(ctx, cfg)
+	setPeerRequiredBlocks(ctx, cfg)
 	setLes(ctx, cfg)

 	// Cap the cache allowance and tune the garbage collector
@@ -1526,7 +1597,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 	if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
 		cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
 	}
-	cfg.DatabaseHandles = MakeDatabaseHandles()
+	cfg.DatabaseHandles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
 	if ctx.GlobalIsSet(AncientFlag.Name) {
 		cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
 	}
@@ -1637,6 +1708,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		}
 		cfg.Genesis = core.DefaultGoerliGenesisBlock()
 		SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
+	case ctx.GlobalBool(KilnFlag.Name):
+		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
+			cfg.NetworkId = 1337802
+		}
+		cfg.Genesis = core.DefaultKilnGenesisBlock()
+		SetDNSDiscoveryDefaults(cfg, params.KilnGenesisHash)
 	case ctx.GlobalBool(DeveloperFlag.Name):
 		if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
 			cfg.NetworkId = 1337
@@ -1673,9 +1750,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
 		// Create a new developer genesis block or reuse existing one
 		cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
 		if ctx.GlobalIsSet(DataDirFlag.Name) {
+			// If datadir doesn't exist we need to open db in write-mode
+			// so leveldb can create files.
+			readonly := true
+			if !common.FileExist(stack.ResolvePath("chaindata")) {
+				readonly = false
+			}
 			// Check if we have an already initialized chain and fall back to
 			// that if so. Otherwise we need to generate a new genesis spec.
-			chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
+			chaindb := MakeChainDatabase(ctx, stack, readonly)
 			if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
 				cfg.Genesis = nil // fallback to db content
 			}
@@ -1710,15 +1793,15 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
 // RegisterEthService adds an Ethereum client to the stack.
 // The second return value is the full node instance, which may be nil if the
 // node is running as a light client.
-func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
+func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
 	if cfg.SyncMode == downloader.LightSync {
 		backend, err := les.New(stack, cfg)
 		if err != nil {
 			Fatalf("Failed to register the Ethereum service: %v", err)
 		}
 		stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
-		if isCatalyst {
-			if err := catalyst.RegisterLight(stack, backend); err != nil {
+		if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
+			if err := lescatalyst.Register(stack, backend); err != nil {
 				Fatalf("Failed to register the catalyst service: %v", err)
 			}
 		}
@@ -1734,8 +1817,8 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool
 			Fatalf("Failed to create the LES server: %v", err)
 		}
 	}
-	if isCatalyst {
-		if err := catalyst.Register(stack, backend); err != nil {
+	if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
+		if err := ethcatalyst.Register(stack, backend); err != nil {
 			Fatalf("Failed to register the catalyst service: %v", err)
 		}
 	}
@@ -1838,7 +1921,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
 func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
 	var (
 		cache   = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
-		handles = MakeDatabaseHandles()
+		handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))

 		err     error
 		chainDb ethdb.Database
@@ -1869,6 +1952,8 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
 		genesis = core.DefaultRinkebyGenesisBlock()
 	case ctx.GlobalBool(GoerliFlag.Name):
 		genesis = core.DefaultGoerliGenesisBlock()
+	case ctx.GlobalBool(KilnFlag.Name):
+		genesis = core.DefaultKilnGenesisBlock()
 	case ctx.GlobalBool(DeveloperFlag.Name):
 		Fatalf("Developer chains are ephemeral")
 	}
@@ -31,6 +31,7 @@ import (
 type Solidity struct {
 	Path, Version, FullVersion string
 	Major, Minor, Patch        int
+	ExtraAllowedPath           []string
 }

 // --combined-output format
@@ -58,11 +59,19 @@ type solcOutputV8 struct {
 	Version string
 }

+func (s *Solidity) allowedPaths() string {
+	paths := []string{".", "./", "../"} // default to support relative paths
+	if len(s.ExtraAllowedPath) > 0 {
+		paths = append(paths, s.ExtraAllowedPath...)
+	}
+	return strings.Join(paths, ", ")
+}
+
 func (s *Solidity) makeArgs() []string {
 	p := []string{
 		"--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc",
 		"--optimize", // code optimizer switched on
-		"--allow-paths", "., ./, ../", // default to support relative paths
+		"--allow-paths", s.allowedPaths(),
 	}
 	if s.Major > 0 || s.Minor > 4 || s.Patch > 6 {
 		p[1] += ",metadata,hashes"
@@ -108,10 +117,7 @@ func CompileSolidityString(solc, source string) (map[string]*Contract, error) {
 	if err != nil {
 		return nil, err
 	}
-	args := append(s.makeArgs(), "--")
-	cmd := exec.Command(s.Path, append(args, "-")...)
-	cmd.Stdin = strings.NewReader(source)
-	return s.run(cmd, source)
+	return s.CompileSource(source)
 }

 // CompileSolidity compiles all given Solidity source files.
@@ -119,11 +125,25 @@ func CompileSolidity(solc string, sourcefiles ...string) (map[string]*Contract,
 	if len(sourcefiles) == 0 {
 		return nil, errors.New("solc: no source files")
 	}
-	source, err := slurpFiles(sourcefiles)
+	s, err := SolidityVersion(solc)
 	if err != nil {
 		return nil, err
 	}
-	s, err := SolidityVersion(solc)
+	return s.CompileFiles(sourcefiles...)
+}
+
+// CompileSource builds and returns all the contracts contained within a source string.
+func (s *Solidity) CompileSource(source string) (map[string]*Contract, error) {
+	args := append(s.makeArgs(), "--")
+	cmd := exec.Command(s.Path, append(args, "-")...)
+	cmd.Stdin = strings.NewReader(source)
+	return s.run(cmd, source)
+}
+
+// CompileFiles compiles all given Solidity source files.
+func (s *Solidity) CompileFiles(sourcefiles ...string) (map[string]*Contract, error) {
+	source, err := slurpFiles(sourcefiles)
 	if err != nil {
 		return nil, err
 	}
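With the compile paths split into methods, callers can now tweak a `Solidity` instance before compiling. A sketch of the new surface; the extra path and source file here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/compiler"
)

func main() {
	s, err := compiler.SolidityVersion("solc") // resolve and version-probe the solc binary
	if err != nil {
		panic(err)
	}
	// Widen --allow-paths beyond the ".", "./", "../" defaults.
	s.ExtraAllowedPath = append(s.ExtraAllowedPath, "/tmp/contracts")
	contracts, err := s.CompileFiles("/tmp/contracts/Greeter.sol")
	if err != nil {
		panic(err)
	}
	for name := range contracts {
		fmt.Println("compiled:", name)
	}
}
```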
@@ -43,7 +43,6 @@ var (
 // error types into the consensus package.
 var (
 	errTooManyUncles    = errors.New("too many uncles")
-	errInvalidMixDigest = errors.New("invalid mix digest")
 	errInvalidNonce     = errors.New("invalid nonce")
 	errInvalidUncleHash = errors.New("invalid uncle hash")
 )
@@ -182,10 +181,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
 	if len(header.Extra) > 32 {
 		return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
 	}
-	// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
-	if header.MixDigest != (common.Hash{}) {
-		return errInvalidMixDigest
-	}
+	// Verify the seal parts. Ensure the nonce and uncle hash are the expected value.
 	if header.Nonce != beaconNonce {
 		return errInvalidNonce
 	}
@@ -698,6 +698,8 @@ func TestHashimoto(t *testing.T) {
 // Tests that caches generated on disk may be done concurrently.
 func TestConcurrentDiskCacheGeneration(t *testing.T) {
 	// Create a temp folder to generate the caches into
+	// TODO: t.TempDir fails to remove the directory on Windows
+	// \AppData\Local\Temp\1\TestConcurrentDiskCacheGeneration2382060137\001\cache-R23-1dca8a85e74aa763: Access is denied.
 	cachedir, err := ioutil.TempDir("", "")
 	if err != nil {
 		t.Fatalf("Failed to create temporary cache dir: %v", err)
@@ -794,11 +796,7 @@ func BenchmarkHashimotoFullSmall(b *testing.B) {

 func benchmarkHashimotoFullMmap(b *testing.B, name string, lock bool) {
 	b.Run(name, func(b *testing.B) {
-		tmpdir, err := ioutil.TempDir("", "ethash-test")
-		if err != nil {
-			b.Fatal(err)
-		}
-		defer os.RemoveAll(tmpdir)
+		tmpdir := b.TempDir()

 		d := &dataset{epoch: 0}
 		d.generate(tmpdir, 1, lock, false)
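The recurring `ioutil.TempDir` to `(t|b).TempDir` cleanups in this diff all follow the same shape: the testing helper (Go 1.15+) creates a unique directory and registers its removal as test cleanup, so the manual error check and `defer os.RemoveAll` disappear. Note the TODO comments kept above where that automatic cleanup still fails on Windows for ethash cache files. A minimal sketch:

```go
package demo

import "testing"

func BenchmarkWithScratchDir(b *testing.B) {
	dir := b.TempDir() // unique per benchmark, removed automatically on cleanup
	for i := 0; i < b.N; i++ {
		_ = dir // stand-in for using dir as a cache/data directory
	}
}
```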
@@ -549,6 +549,11 @@ func NewShared() *Ethash {

 // Close closes the exit channel to notify all backend threads exiting.
 func (ethash *Ethash) Close() error {
+	return ethash.StopRemoteSealer()
+}
+
+// StopRemoteSealer stops the remote sealer
+func (ethash *Ethash) StopRemoteSealer() error {
 	ethash.closeOnce.Do(func() {
 		// Short circuit if the exit channel is not allocated.
 		if ethash.remote == nil {
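The split above keeps `Close` as the `io.Closer`-style entry point while exposing `StopRemoteSealer` separately; the existing `closeOnce` guard makes the teardown idempotent no matter which path is taken. A reduced sketch of that shape (field names are illustrative):

```go
package demo

import "sync"

type sealer struct {
	closeOnce sync.Once
	exitCh    chan struct{} // nil if the remote sealer never started
}

func (s *sealer) StopRemoteSealer() error {
	s.closeOnce.Do(func() {
		if s.exitCh == nil {
			return // nothing to shut down
		}
		close(s.exitCh) // notify backend goroutines exactly once
	})
	return nil
}

func (s *sealer) Close() error { return s.StopRemoteSealer() }
```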
@@ -57,6 +57,8 @@ func TestTestMode(t *testing.T) {
 // This test checks that cache lru logic doesn't crash under load.
 // It reproduces https://github.com/ethereum/go-ethereum/issues/14943
 func TestCacheFileEvict(t *testing.T) {
+	// TODO: t.TempDir fails to remove the directory on Windows
+	// \AppData\Local\Temp\1\TestCacheFileEvict2179435125\001\cache-R23-0000000000000000: Access is denied.
 	tmpdir, err := ioutil.TempDir("", "ethash-test")
 	if err != nil {
 		t.Fatal(err)
@@ -20,7 +20,6 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"
 	"testing"
@@ -88,10 +87,7 @@ type tester struct {
 // Please ensure you call Close() on the returned tester to avoid leaks.
 func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {
 	// Create a temporary storage for the node keys and initialize it
-	workspace, err := ioutil.TempDir("", "console-tester-")
-	if err != nil {
-		t.Fatalf("failed to create temporary keystore: %v", err)
-	}
+	workspace := t.TempDir()

 	// Create a networkless protocol stack and start an Ethereum service within
 	stack, err := node.New(&node.Config{DataDir: workspace, UseLightweightKDF: true, Name: testInstance})
@@ -17,7 +17,7 @@
 // Package checkpointoracle is a an on-chain light client checkpoint oracle.
 package checkpointoracle

-//go:generate abigen --sol contract/oracle.sol --pkg contract --out contract/oracle.go
+//go:generate go run ../../cmd/abigen --sol contract/oracle.sol --pkg contract --out contract/oracle.go

 import (
 	"errors"

core/beacon/errors.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
+
+package beacon
+
+import "github.com/ethereum/go-ethereum/rpc"
+
+var (
+	// VALID is returned by the engine API in the following calls:
+	//   - newPayloadV1:       if the payload was already known or was just validated and executed
+	//   - forkchoiceUpdateV1: if the chain accepted the reorg (might ignore if it's stale)
+	VALID = "VALID"
+
+	// INVALID is returned by the engine API in the following calls:
+	//   - newPayloadV1:       if the payload failed to execute on top of the local chain
+	//   - forkchoiceUpdateV1: if the new head is unknown, pre-merge, or reorg to it fails
+	INVALID = "INVALID"
+
+	// SYNCING is returned by the engine API in the following calls:
+	//   - newPayloadV1:       if the payload was accepted on top of an active sync
+	//   - forkchoiceUpdateV1: if the new head was seen before, but not part of the chain
+	SYNCING = "SYNCING"
+
+	// ACCEPTED is returned by the engine API in the following calls:
+	//   - newPayloadV1: if the payload was accepted, but not processed (side chain)
+	ACCEPTED = "ACCEPTED"
+
+	INVALIDBLOCKHASH     = "INVALID_BLOCK_HASH"
+	INVALIDTERMINALBLOCK = "INVALID_TERMINAL_BLOCK"
+
+	GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
+	UnknownPayload     = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
+	InvalidTB          = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
+
+	STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
+	STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}
+)
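A sketch of how a consumer might branch on these status strings; the `ForkChoiceResponse` and `PayloadStatusV1` types referenced above are defined in `core/beacon/types.go` below:

```go
package demo

import "github.com/ethereum/go-ethereum/core/beacon"

func describe(status string) string {
	switch status {
	case beacon.VALID:
		return "payload executed on top of the local chain"
	case beacon.INVALID:
		return "payload failed to execute"
	case beacon.SYNCING:
		return "payload accepted on top of an active sync"
	case beacon.ACCEPTED:
		return "payload accepted but not processed (side chain)"
	default:
		return "unrecognized status: " + status
	}
}
```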
@@ -1,6 +1,6 @@
 // Code generated by github.com/fjl/gencodec. DO NOT EDIT.

-package catalyst
+package beacon

 import (
 	"encoding/json"
@@ -16,7 +16,7 @@ var _ = (*payloadAttributesMarshaling)(nil)
 func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
 	type PayloadAttributesV1 struct {
 		Timestamp             hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Random                common.Hash    `json:"random" gencodec:"required"`
+		Random                common.Hash    `json:"prevRandao" gencodec:"required"`
 		SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
 	}
 	var enc PayloadAttributesV1
@@ -30,7 +30,7 @@ func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
 func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
 	type PayloadAttributesV1 struct {
 		Timestamp             *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
-		Random                *common.Hash    `json:"random" gencodec:"required"`
+		Random                *common.Hash    `json:"prevRandao" gencodec:"required"`
 		SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
 	}
 	var dec PayloadAttributesV1
@@ -42,7 +42,7 @@ func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
 	}
 	p.Timestamp = uint64(*dec.Timestamp)
 	if dec.Random == nil {
-		return errors.New("missing required field 'random' for PayloadAttributesV1")
+		return errors.New("missing required field 'prevRandao' for PayloadAttributesV1")
 	}
 	p.Random = *dec.Random
 	if dec.SuggestedFeeRecipient == nil {
@@ -1,6 +1,6 @@
 // Code generated by github.com/fjl/gencodec. DO NOT EDIT.

-package catalyst
+package beacon

 import (
 	"encoding/json"
@@ -19,9 +19,9 @@ func (e ExecutableDataV1) MarshalJSON() ([]byte, error) {
 		ParentHash    common.Hash    `json:"parentHash" gencodec:"required"`
 		FeeRecipient  common.Address `json:"feeRecipient" gencodec:"required"`
 		StateRoot     common.Hash    `json:"stateRoot" gencodec:"required"`
 		ReceiptsRoot  common.Hash    `json:"receiptsRoot" gencodec:"required"`
 		LogsBloom     hexutil.Bytes  `json:"logsBloom" gencodec:"required"`
-		Random        common.Hash    `json:"random" gencodec:"required"`
+		Random        common.Hash    `json:"prevRandao" gencodec:"required"`
 		Number        hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
 		GasLimit      hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
 		GasUsed       hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
@@ -60,9 +60,9 @@ func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
 		ParentHash    *common.Hash    `json:"parentHash" gencodec:"required"`
 		FeeRecipient  *common.Address `json:"feeRecipient" gencodec:"required"`
 		StateRoot     *common.Hash    `json:"stateRoot" gencodec:"required"`
 		ReceiptsRoot  *common.Hash    `json:"receiptsRoot" gencodec:"required"`
 		LogsBloom     *hexutil.Bytes  `json:"logsBloom" gencodec:"required"`
-		Random        *common.Hash    `json:"random" gencodec:"required"`
+		Random        *common.Hash    `json:"prevRandao" gencodec:"required"`
 		Number        *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
 		GasLimit      *hexutil.Uint64 `json:"gasLimit" gencodec:"required"`
 		GasUsed       *hexutil.Uint64 `json:"gasUsed" gencodec:"required"`
@@ -97,7 +97,7 @@ func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
 	}
 	e.LogsBloom = *dec.LogsBloom
 	if dec.Random == nil {
-		return errors.New("missing required field 'random' for ExecutableDataV1")
+		return errors.New("missing required field 'prevRandao' for ExecutableDataV1")
 	}
 	e.Random = *dec.Random
 	if dec.Number == nil {
core/beacon/types.go (new file, 194 lines)
@@ -0,0 +1,194 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package beacon
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/trie"
+)
+
+//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
+
+// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
+type PayloadAttributesV1 struct {
+	Timestamp             uint64         `json:"timestamp" gencodec:"required"`
+	Random                common.Hash    `json:"prevRandao" gencodec:"required"`
+	SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
+}
+
+// JSON type overrides for PayloadAttributesV1.
+type payloadAttributesMarshaling struct {
+	Timestamp hexutil.Uint64
+}
+
+//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
+
+// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
+type ExecutableDataV1 struct {
+	ParentHash    common.Hash    `json:"parentHash" gencodec:"required"`
+	FeeRecipient  common.Address `json:"feeRecipient" gencodec:"required"`
+	StateRoot     common.Hash    `json:"stateRoot" gencodec:"required"`
+	ReceiptsRoot  common.Hash    `json:"receiptsRoot" gencodec:"required"`
+	LogsBloom     []byte         `json:"logsBloom" gencodec:"required"`
+	Random        common.Hash    `json:"prevRandao" gencodec:"required"`
+	Number        uint64         `json:"blockNumber" gencodec:"required"`
+	GasLimit      uint64         `json:"gasLimit" gencodec:"required"`
+	GasUsed       uint64         `json:"gasUsed" gencodec:"required"`
+	Timestamp     uint64         `json:"timestamp" gencodec:"required"`
+	ExtraData     []byte         `json:"extraData" gencodec:"required"`
+	BaseFeePerGas *big.Int       `json:"baseFeePerGas" gencodec:"required"`
+	BlockHash     common.Hash    `json:"blockHash" gencodec:"required"`
+	Transactions  [][]byte       `json:"transactions" gencodec:"required"`
+}
+
+// JSON type overrides for executableData.
+type executableDataMarshaling struct {
+	Number        hexutil.Uint64
+	GasLimit      hexutil.Uint64
+	GasUsed       hexutil.Uint64
+	Timestamp     hexutil.Uint64
+	BaseFeePerGas *hexutil.Big
+	ExtraData     hexutil.Bytes
+	LogsBloom     hexutil.Bytes
+	Transactions  []hexutil.Bytes
+}
+
+type PayloadStatusV1 struct {
+	Status          string       `json:"status"`
+	LatestValidHash *common.Hash `json:"latestValidHash"`
+	ValidationError *string      `json:"validationError"`
+}
+
+type TransitionConfigurationV1 struct {
+	TerminalTotalDifficulty *hexutil.Big   `json:"terminalTotalDifficulty"`
+	TerminalBlockHash       common.Hash    `json:"terminalBlockHash"`
+	TerminalBlockNumber     hexutil.Uint64 `json:"terminalBlockNumber"`
+}
+
+// PayloadID is an identifier of the payload build process
+type PayloadID [8]byte
+
+func (b PayloadID) String() string {
+	return hexutil.Encode(b[:])
+}
+
+func (b PayloadID) MarshalText() ([]byte, error) {
+	return hexutil.Bytes(b[:]).MarshalText()
+}
+
+func (b *PayloadID) UnmarshalText(input []byte) error {
+	err := hexutil.UnmarshalFixedText("PayloadID", input, b[:])
+	if err != nil {
+		return fmt.Errorf("invalid payload id %q: %w", input, err)
+	}
+	return nil
+}
+
+type ForkChoiceResponse struct {
+	PayloadStatus PayloadStatusV1 `json:"payloadStatus"`
+	PayloadID     *PayloadID      `json:"payloadId"`
+}
+
+type ForkchoiceStateV1 struct {
+	HeadBlockHash      common.Hash `json:"headBlockHash"`
+	SafeBlockHash      common.Hash `json:"safeBlockHash"`
+	FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
+}
+
+func encodeTransactions(txs []*types.Transaction) [][]byte {
+	var enc = make([][]byte, len(txs))
+	for i, tx := range txs {
+		enc[i], _ = tx.MarshalBinary()
+	}
+	return enc
+}
+
+func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
+	var txs = make([]*types.Transaction, len(enc))
+	for i, encTx := range enc {
+		var tx types.Transaction
+		if err := tx.UnmarshalBinary(encTx); err != nil {
+			return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
+		}
+		txs[i] = &tx
+	}
+	return txs, nil
+}
+
+// ExecutableDataToBlock constructs a block from executable data.
+// It verifies that the following fields:
+//	len(extraData) <= 32
+//	uncleHash = emptyUncleHash
+//	difficulty = 0
+// and that the blockhash of the constructed block matches the parameters.
+func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
+	txs, err := decodeTransactions(params.Transactions)
+	if err != nil {
+		return nil, err
+	}
+	if len(params.ExtraData) > 32 {
+		return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
+	}
+	header := &types.Header{
+		ParentHash:  params.ParentHash,
+		UncleHash:   types.EmptyUncleHash,
+		Coinbase:    params.FeeRecipient,
+		Root:        params.StateRoot,
+		TxHash:      types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
+		ReceiptHash: params.ReceiptsRoot,
+		Bloom:       types.BytesToBloom(params.LogsBloom),
+		Difficulty:  common.Big0,
+		Number:      new(big.Int).SetUint64(params.Number),
+		GasLimit:    params.GasLimit,
+		GasUsed:     params.GasUsed,
+		Time:        params.Timestamp,
+		BaseFee:     params.BaseFeePerGas,
+		Extra:       params.ExtraData,
+		MixDigest:   params.Random,
+	}
+	block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
+	if block.Hash() != params.BlockHash {
+		return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
+	}
+	return block, nil
+}
+
+// BlockToExecutableData constructs the executableDataV1 structure by filling the
+// fields from the given block. It assumes the given block is post-merge block.
+func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
+	return &ExecutableDataV1{
+		BlockHash:     block.Hash(),
+		ParentHash:    block.ParentHash(),
+		FeeRecipient:  block.Coinbase(),
+		StateRoot:     block.Root(),
+		Number:        block.NumberU64(),
+		GasLimit:      block.GasLimit(),
+		GasUsed:       block.GasUsed(),
+		BaseFeePerGas: block.BaseFee(),
+		Timestamp:     block.Time(),
+		ReceiptsRoot:  block.ReceiptHash(),
+		LogsBloom:     block.Bloom().Bytes(),
+		Transactions:  encodeTransactions(block.Transactions()),
+		Random:        block.MixDigest(),
+		ExtraData:     block.Extra(),
+	}
+}
@@ -18,9 +18,7 @@ package core

 import (
 	"crypto/ecdsa"
-	"io/ioutil"
 	"math/big"
-	"os"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
@@ -162,7 +160,7 @@ func genTxRing(naccounts int) func(int, *BlockGen) {

 // genUncles generates blocks with two uncle headers.
 func genUncles(i int, gen *BlockGen) {
-	if i >= 6 {
+	if i >= 7 {
 		b2 := gen.PrevBlock(i - 6).Header()
 		b2.Extra = []byte("foo")
 		gen.AddUncle(b2)
@@ -175,14 +173,11 @@ func genUncles(i int, gen *BlockGen) {
 func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) {
 	// Create the database in memory or in a temporary directory.
 	var db ethdb.Database
+	var err error
 	if !disk {
 		db = rawdb.NewMemoryDatabase()
 	} else {
-		dir, err := ioutil.TempDir("", "eth-core-bench")
-		if err != nil {
-			b.Fatalf("cannot create temporary directory: %v", err)
-		}
-		defer os.RemoveAll(dir)
+		dir := b.TempDir()
 		db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false)
 		if err != nil {
 			b.Fatalf("cannot create temporary database: %v", err)
@@ -278,26 +273,18 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) {

 func benchWriteChain(b *testing.B, full bool, count uint64) {
 	for i := 0; i < b.N; i++ {
-		dir, err := ioutil.TempDir("", "eth-chain-bench")
-		if err != nil {
-			b.Fatalf("cannot create temporary directory: %v", err)
-		}
+		dir := b.TempDir()
 		db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
 		if err != nil {
 			b.Fatalf("error opening database at %v: %v", dir, err)
 		}
 		makeChainForBench(db, full, count)
 		db.Close()
-		os.RemoveAll(dir)
 	}
 }

 func benchReadChain(b *testing.B, full bool, count uint64) {
-	dir, err := ioutil.TempDir("", "eth-chain-bench")
-	if err != nil {
-		b.Fatalf("cannot create temporary directory: %v", err)
-	}
-	defer os.RemoveAll(dir)
+	dir := b.TempDir()

 	db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false)
 	if err != nil {
@@ -542,6 +542,19 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 				}
 			}
 			if beyondRoot || newHeadBlock.NumberU64() == 0 {
+				if newHeadBlock.NumberU64() == 0 {
+					// Recommit the genesis state into disk in case the rewinding destination
+					// is genesis block and the relevant state is gone. In the future this
+					// rewinding destination can be the earliest block stored in the chain
+					// if the historical chain pruning is enabled. In that case the logic
+					// needs to be improved here.
+					if !bc.HasState(bc.genesisBlock.Root()) {
+						if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
+							log.Crit("Failed to commit genesis state", "err", err)
+						}
+						log.Debug("Recommitted genesis state to disk")
+					}
+				}
 				log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
 				break
 			}
@@ -554,7 +567,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 		// Degrade the chain markers if they are explicitly reverted.
 		// In theory we should update all in-memory markers in the
 		// last step, however the direction of SetHead is from high
-		// to low, so it's safe the update in-memory markers directly.
+		// to low, so it's safe to update in-memory markers directly.
 		bc.currentBlock.Store(newHeadBlock)
 		headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
 	}
@@ -592,7 +605,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
 		if num+1 <= frozen {
 			// Truncate all relative data(header, total difficulty, body, receipt
 			// and canonical hash) from ancient store.
-			if err := bc.db.TruncateAncients(num); err != nil {
+			if err := bc.db.TruncateHead(num); err != nil {
 				log.Crit("Failed to truncate ancient data", "number", num, "err", err)
 			}
 			// Remove the hash <-> number mapping from the active store.
@@ -979,38 +992,37 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
 		// range. In this case, all tx indices of newly imported blocks should be
 		// generated.
 		var batch = bc.db.NewBatch()
-		for _, block := range blockChain {
+		for i, block := range blockChain {
 			if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
 				rawdb.WriteTxLookupEntriesByBlock(batch, block)
 			} else if rawdb.ReadTxIndexTail(bc.db) != nil {
 				rawdb.WriteTxLookupEntriesByBlock(batch, block)
 			}
 			stats.processed++
-		}
-		// Flush all tx-lookup index data.
-		size += int64(batch.ValueSize())
-		if err := batch.Write(); err != nil {
-			// The tx index data could not be written.
-			// Roll back the ancient store update.
-			fastBlock := bc.CurrentFastBlock().NumberU64()
-			if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
-				log.Error("Can't truncate ancient store after failed insert", "err", err)
+
+			if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
+				size += int64(batch.ValueSize())
+				if err = batch.Write(); err != nil {
+					fastBlock := bc.CurrentFastBlock().NumberU64()
+					if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
+						log.Error("Can't truncate ancient store after failed insert", "err", err)
+					}
+					return 0, err
+				}
+				batch.Reset()
 			}
-			return 0, err
 		}

 		// Sync the ancient store explicitly to ensure all data has been flushed to disk.
 		if err := bc.db.Sync(); err != nil {
 			return 0, err
 		}

 		// Update the current fast block because all block data is now present in DB.
 		previousFastBlock := bc.CurrentFastBlock().NumberU64()
 		if !updateHead(blockChain[len(blockChain)-1]) {
 			// We end up here if the header chain has reorg'ed, and the blocks/receipts
 			// don't match the canonical chain.
-			if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
+			if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
 				log.Error("Can't truncate ancient store after failed insert", "err", err)
 			}
 			return 0, errSideChainReceipts
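The restructured loop above amortizes writes by flushing whenever the batch outgrows `ethdb.IdealBatchSize`, plus once for the final element, rather than building one unbounded batch. The same pattern in isolation:

```go
package demo

import "github.com/ethereum/go-ethereum/ethdb"

// writeAll flushes periodically so memory stays bounded on long chains.
func writeAll(db ethdb.Database, items [][2][]byte) error {
	batch := db.NewBatch()
	for i, kv := range items {
		if err := batch.Put(kv[0], kv[1]); err != nil {
			return err
		}
		if batch.ValueSize() > ethdb.IdealBatchSize || i == len(items)-1 {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	return nil
}
```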
@@ -1647,12 +1659,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 		blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
 		blockInsertTimer.UpdateSince(start)

-		if !setHead {
-			// We did not setHead, so we don't have any stats to update
-			log.Info("Inserted block", "number", block.Number(), "hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", common.PrettyDuration(time.Since(start)))
-			return it.index, nil
-		}
+		// Report the import stats before returning the various results
+		stats.processed++
+		stats.usedGas += usedGas
+
+		dirty, _ := bc.stateCache.TrieDB().Size()
+		stats.report(chain, it.index, dirty, setHead)
+
+		if !setHead {
+			return it.index, nil // Direct block insertion of a single block
+		}
 		switch status {
 		case CanonStatTy:
 			log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
@@ -1679,11 +1695,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
 				"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
 				"root", block.Root())
 		}
-		stats.processed++
-		stats.usedGas += usedGas
-
-		dirty, _ := bc.stateCache.TrieDB().Size()
-		stats.report(chain, it.index, dirty)
 	}

 	// Any blocks remaining here? The only ones we care about are the future ones
@@ -2080,28 +2091,39 @@ func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
 // block. It's possible that after the reorg the relevant state of head
 // is missing. It can be fixed by inserting a new block which triggers
 // the re-execution.
-func (bc *BlockChain) SetChainHead(newBlock *types.Block) error {
+func (bc *BlockChain) SetChainHead(head *types.Block) error {
 	if !bc.chainmu.TryLock() {
 		return errChainStopped
 	}
 	defer bc.chainmu.Unlock()
 
 	// Run the reorg if necessary and set the given block as new head.
-	if newBlock.ParentHash() != bc.CurrentBlock().Hash() {
-		if err := bc.reorg(bc.CurrentBlock(), newBlock); err != nil {
+	start := time.Now()
+	if head.ParentHash() != bc.CurrentBlock().Hash() {
+		if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
 			return err
 		}
 	}
-	bc.writeHeadBlock(newBlock)
+	bc.writeHeadBlock(head)
 
 	// Emit events
-	logs := bc.collectLogs(newBlock.Hash(), false)
-	bc.chainFeed.Send(ChainEvent{Block: newBlock, Hash: newBlock.Hash(), Logs: logs})
+	logs := bc.collectLogs(head.Hash(), false)
+	bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
 	if len(logs) > 0 {
 		bc.logsFeed.Send(logs)
 	}
-	bc.chainHeadFeed.Send(ChainHeadEvent{Block: newBlock})
-	log.Info("Set the chain head", "number", newBlock.Number(), "hash", newBlock.Hash())
+	bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
+
+	context := []interface{}{
+		"number", head.Number(),
+		"hash", head.Hash(),
+		"root", head.Root(),
+		"elapsed", time.Since(start),
+	}
+	if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
+		context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
+	}
+	log.Info("Chain head was updated", context...)
 	return nil
 }
 
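Note: the comment above warns that head state can be missing right after a reorg until a new block triggers re-execution. A minimal sketch of the two-step flow this API split enables; the driver function is hypothetical, only the two BlockChain methods are from this diff:

    import (
        "github.com/ethereum/go-ethereum/core"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // importThenPromote executes and persists a block without touching the
    // canonical chain, then promotes it to head in a separate step, which
    // reorgs if necessary and emits the chain/head/log events seen above.
    func importThenPromote(bc *core.BlockChain, block *types.Block) error {
        if err := bc.InsertBlockWithoutSetHead(block); err != nil {
            return err
        }
        return bc.SetChainHead(block)
    }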
@@ -2287,6 +2309,9 @@ Error: %v
 // of the header retrieval mechanisms already need to verify nonces, as well as
 // because nonces can be verified sparsely, not needing to check each.
 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	if len(chain) == 0 {
+		return 0, nil
+	}
 	start := time.Now()
 	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
 		return i, err
@@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second
 
 // report prints statistics if some number of blocks have been processed
 // or more than a few seconds have passed since the last message.
-func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize) {
+func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
 	// Fetch the timings for the batch
 	var (
 		now = mclock.Now()
@@ -71,8 +71,11 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
 	if st.ignored > 0 {
 		context = append(context, []interface{}{"ignored", st.ignored}...)
 	}
-	log.Info("Imported new chain segment", context...)
+	if setHead {
+		log.Info("Imported new chain segment", context...)
+	} else {
+		log.Info("Imported new potential chain segment", context...)
+	}
 	// Bump the stats reported to the next section
 	*st = insertStats{startTime: now, lastIndex: index + 1}
 }
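Note: report is rate-limited by statsReportLimit (8 seconds, per the hunk header above). A self-contained sketch of that throttling pattern with illustrative names, independent of geth's types:

    package main

    import (
        "log"
        "time"
    )

    const reportInterval = 8 * time.Second // mirrors statsReportLimit

    type progress struct {
        last      time.Time
        processed int
    }

    // report logs at most once per interval, or when the batch finishes.
    func (p *progress) report(finished bool) {
        p.processed++
        if finished || time.Since(p.last) >= reportInterval {
            log.Printf("imported chain segment: blocks=%d", p.processed)
            p.last, p.processed = time.Now(), 0
        }
    }

    func main() {
        p := &progress{last: time.Now()}
        for i := 0; i < 100; i++ {
            p.report(i == 99)
        }
    }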
@@ -21,9 +21,7 @@
 package core
 
 import (
-	"io/ioutil"
 	"math/big"
-	"os"
 	"testing"
 	"time"
 
@@ -1756,11 +1754,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 	// fmt.Println(tt.dump(true))
 
 	// Create a temporary persistent database
-	datadir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("Failed to create temporary datadir: %v", err)
-	}
-	os.RemoveAll(datadir)
+	datadir := t.TempDir()
 
 	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
 	if err != nil {
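Note: this hunk and the many like it below replace the manual ioutil.TempDir/os.RemoveAll bookkeeping with testing's built-in TempDir (available since Go 1.15), which registers its own cleanup. An illustrative test, not from the diff:

    import "testing"

    func TestTempDirPattern(t *testing.T) {
        // One call replaces the TempDir creation, the error check, and the
        // deferred removal: the directory is deleted automatically when the
        // test and its subtests complete.
        datadir := t.TempDir()
        _ = datadir // e.g. pass to rawdb.NewLevelDBDatabaseWithFreezer
    }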
@@ -1779,6 +1773,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 			SnapshotLimit: 0, // Disable snapshot by default
 		}
 	)
+	defer engine.Close()
 	if snapshots {
 		config.SnapshotLimit = 256
 		config.SnapshotWait = true
@@ -1836,25 +1831,25 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
 	}
 	defer db.Close()
 
-	chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
+	newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
 	if err != nil {
 		t.Fatalf("Failed to recreate chain: %v", err)
 	}
-	defer chain.Stop()
+	defer newChain.Stop()
 
 	// Iterate over all the remaining blocks and ensure there are no gaps
-	verifyNoGaps(t, chain, true, canonblocks)
-	verifyNoGaps(t, chain, false, sideblocks)
-	verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
-	verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
+	verifyNoGaps(t, newChain, true, canonblocks)
+	verifyNoGaps(t, newChain, false, sideblocks)
+	verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
+	verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
 
-	if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
+	if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
 		t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
 	}
-	if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
+	if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
 		t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
 	}
-	if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
+	if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
 		t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
 	}
 	if frozen, err := db.(freezer).Ancients(); err != nil {
@@ -1883,11 +1878,7 @@ func TestIssue23496(t *testing.T) {
 	//log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
 
 	// Create a temporary persistent database
-	datadir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("Failed to create temporary datadir: %v", err)
-	}
-	os.RemoveAll(datadir)
+	datadir := t.TempDir()
 
 	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
 	if err != nil {
@@ -21,9 +21,7 @@ package core
 
 import (
 	"fmt"
-	"io/ioutil"
 	"math/big"
-	"os"
 	"strings"
 	"testing"
 	"time"
@@ -1955,11 +1953,7 @@ func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) {
 	// fmt.Println(tt.dump(false))
 
 	// Create a temporary persistent database
-	datadir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("Failed to create temporary datadir: %v", err)
-	}
-	os.RemoveAll(datadir)
+	datadir := t.TempDir()
 
 	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
 	if err != nil {
@@ -22,7 +22,6 @@ package core
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
 	"math/big"
 	"os"
 	"strings"
@@ -59,11 +58,7 @@ type snapshotTestBasic struct {
 
 func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
 	// Create a temporary persistent database
-	datadir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("Failed to create temporary datadir: %v", err)
-	}
-	os.RemoveAll(datadir)
+	datadir := t.TempDir()
 
 	db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
 	if err != nil {
@@ -19,7 +19,6 @@ package core
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"math/big"
 	"math/rand"
 	"os"
@@ -791,15 +790,12 @@ func TestFastVsFullChains(t *testing.T) {
 			t.Fatalf("failed to insert receipt %d: %v", n, err)
 		}
 	// Freezer style fast import the chain.
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
+	frdir := t.TempDir()
 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
+	defer ancientDb.Close()
 	gspec.MustCommit(ancientDb)
 	ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 	defer ancient.Stop()
@@ -886,18 +882,14 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)
 
 	// makeDb creates a db instance for testing.
-	makeDb := func() (ethdb.Database, func()) {
-		dir, err := ioutil.TempDir("", "")
-		if err != nil {
-			t.Fatalf("failed to create temp freezer dir: %v", err)
-		}
-		defer os.Remove(dir)
+	makeDb := func() ethdb.Database {
+		dir := t.TempDir()
 		db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false)
 		if err != nil {
 			t.Fatalf("failed to create temp freezer db: %v", err)
 		}
 		gspec.MustCommit(db)
-		return db, func() { os.RemoveAll(dir) }
+		return db
 	}
 	// Configure a subchain to roll back
 	remove := blocks[height/2].NumberU64()
@@ -917,8 +909,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		}
 	}
 	// Import the chain as an archive node and ensure all pointers are updated
-	archiveDb, delfn := makeDb()
-	defer delfn()
+	archiveDb := makeDb()
+	defer archiveDb.Close()
 
 	archiveCaching := *defaultCacheConfig
 	archiveCaching.TrieDirtyDisabled = true
@@ -934,8 +926,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	assert(t, "archive", archive, height/2, height/2, height/2)
 
 	// Import the chain as a non-archive node and ensure all pointers are updated
-	fastDb, delfn := makeDb()
-	defer delfn()
+	fastDb := makeDb()
+	defer fastDb.Close()
 	fast, _ := NewBlockChain(fastDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 	defer fast.Stop()
 
@@ -954,8 +946,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 	assert(t, "fast", fast, height/2, height/2, 0)
 
 	// Import the chain as a ancient-first node and ensure all pointers are updated
-	ancientDb, delfn := makeDb()
-	defer delfn()
+	ancientDb := makeDb()
+	defer ancientDb.Close()
 	ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 	defer ancient.Stop()
 
@@ -973,8 +965,8 @@ func TestLightVsFastVsFullChainHeads(t *testing.T) {
 		t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen)
 	}
 	// Import the chain as a light node and ensure all pointers are updated
-	lightDb, delfn := makeDb()
-	defer delfn()
+	lightDb := makeDb()
+	defer lightDb.Close()
 	light, _ := NewBlockChain(lightDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 	if n, err := light.InsertHeaderChain(headers, 1); err != nil {
 		t.Fatalf("failed to insert header %d: %v", n, err)
@@ -1753,16 +1745,13 @@ func TestBlockchainRecovery(t *testing.T) {
 	blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), nil)
 
 	// Import the chain as a ancient-first node and ensure all pointers are updated
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
+	frdir := t.TempDir()
 
 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
+	defer ancientDb.Close()
 	gspec.MustCommit(ancientDb)
 	ancient, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
 
@@ -1825,15 +1814,12 @@ func TestInsertReceiptChainRollback(t *testing.T) {
 	}
 
 	// Set up a BlockChain that uses the ancient store.
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
+	frdir := t.TempDir()
 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
+	defer ancientDb.Close()
 	gspec := Genesis{Config: params.AllEthashProtocolChanges}
 	gspec.MustCommit(ancientDb)
 	ancientChain, _ := NewBlockChain(ancientDb, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
@@ -2090,17 +2076,13 @@ func testInsertKnownChainData(t *testing.T, typ string) {
 		b.OffsetTime(-9) // A higher difficulty
 	})
 	// Import the shared chain and the original canonical one
-	dir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(dir)
+	dir := t.TempDir()
 	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
 	(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb)
-	defer os.RemoveAll(dir)
+	defer chaindb.Close()
 
 	chain, err := NewBlockChain(chaindb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)
 	if err != nil {
@@ -2254,17 +2236,13 @@ func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight i
 	})
 
 	// Import the shared chain and the original canonical one
-	dir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(dir)
+	dir := t.TempDir()
 	chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), dir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
 	(&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(chaindb)
-	defer os.RemoveAll(dir)
+	defer chaindb.Close()
 
 	chain, err := NewBlockChain(chaindb, nil, &chainConfig, runEngine, vm.Config{}, nil, nil)
 	if err != nil {
@@ -2564,11 +2542,7 @@ func TestTransactionIndices(t *testing.T) {
 			}
 		}
 	}
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
+	frdir := t.TempDir()
 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
@@ -2621,6 +2595,7 @@ func TestTransactionIndices(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
+	defer ancientDb.Close()
 	gspec.MustCommit(ancientDb)
 
 	limit = []uint64{0, 64 /* drop stale */, 32 /* shorten history */, 64 /* extend history */, 0 /* restore all */}
@@ -2691,15 +2666,12 @@ func TestSkipStaleTxIndicesInSnapSync(t *testing.T) {
 		}
 	}
 
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
+	frdir := t.TempDir()
 	ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
 		t.Fatalf("failed to create temp freezer db: %v", err)
 	}
+	defer ancientDb.Close()
 	gspec.MustCommit(ancientDb)
 
 	// Import all blocks into ancient db, only HEAD-32 indices are kept.
@@ -2987,10 +2959,10 @@ func TestDeleteRecreateSlots(t *testing.T) {
 	initCode := []byte{
 		byte(vm.PUSH1), 0x3, // value
 		byte(vm.PUSH1), 0x3, // location
-		byte(vm.SSTORE), // Set slot[3] = 1
+		byte(vm.SSTORE), // Set slot[3] = 3
 		byte(vm.PUSH1), 0x4, // value
 		byte(vm.PUSH1), 0x4, // location
-		byte(vm.SSTORE), // Set slot[4] = 1
+		byte(vm.SSTORE), // Set slot[4] = 4
 		// Slots are set, now return the code
 		byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
 		byte(vm.PUSH1), 0x0, // memory start on stack
@@ -40,6 +40,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
 	var (
 		beneficiary common.Address
 		baseFee     *big.Int
+		random      *common.Hash
 	)
 
 	// If we don't have an explicit author (i.e. not mining), extract from the header
@@ -51,6 +52,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
 	if header.BaseFee != nil {
 		baseFee = new(big.Int).Set(header.BaseFee)
 	}
+	if header.Difficulty.Cmp(common.Big0) == 0 {
+		random = &header.MixDigest
+	}
 	return vm.BlockContext{
 		CanTransfer: CanTransfer,
 		Transfer:    Transfer,
@@ -61,6 +65,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
 		Difficulty:  new(big.Int).Set(header.Difficulty),
 		BaseFee:     baseFee,
 		GasLimit:    header.GasLimit,
+		Random:      random,
 	}
 }
 
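Note: taken together, the three evm.go hunks plumb EIP-4399 randomness into the EVM block context: a zero-difficulty header marks a post-merge block, and its MixDigest then carries the beacon randomness, surfaced as Random. A sketch of the observable effect; chainCtx is an assumed core.ChainContext:

    header := &types.Header{
        Difficulty: big.NewInt(0), // zero difficulty marks a merged block
        MixDigest:  common.HexToHash("0x01"), // beacon randomness (EIP-4399)
    }
    blockCtx := core.NewEVMBlockContext(header, chainCtx, nil)
    // blockCtx.Random is non-nil only on this zero-difficulty path;
    // pre-merge headers leave it nil.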
core/genesis.go (127 changes)
@@ -39,8 +39,8 @@ import (
 	"github.com/ethereum/go-ethereum/trie"
 )
 
-//go:generate gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
-//go:generate gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go
+//go:generate go run github.com/fjl/gencodec -type Genesis -field-override genesisSpecMarshaling -out gen_genesis.go
+//go:generate go run github.com/fjl/gencodec -type GenesisAccount -field-override genesisAccountMarshaling -out gen_genesis_account.go
 
 var errGenesisNoConfig = errors.New("genesis has no chain configuration")
 
@@ -80,6 +80,81 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+// flush adds allocated genesis accounts into a fresh new statedb and
+// commit the state changes into the given database handler.
+func (ga *GenesisAlloc) flush(db ethdb.Database) (common.Hash, error) {
+	statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	for addr, account := range *ga {
+		statedb.AddBalance(addr, account.Balance)
+		statedb.SetCode(addr, account.Code)
+		statedb.SetNonce(addr, account.Nonce)
+		for key, value := range account.Storage {
+			statedb.SetState(addr, key, value)
+		}
+	}
+	root, err := statedb.Commit(false)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	err = statedb.Database().TrieDB().Commit(root, true, nil)
+	if err != nil {
+		return common.Hash{}, err
+	}
+	return root, nil
+}
+
+// write writes the json marshaled genesis state into database
+// with the given block hash as the unique identifier.
+func (ga *GenesisAlloc) write(db ethdb.KeyValueWriter, hash common.Hash) error {
+	blob, err := json.Marshal(ga)
+	if err != nil {
+		return err
+	}
+	rawdb.WriteGenesisState(db, hash, blob)
+	return nil
+}
+
+// CommitGenesisState loads the stored genesis state with the given block
+// hash and commits them into the given database handler.
+func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
+	var alloc GenesisAlloc
+	blob := rawdb.ReadGenesisState(db, hash)
+	if len(blob) != 0 {
+		if err := alloc.UnmarshalJSON(blob); err != nil {
+			return err
+		}
+	} else {
+		// Genesis allocation is missing and there are several possibilities:
+		// the node is legacy which doesn't persist the genesis allocation or
+		// the persisted allocation is just lost.
+		// - supported networks(mainnet, testnets), recover with defined allocations
+		// - private network, can't recover
+		var genesis *Genesis
+		switch hash {
+		case params.MainnetGenesisHash:
+			genesis = DefaultGenesisBlock()
+		case params.RopstenGenesisHash:
+			genesis = DefaultRopstenGenesisBlock()
+		case params.RinkebyGenesisHash:
+			genesis = DefaultRinkebyGenesisBlock()
+		case params.GoerliGenesisHash:
+			genesis = DefaultGoerliGenesisBlock()
+		case params.SepoliaGenesisHash:
+			genesis = DefaultSepoliaGenesisBlock()
+		}
+		if genesis != nil {
+			alloc = genesis.Alloc
+		} else {
+			return errors.New("not found")
+		}
+	}
+	_, err := alloc.flush(db)
+	return err
+}
+
 // GenesisAccount is an account in the state of the genesis block.
 type GenesisAccount struct {
 	Code []byte `json:"code,omitempty"`
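Note: a rough usage sketch for the new helper (db and genesisHash are assumptions). CommitGenesisState re-materializes the genesis state for a block hash, falling back to the hard-coded allocations of the known public networks when no persisted copy exists:

    if err := core.CommitGenesisState(db, genesisHash); err != nil {
        // A private network without a persisted allocation fails with
        // "not found"; known networks recover from their built-in allocs.
        log.Error("Failed to recover genesis state", "err", err)
    }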
@@ -219,11 +294,19 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
 		rawdb.WriteChainConfig(db, stored, newcfg)
 		return newcfg, stored, nil
 	}
-	// Special case: don't change the existing config of a non-mainnet chain if no new
-	// config is supplied. These chains would get AllProtocolChanges (and a compat error)
-	// if we just continued here.
+	// Special case: if a private network is being used (no genesis and also no
+	// mainnet hash in the database), we must not apply the `configOrDefault`
+	// chain config as that would be AllProtocolChanges (applying any new fork
+	// on top of an existing private network genesis block). In that case, only
+	// apply the overrides.
 	if genesis == nil && stored != params.MainnetGenesisHash {
-		return storedcfg, stored, nil
+		newcfg = storedcfg
+		if overrideArrowGlacier != nil {
+			newcfg.ArrowGlacierBlock = overrideArrowGlacier
+		}
+		if overrideTerminalTotalDifficulty != nil {
+			newcfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty
+		}
 	}
 	// Check config compatibility and write the config. Compatibility errors
 	// are returned to the caller unless we're already at block zero.
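Note: a sketch of the effective behavior after this hunk, with illustrative values; the full parameter list is truncated in the hunk header, so the signature here is an assumption inferred from the override names used in the body:

    // genesis == nil and a non-mainnet stored hash: the stored config is
    // reused, with only the explicit overrides patched on top.
    cfg, hash, err := core.SetupGenesisBlockWithOverride(
        db, nil, // no new genesis supplied
        big.NewInt(13_773_000), // overrideArrowGlacier (example value)
        nil, // overrideTerminalTotalDifficulty
    )
    _, _, _ = cfg, hash, err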
@@ -253,6 +336,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
 		return params.RinkebyChainConfig
 	case ghash == params.GoerliGenesisHash:
 		return params.GoerliChainConfig
+	case ghash == params.KilnGenesisHash:
+		return DefaultKilnGenesisBlock().Config
 	default:
 		return params.AllEthashProtocolChanges
 	}
@@ -264,19 +349,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 	if db == nil {
 		db = rawdb.NewMemoryDatabase()
 	}
-	statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
+	root, err := g.Alloc.flush(db)
 	if err != nil {
 		panic(err)
 	}
-	for addr, account := range g.Alloc {
-		statedb.AddBalance(addr, account.Balance)
-		statedb.SetCode(addr, account.Code)
-		statedb.SetNonce(addr, account.Nonce)
-		for key, value := range account.Storage {
-			statedb.SetState(addr, key, value)
-		}
-	}
-	root := statedb.IntermediateRoot(false)
 	head := &types.Header{
 		Number: new(big.Int).SetUint64(g.Number),
 		Nonce:  types.EncodeNonce(g.Nonce),
@@ -294,7 +370,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 	if g.GasLimit == 0 {
 		head.GasLimit = params.GenesisGasLimit
 	}
-	if g.Difficulty == nil {
+	if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
 		head.Difficulty = params.GenesisDifficulty
 	}
 	if g.Config != nil && g.Config.IsLondon(common.Big0) {
@@ -304,9 +380,6 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
 			head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
 		}
 	}
-	statedb.Commit(false)
-	statedb.Database().TrieDB().Commit(root, true, nil)
-
 	return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
 }
 
@@ -324,9 +397,12 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
 	if err := config.CheckConfigForkOrder(); err != nil {
 		return nil, err
 	}
-	if config.Clique != nil && len(block.Extra()) == 0 {
+	if config.Clique != nil && len(block.Extra()) < 32+crypto.SignatureLength {
 		return nil, errors.New("can't start clique chain without signers")
 	}
+	if err := g.Alloc.write(db, block.Hash()); err != nil {
+		return nil, err
+	}
 	rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
 	rawdb.WriteBlock(db, block)
 	rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -418,6 +494,15 @@ func DefaultSepoliaGenesisBlock() *Genesis {
 	}
 }
 
+func DefaultKilnGenesisBlock() *Genesis {
+	g := new(Genesis)
+	reader := strings.NewReader(KilnAllocData)
+	if err := json.NewDecoder(reader).Decode(g); err != nil {
+		panic(err)
+	}
+	return g
+}
+
 // DeveloperGenesisBlock returns the 'geth --dev' genesis block.
 func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
 	// Override the default period to the user requested one
File diff suppressed because one or more lines are too long
@@ -213,3 +213,33 @@ func TestGenesis_Commit(t *testing.T) {
 		t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
 	}
 }
+
+func TestReadWriteGenesisAlloc(t *testing.T) {
+	var (
+		db    = rawdb.NewMemoryDatabase()
+		alloc = &GenesisAlloc{
+			{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
+			{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
+		}
+		hash = common.HexToHash("0xdeadbeef")
+	)
+	alloc.write(db, hash)
+
+	var reload GenesisAlloc
+	err := reload.UnmarshalJSON(rawdb.ReadGenesisState(db, hash))
+	if err != nil {
+		t.Fatalf("Failed to load genesis state %v", err)
+	}
+	if len(reload) != len(*alloc) {
+		t.Fatal("Unexpected genesis allocation")
+	}
+	for addr, account := range reload {
+		want, ok := (*alloc)[addr]
+		if !ok {
+			t.Fatal("Account is not found")
+		}
+		if !reflect.DeepEqual(want, account) {
+			t.Fatal("Unexpected account")
+		}
+	}
+}
@@ -83,8 +83,8 @@ type NumberHash struct {
 	Hash   common.Hash
 }
 
-// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
-// both canonical and reorged forks included.
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
+// heights, both canonical and reorged forks included.
 // This method considers both limits to be _inclusive_.
 func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
 	var (
@@ -776,7 +776,7 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
 	WriteHeader(db, block.Header())
 }
 
-// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
+// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
 func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
 	var (
 		tdSum = new(big.Int).Set(td)
@@ -23,7 +23,6 @@ import (
 	"io/ioutil"
 	"math/big"
 	"math/rand"
-	"os"
 	"reflect"
 	"testing"
 
@@ -435,11 +434,7 @@ func checkReceiptsRLP(have, want types.Receipts) error {
 
 func TestAncientStorage(t *testing.T) {
 	// Freezer style fast import the chain.
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.RemoveAll(frdir)
+	frdir := t.TempDir()
 
 	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
@@ -577,15 +572,12 @@ func TestHashesInRange(t *testing.T) {
 // This measures the write speed of the WriteAncientBlocks operation.
 func BenchmarkWriteAncientBlocks(b *testing.B) {
 	// Open freezer database.
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		b.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.RemoveAll(frdir)
+	frdir := b.TempDir()
 	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
 		b.Fatalf("failed to create database with ancient backend")
 	}
+	defer db.Close()
 
 	// Create the data to insert. The blocks must have consecutive numbers, so we create
 	// all of them ahead of time. However, there is no need to create receipts
@@ -886,11 +878,7 @@ func BenchmarkDecodeRLPLogs(b *testing.B) {
 
 func TestHeadersRLPStorage(t *testing.T) {
 	// Have N headers in the freezer
-	frdir, err := ioutil.TempDir("", "")
-	if err != nil {
-		t.Fatalf("failed to create temp freezer dir: %v", err)
-	}
-	defer os.Remove(frdir)
+	frdir := t.TempDir()
 
 	db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
 	if err != nil {
@@ -81,6 +81,19 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
 	}
 }
 
+// ReadGenesisState retrieves the genesis state based on the given genesis hash.
+func ReadGenesisState(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	data, _ := db.Get(genesisKey(hash))
+	return data
+}
+
+// WriteGenesisState writes the genesis state into the disk.
+func WriteGenesisState(db ethdb.KeyValueWriter, hash common.Hash, data []byte) {
+	if err := db.Put(genesisKey(hash), data); err != nil {
+		log.Crit("Failed to store genesis state", "err", err)
+	}
+}
+
 // crashList is a list of unclean-shutdown-markers, for rlp-encoding to the
 // database
 type crashList struct {
@@ -115,7 +115,7 @@ func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash com
 // IterateStorageSnapshots returns an iterator for walking the entire storage
 // space of a specific account.
 func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
-	return db.NewIterator(storageSnapshotsKey(accountHash), nil)
+	return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
}
 
 // ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
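Note: the wrapper guards against keys that merely share SnapshotStoragePrefix: only keys of exactly prefix + account hash + storage hash length are storage-slot entries. Conceptually, given an assumed already-open iterator it:

    wantLen := len(SnapshotStoragePrefix) + 2*common.HashLength
    for it.Next() {
        if len(it.Key()) != wantLen {
            continue // prefix collision with some other key space
        }
        // the trailing common.HashLength bytes are the storage slot hash
    }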
@@ -28,6 +28,58 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	return data
 }
 
+// ReadCode retrieves the contract code of the provided code hash.
+func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	// Try with the prefixed code scheme first, if not then try with legacy
+	// scheme.
+	data := ReadCodeWithPrefix(db, hash)
+	if len(data) != 0 {
+		return data
+	}
+	data, _ = db.Get(hash.Bytes())
+	return data
+}
+
+// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
+// The main difference between this function and ReadCode is this function
+// will only check the existence with latest scheme(with prefix).
+func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	data, _ := db.Get(codeKey(hash))
+	return data
+}
+
+// ReadTrieNode retrieves the trie node of the provided hash.
+func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	data, _ := db.Get(hash.Bytes())
+	return data
+}
+
+// HasCode checks if the contract code corresponding to the
+// provided code hash is present in the db.
+func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
+	// Try with the prefixed code scheme first, if not then try with legacy
+	// scheme.
+	if ok := HasCodeWithPrefix(db, hash); ok {
+		return true
+	}
+	ok, _ := db.Has(hash.Bytes())
+	return ok
+}
+
+// HasCodeWithPrefix checks if the contract code corresponding to the
+// provided code hash is present in the db. This function will only check
+// presence using the prefix-scheme.
+func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
+	ok, _ := db.Has(codeKey(hash))
+	return ok
+}
+
+// HasTrieNode checks if the trie node with the provided hash is present in db.
+func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+	ok, _ := db.Has(hash.Bytes())
+	return ok
+}
+
 // WritePreimages writes the provided set of preimages to the database.
 func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
 	for hash, preimage := range preimages {
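Note: the relocated ReadCode/HasCode flip the probe order — prefixed scheme first, legacy raw-hash key as fallback — compared with the removed block in the next hunk. The two key layouts for reference; the single-byte 'c' prefix matches rawdb's codeKey convention but is stated here as an assumption:

    legacyKey := hash.Bytes() // raw 32-byte code hash
    prefixedKey := append([]byte("c"), hash.Bytes()...) // codeKey scheme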
@@ -39,28 +91,6 @@ func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
 	preimageHitCounter.Inc(int64(len(preimages)))
 }
 
-// ReadCode retrieves the contract code of the provided code hash.
-func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
-	// Try with the legacy code scheme first, if not then try with current
-	// scheme. Since most of the code will be found with legacy scheme.
-	//
-	// todo(rjl493456442) change the order when we forcibly upgrade the code
-	// scheme with snapshot.
-	data, _ := db.Get(hash[:])
-	if len(data) != 0 {
-		return data
-	}
-	return ReadCodeWithPrefix(db, hash)
-}
-
-// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
-// The main difference between this function and ReadCode is this function
-// will only check the existence with latest scheme(with prefix).
-func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
-	data, _ := db.Get(codeKey(hash))
-	return data
-}
-
 // WriteCode writes the provided contract code database.
 func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
 	if err := db.Put(codeKey(hash), code); err != nil {
@@ -68,6 +98,13 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
 	}
 }
 
+// WriteTrieNode writes the provided trie node database.
+func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
+	if err := db.Put(hash.Bytes(), node); err != nil {
+		log.Crit("Failed to store trie node", "err", err)
+	}
+}
+
 // DeleteCode deletes the specified contract code from the database.
 func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
 	if err := db.Delete(codeKey(hash)); err != nil {
@@ -75,19 +112,6 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
 	}
 }
 
-// ReadTrieNode retrieves the trie node of the provided hash.
-func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
-	data, _ := db.Get(hash.Bytes())
-	return data
-}
-
-// WriteTrieNode writes the provided trie node database.
-func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
-	if err := db.Put(hash.Bytes(), node); err != nil {
-		log.Crit("Failed to store trie node", "err", err)
-	}
-}
-
 // DeleteTrieNode deletes the specified trie node from the database.
 func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
 	if err := db.Delete(hash.Bytes()); err != nil {
core/rawdb/accessors_sync.go (new file, 80 lines)
@@ -0,0 +1,80 @@
+// Copyright 2021 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+	"bytes"
+
+	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
+)
+
+// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
+func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
+	data, _ := db.Get(skeletonSyncStatusKey)
+	return data
+}
+
+// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
+func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
+	if err := db.Put(skeletonSyncStatusKey, status); err != nil {
+		log.Crit("Failed to store skeleton sync status", "err", err)
+	}
+}
+
+// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
+// shutdown
+func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
+	if err := db.Delete(skeletonSyncStatusKey); err != nil {
+		log.Crit("Failed to remove skeleton sync status", "err", err)
+	}
+}
+
+// ReadSkeletonHeader retrieves a block header from the skeleton sync store,
+func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
+	data, _ := db.Get(skeletonHeaderKey(number))
+	if len(data) == 0 {
+		return nil
+	}
+	header := new(types.Header)
+	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
+		log.Error("Invalid skeleton header RLP", "number", number, "err", err)
+		return nil
+	}
+	return header
+}
+
+// WriteSkeletonHeader stores a block header into the skeleton sync store.
+func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
+	data, err := rlp.EncodeToBytes(header)
+	if err != nil {
+		log.Crit("Failed to RLP encode header", "err", err)
+	}
+	key := skeletonHeaderKey(header.Number.Uint64())
+	if err := db.Put(key, data); err != nil {
+		log.Crit("Failed to store skeleton header", "err", err)
+	}
+}
+
+// DeleteSkeletonHeader removes all block header data associated with a hash.
+func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
+	if err := db.Delete(skeletonHeaderKey(number)); err != nil {
+		log.Crit("Failed to delete skeleton header", "err", err)
+	}
+}
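Note: a round-trip through the new skeleton accessors might look like the sketch below (db and header are assumptions); skeleton headers are keyed by block number rather than hash:

    rawdb.WriteSkeletonHeader(db, header)
    if h := rawdb.ReadSkeletonHeader(db, header.Number.Uint64()); h == nil {
        log.Error("Skeleton header unreadable after write")
    }
    rawdb.DeleteSkeletonHeader(db, header.Number.Uint64())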
@@ -99,6 +99,11 @@ func (db *nofreezedb) Ancients() (uint64, error) {
 	return 0, errNotSupported
 }
 
+// Tail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) Tail() (uint64, error) {
+	return 0, errNotSupported
+}
+
 // AncientSize returns an error as we don't have a backing chain freezer.
 func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
 	return 0, errNotSupported
@@ -109,8 +114,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
 	return 0, errNotSupported
 }
 
-// TruncateAncients returns an error as we don't have a backing chain freezer.
-func (db *nofreezedb) TruncateAncients(items uint64) error {
+// TruncateHead returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateHead(items uint64) error {
+	return errNotSupported
+}
+
+// TruncateTail returns an error as we don't have a backing chain freezer.
+func (db *nofreezedb) TruncateTail(items uint64) error {
 	return errNotSupported
 }
 
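Note: the rename splits ancient-store truncation into two directions. On a freezer-backed database (the nofreezedb stubs here just return errNotSupported), assuming db and an item count n:

    _ = db.TruncateHead(n) // drop the newest entries, keeping items below n
    _ = db.TruncateTail(n) // drop the oldest entries, keeping items from n up

TruncateHead is what the receipt-import error path at the top of this diff now calls after a failed insert.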
@@ -135,6 +145,12 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (e
 	return fn(db)
 }
 
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
+	return errNotSupported
+}
+
 // NewDatabase creates a high level database on top of a given key-value data
 // store without a freezer moving immutable chain segments into cold storage.
 func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -211,7 +227,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
 			// Block #1 is still in the database, we're allowed to init a new feezer
 		}
 		// Otherwise, the head header is still the genesis, we're allowed to init a new
-		// feezer.
+		// freezer.
 	}
 }
 // Freezer is consistent with the key-value database, permit combining the two
@@ -321,6 +337,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		storageSnaps  stat
 		preimages     stat
 		bloomBits     stat
+		beaconHeaders stat
 		cliqueSnaps   stat
 
 		// Ancient store statistics
@@ -375,10 +392,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 			preimages.Add(size)
 		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
 			metadata.Add(size)
+		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
+			metadata.Add(size)
 		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
 			bloomBits.Add(size)
 		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
 			bloomBits.Add(size)
+		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
+			beaconHeaders.Add(size)
 		case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
 			cliqueSnaps.Add(size)
 		case bytes.HasPrefix(key, []byte("cht-")) ||
@@ -395,7 +416,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
 				fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
 				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
-				uncleanShutdownKey, badBlockKey, transitionStatusKey,
+				uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
 			} {
 				if bytes.Equal(key, meta) {
 					metadata.Add(size)
@@ -441,6 +462,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
 		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
 		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
 		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
+		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
 		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
 		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
 		{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
@@ -19,6 +19,7 @@ package rawdb
 import (
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"math"
 	"os"
 	"path/filepath"
@@ -66,7 +67,7 @@ const (
 	freezerTableSize = 2 * 1000 * 1000 * 1000
 )
 
-// freezer is an memory mapped append-only database to store immutable chain data
+// freezer is a memory mapped append-only database to store immutable chain data
 // into flat files:
 //
 // - The append only nature ensures that disk writes are minimized.
@@ -78,6 +79,7 @@ type freezer struct {
 	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
 	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
 	frozen    uint64 // Number of blocks already frozen
+	tail      uint64 // Number of the first stored item in the freezer
 	threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
 
 	// This lock synchronizes writers and the truncate operation, as well as
@@ -133,7 +135,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
 
 	// Create the tables.
 	for name, disableSnappy := range tables {
-		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
+		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
 		if err != nil {
 			for _, table := range freezer.tables {
 				table.Close()
@@ -144,8 +146,15 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
 		freezer.tables[name] = table
 	}
 
-	// Truncate all tables to common length.
-	if err := freezer.repair(); err != nil {
+	if freezer.readonly {
+		// In readonly mode only validate, don't truncate.
+		// validate also sets `freezer.frozen`.
+		err = freezer.validate()
+	} else {
+		// Truncate all tables to common length.
+		err = freezer.repair()
+	}
+	if err != nil {
 		for _, table := range freezer.tables {
 			table.Close()
 		}
@@ -219,6 +228,11 @@ func (f *freezer) Ancients() (uint64, error) {
 	return atomic.LoadUint64(&f.frozen), nil
 }
 
+// Tail returns the number of the first stored item in the freezer.
+func (f *freezer) Tail() (uint64, error) {
+	return atomic.LoadUint64(&f.tail), nil
+}
+
 // AncientSize returns the ancient size of the specified category.
 func (f *freezer) AncientSize(kind string) (uint64, error) {
 	// This needs the write lock to avoid data races on table fields.
@@ -254,7 +268,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
 	if err != nil {
 		// The write operation has failed. Go back to the previous item position.
 		for name, table := range f.tables {
-			err := table.truncate(prevItem)
+			err := table.truncateHead(prevItem)
 			if err != nil {
 				log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
 			}
@@ -274,8 +288,8 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
 	return writeSize, nil
 }
 
-// TruncateAncients discards any recent data above the provided threshold number.
-func (f *freezer) TruncateAncients(items uint64) error {
+// TruncateHead discards any recent data above the provided threshold number.
+func (f *freezer) TruncateHead(items uint64) error {
 	if f.readonly {
 		return errReadOnly
 	}
@@ -286,7 +300,7 @@ func (f *freezer) TruncateAncients(items uint64) error {
 		return nil
 	}
 	for _, table := range f.tables {
-		if err := table.truncate(items); err != nil {
+		if err := table.truncateHead(items); err != nil {
 			return err
 		}
 	}
@@ -294,6 +308,26 @@ func (f *freezer) TruncateAncients(items uint64) error {
 	return nil
 }
 
+// TruncateTail discards all data below the provided threshold number.
+func (f *freezer) TruncateTail(tail uint64) error {
+	if f.readonly {
+		return errReadOnly
+	}
+	f.writeLock.Lock()
+	defer f.writeLock.Unlock()
+
+	if atomic.LoadUint64(&f.tail) >= tail {
+		return nil
+	}
+	for _, table := range f.tables {
+		if err := table.truncateTail(tail); err != nil {
+			return err
+		}
+	}
+	atomic.StoreUint64(&f.tail, tail)
+	return nil
+}
+
 // Sync flushes all data tables to disk.
 func (f *freezer) Sync() error {
 	var errs []error
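Together, TruncateHead and TruncateTail maintain the invariant tail <= frozen, and each is a no-op when the target is already satisfied. A usage sketch, assuming f is a *freezer currently holding items [0, 1000):

    frozen, _ := f.Ancients() // 1000
    tail, _ := f.Tail()       // 0

    _ = f.TruncateTail(100) // items [0, 100) become inaccessible
    _ = f.TruncateHead(900) // items [900, 1000) are dropped

    frozen, _ = f.Ancients() // now 900
    tail, _ = f.Tail()       // now 100
    fmt.Println(frozen, tail)

    _ = f.TruncateTail(50) // no-op: the tail is already past 50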
@@ -308,21 +342,59 @@ func (f *freezer) Sync() error {
 	return nil
 }
 
+// validate checks that every table has the same length.
+// Used instead of `repair` in readonly mode.
+func (f *freezer) validate() error {
+	if len(f.tables) == 0 {
+		return nil
+	}
+	var (
+		length uint64
+		name   string
+	)
+	// Hack to get length of any table
+	for kind, table := range f.tables {
+		length = atomic.LoadUint64(&table.items)
+		name = kind
+		break
+	}
+	// Now check every table against that length
+	for kind, table := range f.tables {
+		items := atomic.LoadUint64(&table.items)
+		if length != items {
+			return fmt.Errorf("freezer tables %s and %s have differing lengths: %d != %d", kind, name, items, length)
+		}
+	}
+	atomic.StoreUint64(&f.frozen, length)
+	return nil
+}
+
 // repair truncates all data tables to the same length.
 func (f *freezer) repair() error {
-	min := uint64(math.MaxUint64)
+	var (
+		head = uint64(math.MaxUint64)
+		tail = uint64(0)
+	)
 	for _, table := range f.tables {
 		items := atomic.LoadUint64(&table.items)
-		if min > items {
-			min = items
+		if head > items {
+			head = items
+		}
+		hidden := atomic.LoadUint64(&table.itemHidden)
+		if hidden > tail {
+			tail = hidden
 		}
 	}
 	for _, table := range f.tables {
-		if err := table.truncate(min); err != nil {
+		if err := table.truncateHead(head); err != nil {
+			return err
+		}
+		if err := table.truncateTail(tail); err != nil {
 			return err
 		}
 	}
-	atomic.StoreUint64(&f.frozen, min)
+	atomic.StoreUint64(&f.frozen, head)
+	atomic.StoreUint64(&f.tail, tail)
 	return nil
 }
 
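The folding in repair is easy to check by hand: the common head becomes the minimum item count, and the common tail the maximum hidden count, across all tables. A self-contained worked example with made-up numbers:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        items := map[string]uint64{"headers": 120, "bodies": 118}
        hidden := map[string]uint64{"headers": 10, "bodies": 12}

        head, tail := uint64(math.MaxUint64), uint64(0)
        for name := range items {
            if items[name] < head {
                head = items[name]
            }
            if hidden[name] > tail {
                tail = hidden[name]
            }
        }
        // Every table is then truncated to span [tail, head).
        fmt.Println(head, tail) // 118 12
    }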
@@ -546,3 +618,116 @@ func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []
 
 	return hashes, err
 }
+
+// convertLegacyFn takes a raw freezer entry in an older format and
+// returns it in the new format.
+type convertLegacyFn = func([]byte) ([]byte, error)
+
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
+	if f.readonly {
+		return errReadOnly
+	}
+	f.writeLock.Lock()
+	defer f.writeLock.Unlock()
+
+	table, ok := f.tables[kind]
+	if !ok {
+		return errUnknownTable
+	}
+	// forEach iterates every entry in the table serially and in order, calling `fn`
+	// with the item as argument. If `fn` returns an error the iteration stops
+	// and that error will be returned.
+	forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
+		var (
+			items     = atomic.LoadUint64(&t.items)
+			batchSize = uint64(1024)
+			maxBytes  = uint64(1024 * 1024)
+		)
+		for i := offset; i < items; {
+			if i+batchSize > items {
+				batchSize = items - i
+			}
+			data, err := t.RetrieveItems(i, batchSize, maxBytes)
+			if err != nil {
+				return err
+			}
+			for j, item := range data {
+				if err := fn(i+uint64(j), item); err != nil {
+					return err
+				}
+			}
+			i += uint64(len(data))
+		}
+		return nil
+	}
+	// TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
+	// process assumes no deletion at tail and needs to be modified to account for that.
+	if table.itemOffset > 0 || table.itemHidden > 0 {
+		return fmt.Errorf("migration not supported for tail-deleted freezers")
+	}
+	ancientsPath := filepath.Dir(table.index.Name())
+	// Set up new dir for the migrated table, the content of which
+	// we'll at the end move over to the ancients dir.
+	migrationPath := filepath.Join(ancientsPath, "migration")
+	newTable, err := NewFreezerTable(migrationPath, kind, FreezerNoSnappy[kind], false)
+	if err != nil {
+		return err
+	}
+	var (
+		batch  = newTable.newBatch()
+		out    []byte
+		start  = time.Now()
+		logged = time.Now()
+		offset = newTable.items
+	)
+	if offset > 0 {
+		log.Info("found previous migration attempt", "migrated", offset)
+	}
+	// Iterate through entries and transform them
+	if err := forEach(table, offset, func(i uint64, blob []byte) error {
+		if i%10000 == 0 && time.Since(logged) > 16*time.Second {
+			log.Info("Processing legacy elements", "count", i, "elapsed", common.PrettyDuration(time.Since(start)))
+			logged = time.Now()
+		}
+		out, err = convert(blob)
+		if err != nil {
+			return err
+		}
+		if err := batch.AppendRaw(i, out); err != nil {
+			return err
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+	if err := batch.commit(); err != nil {
+		return err
+	}
+	log.Info("Replacing old table files with migrated ones", "elapsed", common.PrettyDuration(time.Since(start)))
+	// Release and delete old table files. Note this won't
+	// delete the index file.
+	table.releaseFilesAfter(0, true)
+
+	if err := newTable.Close(); err != nil {
+		return err
+	}
+	files, err := ioutil.ReadDir(migrationPath)
+	if err != nil {
+		return err
+	}
+	// Move migrated files to ancients dir.
+	for _, f := range files {
+		// This will replace the old index file as a side-effect.
+		if err := os.Rename(filepath.Join(migrationPath, f.Name()), filepath.Join(ancientsPath, f.Name())); err != nil {
+			return err
+		}
+	}
+	// Delete by now empty dir.
+	if err := os.Remove(migrationPath); err != nil {
+		return err
+	}
+
+	return nil
+}
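MigrateTable only drives the iteration, batching and final file swap; the actual format change is the caller-supplied convertLegacyFn. A hypothetical converter for illustration (the one-byte legacy version prefix is invented, not part of the source):

    // stripVersionByte drops an assumed one-byte format tag from each
    // legacy entry; a real converter would do its own re-encoding.
    var stripVersionByte convertLegacyFn = func(blob []byte) ([]byte, error) {
        if len(blob) == 0 {
            return nil, errors.New("empty legacy entry")
        }
        return blob[1:], nil
    }

    // err := f.MigrateTable("receipts", stripVersionByte)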
@@ -191,7 +191,7 @@ func (batch *freezerTableBatch) commit() error {
 	dataSize := int64(len(batch.dataBuffer))
 	batch.dataBuffer = batch.dataBuffer[:0]
 
-	// Write index.
+	// Write indices.
 	_, err = batch.t.index.Write(batch.indexBuffer)
 	if err != nil {
 		return err
core/rawdb/freezer_meta.go (new file, +109 lines)

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>

package rawdb

import (
	"io"
	"os"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

const freezerVersion = 1 // The initial version tag of freezer table metadata

// freezerTableMeta wraps all the metadata of the freezer table.
type freezerTableMeta struct {
	// Version is the versioning descriptor of the freezer table.
	Version uint16

	// VirtualTail indicates how many items have been marked as deleted.
	// Its value is equal to the number of items removed from the table
	// plus the number of items hidden in the table, so it should never
	// be lower than the "actual tail".
	VirtualTail uint64
}

// newMetadata initializes the metadata object with the given virtual tail.
func newMetadata(tail uint64) *freezerTableMeta {
	return &freezerTableMeta{
		Version:     freezerVersion,
		VirtualTail: tail,
	}
}

// readMetadata reads the metadata of the freezer table from the
// given metadata file.
func readMetadata(file *os.File) (*freezerTableMeta, error) {
	_, err := file.Seek(0, io.SeekStart)
	if err != nil {
		return nil, err
	}
	var meta freezerTableMeta
	if err := rlp.Decode(file, &meta); err != nil {
		return nil, err
	}
	return &meta, nil
}

// writeMetadata writes the metadata of the freezer table into the
// given metadata file.
func writeMetadata(file *os.File, meta *freezerTableMeta) error {
	_, err := file.Seek(0, io.SeekStart)
	if err != nil {
		return err
	}
	return rlp.Encode(file, meta)
}

// loadMetadata loads the metadata from the given metadata file.
// Initializes the metadata file with the given "actual tail" if
// it's empty.
func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
	stat, err := file.Stat()
	if err != nil {
		return nil, err
	}
	// Write the metadata with the given actual tail into metadata file
	// if it's non-existent. There are two possible scenarios here:
	// - the freezer table is empty
	// - the freezer table is legacy
	// In both cases, write the meta into the file with the actual tail
	// as the virtual tail.
	if stat.Size() == 0 {
		m := newMetadata(tail)
		if err := writeMetadata(file, m); err != nil {
			return nil, err
		}
		return m, nil
	}
	m, err := readMetadata(file)
	if err != nil {
		return nil, err
	}
	// Update the virtual tail with the given actual tail if it's even
	// lower than it. Theoretically it shouldn't happen at all, print
	// a warning here.
	if m.VirtualTail < tail {
		log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
		m.VirtualTail = tail
		if err := writeMetadata(file, m); err != nil {
			return nil, err
		}
	}
	return m, nil
}
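Because the metadata file is a single RLP-encoded struct written at offset zero, its on-disk format can be reproduced in a few lines. A sketch assuming the types above, inside the rawdb package with bytes and rlp imported:

    // The file body for newMetadata(7) is exactly this blob: rlp([1, 7]).
    buf := new(bytes.Buffer)
    if err := rlp.Encode(buf, newMetadata(7)); err != nil {
        panic(err)
    }
    var m freezerTableMeta
    if err := rlp.Decode(buf, &m); err != nil {
        panic(err)
    }
    // m.Version == 1 && m.VirtualTail == 7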
core/rawdb/freezer_meta_test.go (new file, +61 lines)

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>

package rawdb

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestReadWriteFreezerTableMeta(t *testing.T) {
	f, err := ioutil.TempFile(os.TempDir(), "*")
	if err != nil {
		t.Fatalf("Failed to create file %v", err)
	}
	err = writeMetadata(f, newMetadata(100))
	if err != nil {
		t.Fatalf("Failed to write metadata %v", err)
	}
	meta, err := readMetadata(f)
	if err != nil {
		t.Fatalf("Failed to read metadata %v", err)
	}
	if meta.Version != freezerVersion {
		t.Fatalf("Unexpected version field")
	}
	if meta.VirtualTail != uint64(100) {
		t.Fatalf("Unexpected virtual tail field")
	}
}

func TestInitializeFreezerTableMeta(t *testing.T) {
	f, err := ioutil.TempFile(os.TempDir(), "*")
	if err != nil {
		t.Fatalf("Failed to create file %v", err)
	}
	meta, err := loadMetadata(f, uint64(100))
	if err != nil {
		t.Fatalf("Failed to read metadata %v", err)
	}
	if meta.Version != freezerVersion {
		t.Fatalf("Unexpected version field")
	}
	if meta.VirtualTail != uint64(100) {
		t.Fatalf("Unexpected virtual tail field")
	}
}
@@ -47,20 +47,19 @@ var (
 )
 
 // indexEntry contains the number/id of the file that the data resides in, aswell as the
-// offset within the file to the end of the data
+// offset within the file to the end of the data.
 // In serialized form, the filenum is stored as uint16.
 type indexEntry struct {
-	filenum uint32 // stored as uint16 ( 2 bytes)
-	offset  uint32 // stored as uint32 ( 4 bytes)
+	filenum uint32 // stored as uint16 ( 2 bytes )
+	offset  uint32 // stored as uint32 ( 4 bytes )
 }
 
 const indexEntrySize = 6
 
 // unmarshalBinary deserializes binary b into the rawIndex entry.
-func (i *indexEntry) unmarshalBinary(b []byte) error {
+func (i *indexEntry) unmarshalBinary(b []byte) {
 	i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
 	i.offset = binary.BigEndian.Uint32(b[2:6])
-	return nil
 }
 
 // append adds the encoded entry to the end of b.
@@ -75,14 +74,14 @@ func (i *indexEntry) append(b []byte) []byte {
 // bounds returns the start- and end- offsets, and the file number of where to
 // read there data item marked by the two index entries. The two entries are
 // assumed to be sequential.
-func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
-	if start.filenum != end.filenum {
+func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
+	if i.filenum != end.filenum {
 		// If a piece of data 'crosses' a data-file,
 		// it's actually in one piece on the second data-file.
 		// We return a zero-indexEntry for the second file as start
 		return 0, end.offset, end.filenum
 	}
-	return start.offset, end.offset, end.filenum
+	return i.offset, end.offset, end.filenum
 }
 
 // freezerTable represents a single chained data table within the freezer (e.g. blocks).
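The six-byte layout is simple enough to verify by hand. A small sketch inside the package, after the change above drops the unused error return:

    // filenum 3 as a big-endian uint16, then offset 4096 as a uint32.
    b := []byte{0x00, 0x03, 0x00, 0x00, 0x10, 0x00}

    var e indexEntry
    e.unmarshalBinary(b) // e.filenum == 3, e.offset == 4096

    // append re-serializes the entry back to the same six bytes.
    out := e.append(nil) // bytes.Equal(out, b) == true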
@@ -92,22 +91,28 @@ type freezerTable struct {
 	// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
 	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
 	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
 	items      uint64 // Number of items stored in the table (including items removed from tail)
+	itemOffset uint64 // Number of items removed from the table
 
-	noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+	// itemHidden is the number of items marked as deleted. Tail deletion is
+	// only supported at file level which means the actual deletion will be
+	// delayed until the entire data file is marked as deleted. Before that
+	// these items will be hidden to prevent being visited again. The value
+	// should never be lower than itemOffset.
+	itemHidden uint64
+
+	noCompression bool // if true, disables snappy compression. Note: does not work retroactively
+	readonly      bool
 	maxFileSize   uint32 // Max file size for data-files
 	name          string
 	path          string
 
 	head   *os.File            // File descriptor for the data head of the table
+	index  *os.File            // File descriptor for the indexEntry file of the table
+	meta   *os.File            // File descriptor for metadata of the table
 	files  map[uint32]*os.File // open files
 	headId uint32              // number of the currently active head file
 	tailId uint32              // number of the earliest file
-	index *os.File // File descriptor for the indexEntry file of the table
-
-	// In the case that old items are deleted (from the tail), we use itemOffset
-	// to count how many historic items have gone missing.
-	itemOffset uint32 // Offset (number of discarded items)
 
 	headBytes  int64         // Number of bytes written to the head file
 	readMeter  metrics.Meter // Meter for measuring the effective amount of data read
@@ -119,71 +124,61 @@ type freezerTable struct {
 }
 
 // NewFreezerTable opens the given path as a freezer table.
-func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
-	return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
-}
-
-// openFreezerFileForAppend opens a freezer table file and seeks to the end
-func openFreezerFileForAppend(filename string) (*os.File, error) {
-	// Open the file without the O_APPEND flag
-	// because it has differing behaviour during Truncate operations
-	// on different OS's
-	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
-	if err != nil {
-		return nil, err
-	}
-	// Seek to end for append
-	if _, err = file.Seek(0, io.SeekEnd); err != nil {
-		return nil, err
-	}
-	return file, nil
-}
-
-// openFreezerFileForReadOnly opens a freezer table file for read only access
-func openFreezerFileForReadOnly(filename string) (*os.File, error) {
-	return os.OpenFile(filename, os.O_RDONLY, 0644)
-}
-
-// openFreezerFileTruncated opens a freezer table making sure it is truncated
-func openFreezerFileTruncated(filename string) (*os.File, error) {
-	return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
-}
-
-// truncateFreezerFile resizes a freezer table file and seeks to the end
-func truncateFreezerFile(file *os.File, size int64) error {
-	if err := file.Truncate(size); err != nil {
-		return err
-	}
-	// Seek to end for append
-	if _, err := file.Seek(0, io.SeekEnd); err != nil {
-		return err
-	}
-	return nil
+func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
+	return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
 }
 
 // newTable opens a freezer table, creating the data and index files if they are
-// non existent. Both files are truncated to the shortest common length to ensure
+// non-existent. Both files are truncated to the shortest common length to ensure
 // they don't go out of sync.
-func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
+func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
 	// Ensure the containing directory exists and open the indexEntry file
 	if err := os.MkdirAll(path, 0755); err != nil {
 		return nil, err
 	}
 	var idxName string
 	if noCompression {
-		// Raw idx
-		idxName = fmt.Sprintf("%s.ridx", name)
+		idxName = fmt.Sprintf("%s.ridx", name) // raw index file
 	} else {
-		// Compressed idx
-		idxName = fmt.Sprintf("%s.cidx", name)
+		idxName = fmt.Sprintf("%s.cidx", name) // compressed index file
 	}
-	offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
-	if err != nil {
-		return nil, err
+	var (
+		err   error
+		index *os.File
+		meta  *os.File
+	)
+	if readonly {
+		// Will fail if table doesn't exist
+		index, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
+		if err != nil {
+			return nil, err
+		}
+		// TODO(rjl493456442) change it to read-only mode. Open the metadata file
+		// in rw mode. It's a temporary solution for now and should be changed
+		// whenever the tail deletion is actually used. The reason for this hack is
+		// the additional meta file for each freezer table is added in order to support
+		// tail deletion, but for most legacy nodes this file is missing. This check
+		// will suddenly break lots of database relevant commands. So the metadata file
+		// is always opened for mutation and nothing else will be written except
+		// the initialization.
+		meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
+		if err != nil {
+			return nil, err
+		}
+		meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
+		if err != nil {
+			return nil, err
+		}
 	}
 	// Create the table and repair any past inconsistency
 	tab := &freezerTable{
-		index:      offsets,
+		index:      index,
+		meta:       meta,
 		files:      make(map[uint32]*os.File),
 		readMeter:  readMeter,
 		writeMeter: writeMeter,
@@ -192,6 +187,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
 		path:          path,
 		logger:        log.New("database", path, "table", name),
 		noCompression: noCompression,
+		readonly:      readonly,
 		maxFileSize:   maxFilesize,
 	}
 	if err := tab.repair(); err != nil {
@@ -209,7 +205,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
 	return tab, nil
 }
 
-// repair cross checks the head and the index file and truncates them to
+// repair cross-checks the head and the index file and truncates them to
 // be in sync with each other after a potential crash / data loss.
 func (t *freezerTable) repair() error {
 	// Create a temporary offset buffer to init files with and read indexEntry into
@@ -247,12 +243,32 @@ func (t *freezerTable) repair() error {
 		t.index.ReadAt(buffer, 0)
 		firstIndex.unmarshalBinary(buffer)
 
+		// Assign the tail fields with the first stored index.
+		// The total removed items is represented with an uint32,
+		// which is not enough in theory but enough in practice.
+		// TODO: use uint64 to represent total removed items.
 		t.tailId = firstIndex.filenum
-		t.itemOffset = firstIndex.offset
+		t.itemOffset = uint64(firstIndex.offset)
 
-		t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
-		lastIndex.unmarshalBinary(buffer)
-		t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+		// Load metadata from the file
+		meta, err := loadMetadata(t.meta, t.itemOffset)
+		if err != nil {
+			return err
+		}
+		t.itemHidden = meta.VirtualTail
+
+		// Read the last index, use the default value in case the freezer is empty
+		if offsetsSize == indexEntrySize {
+			lastIndex = indexEntry{filenum: t.tailId, offset: 0}
+		} else {
+			t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+			lastIndex.unmarshalBinary(buffer)
+		}
+		if t.readonly {
+			t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
+		} else {
+			t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
+		}
 		if err != nil {
 			return err
 		}
@@ -263,7 +279,6 @@ func (t *freezerTable) repair() error {
 
 	// Keep truncating both files until they come in sync
 	contentExp = int64(lastIndex.offset)
-
 	for contentExp != contentSize {
 		// Truncate the head file to the last offset pointer
 		if contentExp < contentSize {
@@ -280,9 +295,16 @@ func (t *freezerTable) repair() error {
 				return err
 			}
 			offsetsSize -= indexEntrySize
-			t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+
+			// Read the new head index, use the default value in case
+			// the freezer is already empty.
 			var newLastIndex indexEntry
-			newLastIndex.unmarshalBinary(buffer)
+			if offsetsSize == indexEntrySize {
+				newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
+			} else {
+				t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
+				newLastIndex.unmarshalBinary(buffer)
+			}
 			// We might have slipped back into an earlier head-file here
 			if newLastIndex.filenum != lastIndex.filenum {
 				// Release earlier opened file
@@ -301,18 +323,30 @@ func (t *freezerTable) repair() error {
 			contentExp = int64(lastIndex.offset)
 		}
 	}
-	// Ensure all reparation changes have been written to disk
-	if err := t.index.Sync(); err != nil {
-		return err
-	}
-	if err := t.head.Sync(); err != nil {
-		return err
+	// Sync() fails for read-only files on windows.
+	if !t.readonly {
+		// Ensure all reparation changes have been written to disk
+		if err := t.index.Sync(); err != nil {
+			return err
+		}
+		if err := t.head.Sync(); err != nil {
+			return err
+		}
+		if err := t.meta.Sync(); err != nil {
+			return err
+		}
 	}
 	// Update the item and byte counters and return
-	t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
+	t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
 	t.headBytes = contentSize
 	t.headId = lastIndex.filenum
 
+	// Delete the leftover files because of head deletion
+	t.releaseFilesAfter(t.headId, true)
+
+	// Delete the leftover files because of tail deletion
+	t.releaseFilesBefore(t.tailId, true)
+
 	// Close opened files and preopen all files
 	if err := t.preopen(); err != nil {
 		return err
@@ -328,27 +362,35 @@ func (t *freezerTable) repair() error {
 func (t *freezerTable) preopen() (err error) {
 	// The repair might have already opened (some) files
 	t.releaseFilesAfter(0, false)
+
 	// Open all except head in RDONLY
 	for i := t.tailId; i < t.headId; i++ {
 		if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
 			return err
 		}
 	}
-	// Open head in read/write
-	t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+	if t.readonly {
+		t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
+	} else {
+		// Open head in read/write
+		t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
+	}
 	return err
 }
 
-// truncate discards any recent data above the provided threshold number.
-func (t *freezerTable) truncate(items uint64) error {
+// truncateHead discards any recent data above the provided threshold number.
+func (t *freezerTable) truncateHead(items uint64) error {
 	t.lock.Lock()
 	defer t.lock.Unlock()
 
-	// If our item count is correct, don't do anything
+	// Ensure the given truncate target falls in the correct range
 	existing := atomic.LoadUint64(&t.items)
 	if existing <= items {
 		return nil
 	}
+	if items < atomic.LoadUint64(&t.itemHidden) {
+		return errors.New("truncation below tail")
+	}
 	// We need to truncate, save the old size for metrics tracking
 	oldSize, err := t.sizeNolock()
 	if err != nil {
@@ -360,17 +402,24 @@ func (t *freezerTable) truncateHead(items uint64) error {
 		log = t.logger.Warn // Only loud warn if we delete multiple items
 	}
 	log("Truncating freezer table", "items", existing, "limit", items)
-	if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
+
+	// Truncate the index file first, the tail position is also considered
+	// when calculating the new freezer table length.
+	length := items - atomic.LoadUint64(&t.itemOffset)
+	if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
 		return err
 	}
 	// Calculate the new expected size of the data file and truncate it
-	buffer := make([]byte, indexEntrySize)
-	if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
-		return err
-	}
 	var expected indexEntry
-	expected.unmarshalBinary(buffer)
+	if length == 0 {
+		expected = indexEntry{filenum: t.tailId, offset: 0}
+	} else {
+		buffer := make([]byte, indexEntrySize)
+		if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
+			return err
+		}
+		expected.unmarshalBinary(buffer)
+	}
 	// We might need to truncate back to older files
 	if expected.filenum != t.headId {
 		// If already open for reading, force-reopen for writing
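The index arithmetic above is offset-relative, since the index file only holds entries for items never removed from the tail. A worked example with assumed counters:

    // Hypothetical numbers for the index-file cut in truncateHead:
    itemOffset := uint64(40) // items already removed from the tail
    items := uint64(50)      // truncation target

    length := items - itemOffset // 10 items remain addressable
    // The index file keeps length+1 entries (the extra one is the marker):
    indexBytes := int64(length+1) * indexEntrySize // 11 * 6 = 66 bytes
    _ = indexBytes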
@@ -399,7 +448,110 @@ func (t *freezerTable) truncate(items uint64) error {
 		return err
 	}
 	t.sizeGauge.Dec(int64(oldSize - newSize))
+	return nil
+}
+
+// truncateTail discards all data below the provided threshold number.
+func (t *freezerTable) truncateTail(items uint64) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	// Ensure the given truncate target falls in the correct range
+	if atomic.LoadUint64(&t.itemHidden) >= items {
+		return nil
+	}
+	if atomic.LoadUint64(&t.items) < items {
+		return errors.New("truncation above head")
+	}
+	// Load the new tail index by the given new tail position
+	var (
+		newTailId uint32
+		buffer    = make([]byte, indexEntrySize)
+	)
+	if atomic.LoadUint64(&t.items) == items {
+		newTailId = t.headId
+	} else {
+		offset := items - atomic.LoadUint64(&t.itemOffset)
+		if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
+			return err
+		}
+		var newTail indexEntry
+		newTail.unmarshalBinary(buffer)
+		newTailId = newTail.filenum
+	}
+	// Update the virtual tail marker and hide these entries in the table.
+	atomic.StoreUint64(&t.itemHidden, items)
+	if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
+		return err
+	}
+	// Hidden items still fall in the current tail file, no data file
+	// can be dropped.
+	if t.tailId == newTailId {
+		return nil
+	}
+	// Hidden items fall in the incorrect range, return an error.
+	if t.tailId > newTailId {
+		return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
+	}
+	// Hidden items exceed the current tail file, drop the relevant
+	// data files. We need to truncate, save the old size for metrics
+	// tracking.
+	oldSize, err := t.sizeNolock()
+	if err != nil {
+		return err
+	}
+	// Count how many items can be deleted from the file.
+	var (
+		newDeleted = items
+		deleted    = atomic.LoadUint64(&t.itemOffset)
+	)
+	for current := items - 1; current >= deleted; current -= 1 {
+		if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
+			return err
+		}
+		var pre indexEntry
+		pre.unmarshalBinary(buffer)
+		if pre.filenum != newTailId {
+			break
+		}
+		newDeleted = current
+	}
+	// Commit the changes of metadata file first before manipulating
+	// the index file.
+	if err := t.meta.Sync(); err != nil {
+		return err
+	}
+	// Truncate the deleted index entries from the index file.
+	err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
+		tailIndex := indexEntry{
+			filenum: newTailId,
+			offset:  uint32(newDeleted),
+		}
+		_, err := f.Write(tailIndex.append(nil))
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	// Reopen the modified index file to load the changes
+	if err := t.index.Close(); err != nil {
+		return err
+	}
+	t.index, err = openFreezerFileForAppend(t.index.Name())
+	if err != nil {
+		return err
+	}
+	// Release any files before the current tail
+	t.tailId = newTailId
+	atomic.StoreUint64(&t.itemOffset, newDeleted)
+	t.releaseFilesBefore(t.tailId, true)
+
+	// Retrieve the new size and update the total size counter
+	newSize, err := t.sizeNolock()
+	if err != nil {
+		return err
+	}
+	t.sizeGauge.Dec(int64(oldSize - newSize))
 	return nil
 }
 
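Only whole data files are ever reclaimed: the backwards scan locates the first item stored in the new tail file, everything before it is deleted, and later-but-hidden items stay on disk. A worked example under an assumed layout:

    // Assume items 0-49 live in data file 0 and items 50-99 in file 1.
    // truncateTail(60) then works out as:
    //   itemHidden = 60  // items [0, 60) are hidden immediately
    //   newTailId  = 1   // item 60's index entry points at file 1
    //   newDeleted = 50  // first item stored in file 1
    // File 0 is removed and the rewritten index starts with the marker:
    tailMarker := indexEntry{filenum: 1, offset: 50}
    _ = tailMarker // items 50-59 remain on disk but stay hidden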
@@ -414,6 +566,11 @@ func (t *freezerTable) Close() error {
 	}
 	t.index = nil
 
+	if err := t.meta.Close(); err != nil {
+		errs = append(errs, err)
+	}
+	t.meta = nil
+
 	for _, f := range t.files {
 		if err := f.Close(); err != nil {
 			errs = append(errs, err)
@@ -468,6 +625,19 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
 		}
 	}
 }
 
+// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
+func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
+	for fnum, f := range t.files {
+		if fnum < num {
+			delete(t.files, fnum)
+			f.Close()
+			if remove {
+				os.Remove(f.Name())
+			}
+		}
+	}
+}
+
 // getIndices returns the index entries for the given from-item, covering 'count' items.
 // N.B: The actual number of returned indices for N items will always be N+1 (unless an
 // error is returned).
@@ -476,7 +646,7 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
 // it will return error.
 func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
 	// Apply the table-offset
-	from = from - uint64(t.itemOffset)
+	from = from - t.itemOffset
 	// For reading N items, we need N+1 indices.
 	buffer := make([]byte, (count+1)*indexEntrySize)
 	if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@@ -561,18 +731,21 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
 	t.lock.RLock()
 	defer t.lock.RUnlock()
 
-	// Ensure the table and the item is accessible
+	// Ensure the table and the item are accessible
 	if t.index == nil || t.head == nil {
 		return nil, nil, errClosed
 	}
-	itemCount := atomic.LoadUint64(&t.items) // max number
+	var (
+		items  = atomic.LoadUint64(&t.items)      // the total items(head + 1)
+		hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
+	)
 	// Ensure the start is written, not deleted from the tail, and that the
 	// caller actually wants something
-	if itemCount <= start || uint64(t.itemOffset) > start || count == 0 {
+	if items <= start || hidden > start || count == 0 {
 		return nil, nil, errOutOfBounds
 	}
-	if start+count > itemCount {
-		count = itemCount - start
+	if start+count > items {
+		count = items - start
 	}
 	var (
 		output = make([]byte, maxBytes) // Buffer to read data into
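The retrieval window is now bounded on both ends: an item is readable only while hidden <= number < items, and oversized reads are clamped at the head. A runnable sketch of exactly this clamp, with toy values:

package main

import "fmt"

// visible reports whether a read of count items starting at start fits the
// valid window [hidden, items), clamping count at the head when needed.
func visible(items, hidden, start, count uint64) (uint64, bool) {
	if items <= start || hidden > start || count == 0 {
		return 0, false // out of bounds
	}
	if start+count > items {
		count = items - start // clamp to the head
	}
	return count, true
}

func main() {
	fmt.Println(visible(10, 4, 3, 2)) // 0 false: item 3 is hidden behind the tail
	fmt.Println(visible(10, 4, 8, 5)) // 2 true: read clamped at the head
}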
@@ -648,10 +821,10 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
 	return output[:outputSize], sizes, nil
 }
 
-// has returns an indicator whether the specified number data
-// exists in the freezer table.
+// has returns an indicator whether the specified number data is still accessible
+// in the freezer table.
 func (t *freezerTable) has(number uint64) bool {
-	return atomic.LoadUint64(&t.items) > number
+	return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
 }
 
 // size returns the total data size in the freezer table.
@@ -705,6 +878,9 @@ func (t *freezerTable) Sync() error {
 	if err := t.index.Sync(); err != nil {
 		return err
 	}
+	if err := t.meta.Sync(); err != nil {
+		return err
+	}
 	return t.head.Sync()
 }
 
@@ -722,13 +898,20 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
 }
 
 func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
+	meta, err := readMetadata(t.meta)
+	if err != nil {
+		fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
+		return
+	}
+	fmt.Fprintf(w, "Version %d deleted %d, hidden %d\n", meta.Version, atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
+
 	buf := make([]byte, indexEntrySize)
 
 	fmt.Fprintf(w, "| number | fileno | offset |\n")
 	fmt.Fprintf(w, "|--------|--------|--------|\n")
 
 	for i := uint64(start); ; i++ {
-		if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
+		if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
 			break
 		}
 		var entry indexEntry
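The switch to reading at (i+1)*indexEntrySize suggests that index entry zero no longer describes item zero; the TestFreezerOffset hunks further down already treat entry zero as a tail pointer (first file number plus the number of deleted items). A decoding sketch of the assumed 6-byte entry layout (big-endian uint16 file number, big-endian uint32 offset; treat the exact layout as an inference from this diff):

package main

import (
	"encoding/binary"
	"fmt"
)

// unmarshal mirrors the assumed on-disk index entry layout.
func unmarshal(b []byte) (filenum, offset uint32) {
	return uint32(binary.BigEndian.Uint16(b[:2])), binary.BigEndian.Uint32(b[2:6])
}

func main() {
	// Entry zero being reserved for the tail pointer, a dump of item i
	// reads the entry at byte offset (i+1)*6.
	raw := []byte{0x00, 0x02, 0x00, 0x00, 0x00, 0x04} // file 2, offset 4
	fmt.Println(unmarshal(raw))                       // 2 4
}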
@@ -18,13 +18,18 @@ package rawdb
 
 import (
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"math/rand"
 	"os"
 	"path/filepath"
+	"reflect"
+	"sync/atomic"
 	"testing"
+	"testing/quick"
 	"time"
 
+	"github.com/davecgh/go-spew/spew"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/stretchr/testify/require"
 )
@@ -40,7 +45,7 @@ func TestFreezerBasics(t *testing.T) {
 	// set cutoff at 50 bytes
 	f, err := newTable(os.TempDir(),
 		fmt.Sprintf("unittest-%d", rand.Uint64()),
-		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
 	if err != nil {
 		t.Fatal(err)
 	}
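Every newTable call from here down gains a trailing boolean argument. The parameter list itself never appears in these hunks, but the new TestFreezerReadonly test near the end of this file opens tables with the final flag set to true and expects writes to fail, so the extra argument is evidently a read-only switch that the existing call sites opt out of with false. Treat the exact signature as inferred rather than quoted.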
@@ -85,7 +90,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
 		f   *freezerTable
 		err error
 	)
-	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -99,7 +104,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
 	require.NoError(t, batch.commit())
 	f.Close()
 
-	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -116,7 +121,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
 			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
 		}
 		f.Close()
-		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -131,7 +136,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
 
 	// Fill table
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -160,7 +165,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
 
 	// Now open it again
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -183,7 +188,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 
 	// Fill a table and close it
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -204,12 +209,12 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 	}
 	// Remove everything but the first item, and leave data unaligned
 	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
-	idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
+	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
 	idxFile.Close()
 
 	// Now open it again
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -232,7 +237,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
 
 	// And if we open it, we should now be able to read all of them (new values)
 	{
-		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		for y := 1; y < 255; y++ {
 			exp := getChunk(15, ^y)
 			got, err := f.Retrieve(uint64(y))
@@ -254,7 +259,7 @@ func TestSnappyDetection(t *testing.T) {
 
 	// Open with snappy
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -265,7 +270,7 @@ func TestSnappyDetection(t *testing.T) {
 
 	// Open without snappy
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -277,7 +282,7 @@ func TestSnappyDetection(t *testing.T) {
 
 	// Open with snappy
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -309,7 +314,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 
 	// Fill a table and close it
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -345,7 +350,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
 	// 45, 45, 15
 	// with 3+3+1 items
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -366,7 +371,7 @@ func TestFreezerTruncate(t *testing.T) {
 
 	// Fill table
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -382,12 +387,12 @@ func TestFreezerTruncate(t *testing.T) {
 
 	// Reopen, truncate
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
 		defer f.Close()
-		f.truncate(10) // 150 bytes
+		f.truncateHead(10) // 150 bytes
 		if f.items != 10 {
 			t.Fatalf("expected %d items, got %d", 10, f.items)
 		}
@@ -407,7 +412,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 
 	// Fill table
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -440,7 +445,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
 
 	// Reopen
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -475,7 +480,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 
 	// Fill table
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -491,7 +496,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 
 	// Reopen and read all files
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -504,7 +509,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
 		}
 
 		// Now, truncate back to zero
-		f.truncate(0)
+		f.truncateHead(0)
 
 		// Write the data again
 		batch := f.newBatch()
@@ -523,7 +528,7 @@ func TestFreezerOffset(t *testing.T) {
 
 	// Fill table
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -565,18 +570,19 @@ func TestFreezerOffset(t *testing.T) {
 	// Update the index file, so that we store
 	// [ file = 2, offset = 4 ] at index zero
 
-	tailId := uint32(2)     // First file is 2
-	itemOffset := uint32(4) // We have removed four items
 	zeroIndex := indexEntry{
-		filenum: tailId,
-		offset:  itemOffset,
+		filenum: uint32(2), // First file is 2
+		offset:  uint32(4), // We have removed four items
 	}
 	buf := zeroIndex.append(nil)
 
 	// Overwrite index zero
 	copy(indexBuf, buf)
 
 	// Remove the four next indices by overwriting
 	copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
 	indexFile.WriteAt(indexBuf, 0)
 
 	// Need to truncate the moved index items
 	indexFile.Truncate(indexEntrySize * (1 + 2))
 	indexFile.Close()
@@ -584,7 +590,7 @@ func TestFreezerOffset(t *testing.T) {
 
 	// Now open again
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -623,13 +629,12 @@ func TestFreezerOffset(t *testing.T) {
 	// Update the index file, so that we store
 	// [ file = 2, offset = 1M ] at index zero
 
-	tailId := uint32(2)           // First file is 2
-	itemOffset := uint32(1000000) // We have removed 1M items
 	zeroIndex := indexEntry{
-		offset:  itemOffset,
-		filenum: tailId,
+		offset:  uint32(1000000), // We have removed 1M items
+		filenum: uint32(2),       // First file is 2
 	}
 	buf := zeroIndex.append(nil)
 
 	// Overwrite index zero
 	copy(indexBuf, buf)
 	indexFile.WriteAt(indexBuf, 0)
@@ -638,7 +643,7 @@ func TestFreezerOffset(t *testing.T) {
 
 	// Check that existing items have been moved to index 1M.
 	{
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -659,6 +664,171 @@ func TestFreezerOffset(t *testing.T) {
 		}
 	}
 }
+
+func TestTruncateTail(t *testing.T) {
+	t.Parallel()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+	fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
+
+	// Fill table
+	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Write 7 x 20 bytes, splitting out into four files
+	batch := f.newBatch()
+	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
+	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
+	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+	require.NoError(t, batch.commit())
+
+	// nothing to do, all the items should still be there.
+	f.truncateTail(0)
+	fmt.Println(f.dumpIndexString(0, 1000))
+	checkRetrieve(t, f, map[uint64][]byte{
+		0: getChunk(20, 0xFF),
+		1: getChunk(20, 0xEE),
+		2: getChunk(20, 0xdd),
+		3: getChunk(20, 0xcc),
+		4: getChunk(20, 0xbb),
+		5: getChunk(20, 0xaa),
+		6: getChunk(20, 0x11),
+	})
+
+	// truncate single element( item 0 ), deletion is only supported at file level
+	f.truncateTail(1)
+	fmt.Println(f.dumpIndexString(0, 1000))
+	checkRetrieveError(t, f, map[uint64]error{
+		0: errOutOfBounds,
+	})
+	checkRetrieve(t, f, map[uint64][]byte{
+		1: getChunk(20, 0xEE),
+		2: getChunk(20, 0xdd),
+		3: getChunk(20, 0xcc),
+		4: getChunk(20, 0xbb),
+		5: getChunk(20, 0xaa),
+		6: getChunk(20, 0x11),
+	})
+
+	// Reopen the table, the deletion information should be persisted as well
+	f.Close()
+	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	checkRetrieveError(t, f, map[uint64]error{
+		0: errOutOfBounds,
+	})
+	checkRetrieve(t, f, map[uint64][]byte{
+		1: getChunk(20, 0xEE),
+		2: getChunk(20, 0xdd),
+		3: getChunk(20, 0xcc),
+		4: getChunk(20, 0xbb),
+		5: getChunk(20, 0xaa),
+		6: getChunk(20, 0x11),
+	})
+
+	// truncate two elements( item 0, item 1 ), the file 0 should be deleted
+	f.truncateTail(2)
+	checkRetrieveError(t, f, map[uint64]error{
+		0: errOutOfBounds,
+		1: errOutOfBounds,
+	})
+	checkRetrieve(t, f, map[uint64][]byte{
+		2: getChunk(20, 0xdd),
+		3: getChunk(20, 0xcc),
+		4: getChunk(20, 0xbb),
+		5: getChunk(20, 0xaa),
+		6: getChunk(20, 0x11),
+	})
+
+	// Reopen the table, the above testing should still pass
+	f.Close()
+	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+
+	checkRetrieveError(t, f, map[uint64]error{
+		0: errOutOfBounds,
+		1: errOutOfBounds,
+	})
+	checkRetrieve(t, f, map[uint64][]byte{
+		2: getChunk(20, 0xdd),
+		3: getChunk(20, 0xcc),
+		4: getChunk(20, 0xbb),
+		5: getChunk(20, 0xaa),
+		6: getChunk(20, 0x11),
+	})
+
+	// truncate all, the entire freezer should be deleted
+	f.truncateTail(7)
+	checkRetrieveError(t, f, map[uint64]error{
+		0: errOutOfBounds,
+		1: errOutOfBounds,
+		2: errOutOfBounds,
+		3: errOutOfBounds,
+		4: errOutOfBounds,
+		5: errOutOfBounds,
+		6: errOutOfBounds,
+	})
+}
+
+func TestTruncateHead(t *testing.T) {
+	t.Parallel()
+	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
+	fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
+
+	// Fill table
+	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Write 7 x 20 bytes, splitting out into four files
+	batch := f.newBatch()
+	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
+	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
+	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
+	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
+	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+	require.NoError(t, batch.commit())
+
+	f.truncateTail(4) // Tail = 4
+
+	// NewHead is required to be 3, the entire table should be truncated
+	f.truncateHead(4)
+	checkRetrieveError(t, f, map[uint64]error{
+		0: errOutOfBounds, // Deleted by tail
+		1: errOutOfBounds, // Deleted by tail
+		2: errOutOfBounds, // Deleted by tail
+		3: errOutOfBounds, // Deleted by tail
+		4: errOutOfBounds, // Deleted by Head
+		5: errOutOfBounds, // Deleted by Head
+		6: errOutOfBounds, // Deleted by Head
+	})
+
+	// Append new items
+	batch = f.newBatch()
+	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
+	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
+	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
+	require.NoError(t, batch.commit())
+
+	checkRetrieve(t, f, map[uint64][]byte{
+		4: getChunk(20, 0xbb),
+		5: getChunk(20, 0xaa),
+		6: getChunk(20, 0x11),
+	})
+}
 
 func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
 	t.Helper()
 
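TestTruncateTail above encodes the key design point: truncateTail(1) hides item 0 but deletes nothing on disk, because data is reclaimed only at whole-file granularity. A self-contained toy of that bookkeeping (a simplified model; the real table tracks the boundary via index entry zero):

package main

import "fmt"

func main() {
	const itemsPerFile = 2

	hidden := uint64(1) // truncateTail(1): item 0 hidden, file 0 still holds item 1
	fmt.Println("deletable files:", hidden/itemsPerFile) // 0

	hidden = 2 // truncateTail(2): items 0 and 1 hidden, file 0 reclaimable
	fmt.Println("deletable files:", hidden/itemsPerFile) // 1
}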
@@ -726,7 +896,7 @@ func TestSequentialRead(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -736,7 +906,7 @@ func TestSequentialRead(t *testing.T) {
 		f.Close()
 	}
 	{ // Open it, iterate, verify iteration
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -757,7 +927,7 @@ func TestSequentialRead(t *testing.T) {
 	}
 	{ // Open it, iterate, verify byte limit. The byte limit is less than item
 		// size, so each lookup should only return one item
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -786,7 +956,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
 	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
 	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
 	{ // Fill table
-		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -808,7 +978,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
 		{100, 109, 10},
 	} {
 		{
-			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
+			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -829,3 +999,298 @@ func TestSequentialReadByteLimit(t *testing.T) {
 		}
 	}
 }
+
+func TestFreezerReadonly(t *testing.T) {
+	tmpdir := os.TempDir()
+	// Case 1: Check it fails on non-existent file.
+	_, err := newTable(tmpdir,
+		fmt.Sprintf("readonlytest-%d", rand.Uint64()),
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+	if err == nil {
+		t.Fatal("readonly table instantiation should fail for non-existent table")
+	}
+
+	// Case 2: Check that it fails on invalid index length.
+	fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
+	idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
+	if err != nil {
+		t.Errorf("Failed to open index file: %v\n", err)
+	}
+	// size should not be a multiple of indexEntrySize.
+	idxFile.Write(make([]byte, 17))
+	idxFile.Close()
+	_, err = newTable(tmpdir, fname,
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+	if err == nil {
+		t.Errorf("readonly table instantiation should fail for invalid index size")
+	}
+
+	// Case 3: Open a non-readonly table to write some data.
+	// Then corrupt the head file and make sure opening the table
+	// again in readonly triggers an error.
+	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
+	f, err := newTable(tmpdir, fname,
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+	if err != nil {
+		t.Fatalf("failed to instantiate table: %v", err)
+	}
+	writeChunks(t, f, 8, 32)
+	// Corrupt table file
+	if _, err := f.head.Write([]byte{1, 1}); err != nil {
+		t.Fatal(err)
+	}
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	_, err = newTable(tmpdir, fname,
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+	if err == nil {
+		t.Errorf("readonly table instantiation should fail for corrupt table file")
+	}
+
+	// Case 4: Write some data to a table and later re-open it as readonly.
+	// Should be successful.
+	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
+	f, err = newTable(tmpdir, fname,
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+	if err != nil {
+		t.Fatalf("failed to instantiate table: %v\n", err)
+	}
+	writeChunks(t, f, 32, 128)
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	f, err = newTable(tmpdir, fname,
+		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
+	if err != nil {
+		t.Fatal(err)
+	}
+	v, err := f.Retrieve(10)
+	if err != nil {
+		t.Fatal(err)
+	}
+	exp := getChunk(128, 10)
+	if !bytes.Equal(v, exp) {
+		t.Errorf("retrieved value is incorrect")
+	}
+
+	// Case 5: Now write some data via a batch.
+	// This should fail either during AppendRaw or Commit
+	batch := f.newBatch()
+	writeErr := batch.AppendRaw(32, make([]byte, 1))
+	if writeErr == nil {
+		writeErr = batch.commit()
+	}
+	if writeErr == nil {
+		t.Fatalf("Writing to readonly table should fail")
+	}
+}
+
+// randTest performs random freezer table operations.
+// Instances of this test are created by Generate.
+type randTest []randTestStep
+
+type randTestStep struct {
+	op     int
+	items  []uint64 // for append and retrieve
+	blobs  [][]byte // for append
+	target uint64   // for truncate(head/tail)
+	err    error    // for debugging
+}
+
+const (
+	opReload = iota
+	opAppend
+	opRetrieve
+	opTruncateHead
+	opTruncateHeadAll
+	opTruncateTail
+	opTruncateTailAll
+	opCheckAll
+	opMax // boundary value, not an actual op
+)
+
+func getVals(first uint64, n int) [][]byte {
+	var ret [][]byte
+	for i := 0; i < n; i++ {
+		val := make([]byte, 8)
+		binary.BigEndian.PutUint64(val, first+uint64(i))
+		ret = append(ret, val)
+	}
+	return ret
+}
+
+func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
+	var (
+		deleted uint64   // The number of deleted items from tail
+		items   []uint64 // The index of entries in table
+
+		// getItems retrieves the indexes for items in table.
+		getItems = func(n int) []uint64 {
+			length := len(items)
+			if length == 0 {
+				return nil
+			}
+			var ret []uint64
+			index := rand.Intn(length)
+			for i := index; len(ret) < n && i < length; i++ {
+				ret = append(ret, items[i])
+			}
+			return ret
+		}
+
+		// addItems appends the given length items into the table.
+		addItems = func(n int) []uint64 {
+			var first = deleted
+			if len(items) != 0 {
+				first = items[len(items)-1] + 1
+			}
+			var ret []uint64
+			for i := 0; i < n; i++ {
+				ret = append(ret, first+uint64(i))
+			}
+			items = append(items, ret...)
+			return ret
+		}
+	)
+
+	var steps randTest
+	for i := 0; i < size; i++ {
+		step := randTestStep{op: r.Intn(opMax)}
+		switch step.op {
+		case opReload, opCheckAll:
+		case opAppend:
+			num := r.Intn(3)
+			step.items = addItems(num)
+			if len(step.items) == 0 {
+				step.blobs = nil
+			} else {
+				step.blobs = getVals(step.items[0], num)
+			}
+		case opRetrieve:
+			step.items = getItems(r.Intn(3))
+		case opTruncateHead:
+			if len(items) == 0 {
+				step.target = deleted
+			} else {
+				index := r.Intn(len(items))
+				items = items[:index]
+				step.target = deleted + uint64(index)
+			}
+		case opTruncateHeadAll:
+			step.target = deleted
+			items = items[:0]
+		case opTruncateTail:
+			if len(items) == 0 {
+				step.target = deleted
+			} else {
+				index := r.Intn(len(items))
+				items = items[index:]
+				deleted += uint64(index)
+				step.target = deleted
+			}
+		case opTruncateTailAll:
+			step.target = deleted + uint64(len(items))
+			items = items[:0]
+			deleted = step.target
+		}
+		steps = append(steps, step)
+	}
+	return reflect.ValueOf(steps)
+}
+
+func runRandTest(rt randTest) bool {
+	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
+	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+	if err != nil {
+		panic("failed to initialize table")
+	}
+	var values [][]byte
+	for i, step := range rt {
+		switch step.op {
+		case opReload:
+			f.Close()
+			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
+			if err != nil {
+				rt[i].err = fmt.Errorf("failed to reload table %v", err)
+			}
+		case opCheckAll:
+			tail := atomic.LoadUint64(&f.itemHidden)
+			head := atomic.LoadUint64(&f.items)
+
+			if tail == head {
+				continue
+			}
+			got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
+			if err != nil {
+				rt[i].err = err
+			} else {
+				if !reflect.DeepEqual(got, values) {
+					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
+				}
+			}
+
+		case opAppend:
+			batch := f.newBatch()
+			for i := 0; i < len(step.items); i++ {
+				batch.AppendRaw(step.items[i], step.blobs[i])
+			}
+			batch.commit()
+			values = append(values, step.blobs...)
+
+		case opRetrieve:
+			var blobs [][]byte
+			if len(step.items) == 0 {
+				continue
+			}
+			tail := atomic.LoadUint64(&f.itemHidden)
+			for i := 0; i < len(step.items); i++ {
+				blobs = append(blobs, values[step.items[i]-tail])
+			}
+			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
+			if err != nil {
+				rt[i].err = err
+			} else {
+				if !reflect.DeepEqual(got, blobs) {
+					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
+				}
+			}
+
+		case opTruncateHead:
+			f.truncateHead(step.target)
+
+			length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
+			values = values[:length]
+
+		case opTruncateHeadAll:
+			f.truncateHead(step.target)
+			values = nil
+
+		case opTruncateTail:
+			prev := atomic.LoadUint64(&f.itemHidden)
+			f.truncateTail(step.target)
+
+			truncated := atomic.LoadUint64(&f.itemHidden) - prev
+			values = values[truncated:]
+
+		case opTruncateTailAll:
+			f.truncateTail(step.target)
+			values = nil
+		}
+		// Abort the test on error.
+		if rt[i].err != nil {
+			return false
+		}
+	}
+	f.Close()
+	return true
+}
+
+func TestRandom(t *testing.T) {
+	if err := quick.Check(runRandTest, nil); err != nil {
+		if cerr, ok := err.(*quick.CheckError); ok {
+			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
+		}
+		t.Fatal(err)
+	}
+}
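Generate and runRandTest together form a property-based test: testing/quick synthesizes randTest values through the Generate method and reports failing inputs via quick.CheckError, which TestRandom dumps with spew. The same mechanism in miniature, on a trivial property:

package main

import (
	"fmt"
	"testing/quick"
)

// roundTrip is a property quick.Check can try to falsify: reversing a slice
// twice yields the original. Like runRandTest, it returns false on failure.
func roundTrip(xs []int) bool {
	rev := func(in []int) []int {
		out := make([]int, len(in))
		for i, v := range in {
			out[len(in)-1-i] = v
		}
		return out
	}
	got := rev(rev(xs))
	for i := range xs {
		if got[i] != xs[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(quick.Check(roundTrip, nil)) // <nil>
}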
@@ -20,10 +20,10 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"math/big"
 	"math/rand"
 	"os"
+	"path"
 	"sync"
 	"testing"
 
@@ -49,8 +49,7 @@ func TestFreezerModify(t *testing.T) {
 	}
 
 	tables := map[string]bool{"raw": true, "rlp": false}
-	f, dir := newFreezerForTesting(t, tables)
-	defer os.RemoveAll(dir)
+	f, _ := newFreezerForTesting(t, tables)
 	defer f.Close()
 
 	// Commit test data.
@@ -96,7 +95,6 @@ func TestFreezerModifyRollback(t *testing.T) {
 	t.Parallel()
 
 	f, dir := newFreezerForTesting(t, freezerTestTableDef)
-	defer os.RemoveAll(dir)
 
 	theError := errors.New("oops")
 	_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -127,8 +125,7 @@ func TestFreezerModifyRollback(t *testing.T) {
 func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
 	t.Parallel()
 
-	f, dir := newFreezerForTesting(t, freezerTestTableDef)
-	defer os.RemoveAll(dir)
+	f, _ := newFreezerForTesting(t, freezerTestTableDef)
 	defer f.Close()
 
 	var (
@@ -186,17 +183,16 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
 	wg.Wait()
 }
 
-// This test runs ModifyAncients and TruncateAncients concurrently with each other.
+// This test runs ModifyAncients and TruncateHead concurrently with each other.
 func TestFreezerConcurrentModifyTruncate(t *testing.T) {
-	f, dir := newFreezerForTesting(t, freezerTestTableDef)
-	defer os.RemoveAll(dir)
+	f, _ := newFreezerForTesting(t, freezerTestTableDef)
 	defer f.Close()
 
 	var item = make([]byte, 256)
 
 	for i := 0; i < 1000; i++ {
 		// First reset and write 100 items.
-		if err := f.TruncateAncients(0); err != nil {
+		if err := f.TruncateHead(0); err != nil {
 			t.Fatal("truncate failed:", err)
 		}
 		_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -231,7 +227,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
 		wg.Done()
 	}()
 	go func() {
-		truncateErr = f.TruncateAncients(10)
+		truncateErr = f.TruncateHead(10)
 		wg.Done()
 	}()
 	go func() {
@@ -253,13 +249,44 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
 	}
 }
 
+func TestFreezerReadonlyValidate(t *testing.T) {
+	tables := map[string]bool{"a": true, "b": true}
+	dir := t.TempDir()
+	// Open non-readonly freezer and fill individual tables
+	// with different amount of data.
+	f, err := newFreezer(dir, "", false, 2049, tables)
+	if err != nil {
+		t.Fatal("can't open freezer", err)
+	}
+	var item = make([]byte, 1024)
+	aBatch := f.tables["a"].newBatch()
+	require.NoError(t, aBatch.AppendRaw(0, item))
+	require.NoError(t, aBatch.AppendRaw(1, item))
+	require.NoError(t, aBatch.AppendRaw(2, item))
+	require.NoError(t, aBatch.commit())
+	bBatch := f.tables["b"].newBatch()
+	require.NoError(t, bBatch.AppendRaw(0, item))
+	require.NoError(t, bBatch.commit())
+	if f.tables["a"].items != 3 {
+		t.Fatalf("unexpected number of items in table")
+	}
+	if f.tables["b"].items != 1 {
+		t.Fatalf("unexpected number of items in table")
+	}
+	require.NoError(t, f.Close())
+
+	// Re-opening as readonly should fail when validating
+	// table lengths.
+	f, err = newFreezer(dir, "", true, 2049, tables)
+	if err == nil {
+		t.Fatal("readonly freezer should fail with differing table lengths")
+	}
+}
+
 func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
 	t.Helper()
 
-	dir, err := ioutil.TempDir("", "freezer")
-	if err != nil {
-		t.Fatal(err)
-	}
+	dir := t.TempDir()
 	// note: using low max table size here to ensure the tests actually
 	// switch between multiple files.
 	f, err := newFreezer(dir, "", false, 2049, tables)
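The deleted os.RemoveAll defers are not lost cleanup: t.TempDir registers removal of the directory through the testing package's cleanup mechanism when the test finishes, so the explicit defers became redundant. A minimal illustration (a hypothetical demonstration test, not part of the PR):

package rawdb

import "testing"

func TestTempDirIsSelfCleaning(t *testing.T) {
	dir := t.TempDir() // removed automatically when the test completes
	if dir == "" {
		t.Fatal("expected a usable scratch directory")
	}
}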
@@ -299,3 +326,84 @@ func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
 			t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
 		}
 	}
 }
+
+func TestRenameWindows(t *testing.T) {
+	var (
+		fname   = "file.bin"
+		fname2  = "file2.bin"
+		data    = []byte{1, 2, 3, 4}
+		data2   = []byte{2, 3, 4, 5}
+		data3   = []byte{3, 5, 6, 7}
+		dataLen = 4
+	)
+
+	// Create 2 temp dirs
+	dir1 := t.TempDir()
+	dir2 := t.TempDir()
+
+	// Create file in dir1 and fill with data
+	f, err := os.Create(path.Join(dir1, fname))
+	if err != nil {
+		t.Fatal(err)
+	}
+	f2, err := os.Create(path.Join(dir1, fname2))
+	if err != nil {
+		t.Fatal(err)
+	}
+	f3, err := os.Create(path.Join(dir2, fname2))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := f.Write(data); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := f2.Write(data2); err != nil {
+		t.Fatal(err)
+	}
+	if _, err := f3.Write(data3); err != nil {
+		t.Fatal(err)
+	}
+	if err := f.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if err := f2.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if err := f3.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil {
+		t.Fatal(err)
+	}
+
+	// Check file contents
+	f, err = os.Open(path.Join(dir2, fname))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+	defer os.Remove(f.Name())
+	buf := make([]byte, dataLen)
+	if _, err := f.Read(buf); err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(buf, data) {
+		t.Errorf("unexpected file contents. Got %v\n", buf)
+	}
+
+	f, err = os.Open(path.Join(dir2, fname2))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+	defer os.Remove(f.Name())
+	if _, err := f.Read(buf); err != nil {
+		t.Fatal(err)
+	}
+	if !bytes.Equal(buf, data2) {
+		t.Errorf("unexpected file contents. Got %v\n", buf)
+	}
+}
core/rawdb/freezer_utils.go (new file, 120 lines)
@@ -0,0 +1,120 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
+// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
+// Before the copy is executed, a callback can be registered to
+// manipulate the dest file.
+// It is perfectly valid to have destPath == srcPath.
+func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
+	// Create a temp file in the same dir where we want it to wind up
+	f, err := ioutil.TempFile(filepath.Dir(destPath), "*")
+	if err != nil {
+		return err
+	}
+	fname := f.Name()
+
+	// Clean up the leftover file
+	defer func() {
+		if f != nil {
+			f.Close()
+		}
+		os.Remove(fname)
+	}()
+	// Apply the given function if it's not nil before we copy
+	// the content from the src.
+	if before != nil {
+		if err := before(f); err != nil {
+			return err
+		}
+	}
+	// Open the source file
+	src, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	if _, err = src.Seek(int64(offset), 0); err != nil {
+		src.Close()
+		return err
+	}
+	// io.Copy uses 32K buffer internally.
+	_, err = io.Copy(f, src)
+	if err != nil {
+		src.Close()
+		return err
+	}
+	// Rename the temporary file to the specified dest name.
+	// src may be same as dest, so needs to be closed before
+	// we do the final move.
+	src.Close()
+
+	if err := f.Close(); err != nil {
+		return err
+	}
+	f = nil
+
+	if err := os.Rename(fname, destPath); err != nil {
+		return err
+	}
+	return nil
+}
+
+// openFreezerFileForAppend opens a freezer table file and seeks to the end
+func openFreezerFileForAppend(filename string) (*os.File, error) {
+	// Open the file without the O_APPEND flag
+	// because it has differing behaviour during Truncate operations
+	// on different OS's
+	file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
+	if err != nil {
+		return nil, err
+	}
+	// Seek to end for append
+	if _, err = file.Seek(0, io.SeekEnd); err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+// openFreezerFileForReadOnly opens a freezer table file for read only access
+func openFreezerFileForReadOnly(filename string) (*os.File, error) {
+	return os.OpenFile(filename, os.O_RDONLY, 0644)
+}
+
+// openFreezerFileTruncated opens a freezer table making sure it is truncated
+func openFreezerFileTruncated(filename string) (*os.File, error) {
+	return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
+}
+
+// truncateFreezerFile resizes a freezer table file and seeks to the end
+func truncateFreezerFile(file *os.File, size int64) error {
+	if err := file.Truncate(size); err != nil {
+		return err
+	}
+	// Seek to end for append
+	if _, err := file.Seek(0, io.SeekEnd); err != nil {
+		return err
+	}
+	return nil
+}
core/rawdb/freezer_utils_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+	"bytes"
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+func TestCopyFrom(t *testing.T) {
+	var (
+		content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
+		prefix  = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
+	)
+	var cases = []struct {
+		src, dest   string
+		offset      uint64
+		writePrefix bool
+	}{
+		{"foo", "bar", 0, false},
+		{"foo", "bar", 1, false},
+		{"foo", "bar", 8, false},
+		{"foo", "foo", 0, false},
+		{"foo", "foo", 1, false},
+		{"foo", "foo", 8, false},
+		{"foo", "bar", 0, true},
+		{"foo", "bar", 1, true},
+		{"foo", "bar", 8, true},
+	}
+	for _, c := range cases {
+		ioutil.WriteFile(c.src, content, 0644)
+
+		if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
+			if !c.writePrefix {
+				return nil
+			}
+			f.Write(prefix)
+			return nil
+		}); err != nil {
+			os.Remove(c.src)
+			t.Fatalf("Failed to copy %v", err)
+		}
+
+		blob, err := ioutil.ReadFile(c.dest)
+		if err != nil {
+			os.Remove(c.src)
+			os.Remove(c.dest)
+			t.Fatalf("Failed to read %v", err)
+		}
+		want := content[c.offset:]
+		if c.writePrefix {
+			want = append(prefix, want...)
+		}
+		if !bytes.Equal(blob, want) {
+			t.Fatal("Unexpected value")
+		}
+		os.Remove(c.src)
+		os.Remove(c.dest)
+	}
+}
core/rawdb/key_length_iterator.go (new file, 47 lines)
@@ -0,0 +1,47 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import "github.com/ethereum/go-ethereum/ethdb"
+
+// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
+// with a specific key length will be returned.
+type KeyLengthIterator struct {
+	requiredKeyLength int
+	ethdb.Iterator
+}
+
+// NewKeyLengthIterator returns a wrapped version of the iterator that only returns
+// key-value pairs whose keys have the given length.
+func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
+	return &KeyLengthIterator{
+		Iterator:          it,
+		requiredKeyLength: keyLen,
+	}
+}
+
+func (it *KeyLengthIterator) Next() bool {
+	// Return true as soon as a key with the required key length is discovered
+	for it.Iterator.Next() {
+		if len(it.Iterator.Key()) == it.requiredKeyLength {
+			return true
+		}
+	}
+
+	// Return false when we exhaust the keys in the underlying iterator.
+	return false
+}
60
core/rawdb/key_length_iterator_test.go
Normal file
60
core/rawdb/key_length_iterator_test.go
Normal file
@ -0,0 +1,60 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"encoding/binary"
	"testing"
)

func TestKeyLengthIterator(t *testing.T) {
	db := NewMemoryDatabase()

	keyLen := 8
	expectedKeys := make(map[string]struct{})
	for i := 0; i < 100; i++ {
		key := make([]byte, keyLen)
		binary.BigEndian.PutUint64(key, uint64(i))
		if err := db.Put(key, []byte{0x1}); err != nil {
			t.Fatal(err)
		}
		expectedKeys[string(key)] = struct{}{}

		longerKey := make([]byte, keyLen*2)
		binary.BigEndian.PutUint64(longerKey, uint64(i))
		if err := db.Put(longerKey, []byte{0x1}); err != nil {
			t.Fatal(err)
		}
	}

	it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
	for it.Next() {
		key := it.Key()
		_, exists := expectedKeys[string(key)]
		if !exists {
			t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
		}
		delete(expectedKeys, string(key))
		if len(key) != keyLen {
			t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
		}
	}

	if len(expectedKeys) != 0 {
		t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
	}
}
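The test interleaves 8-byte and 16-byte keys so both the match and the skip paths of Next are exercised, then checks that exactly the 8-byte keys come back. To run just this test from a go-ethereum checkout (standard go tooling):

	go test ./core/rawdb -run TestKeyLengthIterator -v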
core/rawdb/schema.go
@ -63,6 +63,9 @@ var (
 	// snapshotSyncStatusKey tracks the snapshot sync status across restarts.
 	snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
 
+	// skeletonSyncStatusKey tracks the skeleton sync status across restarts.
+	skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
+
 	// txIndexTailKey tracks the oldest block whose transactions have been indexed.
 	txIndexTailKey = []byte("TransactionIndexTail")
@ -92,9 +95,11 @@ var (
 	SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
 	SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
 	CodePrefix            = []byte("c") // CodePrefix + code hash -> account code
+	skeletonHeaderPrefix  = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header
 
 	PreimagePrefix = []byte("secure-key-")       // PreimagePrefix + hash -> preimage
 	configPrefix   = []byte("ethereum-config-")  // config prefix for the db
+	genesisPrefix  = []byte("ethereum-genesis-") // genesis state prefix for the db
 
 	// Chain index prefixes (use `i` + single byte to avoid mixing data types).
 	BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
@ -210,6 +215,11 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
 	return key
 }
 
+// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
+func skeletonHeaderKey(number uint64) []byte {
+	return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
+}
+
 // preimageKey = PreimagePrefix + hash
 func preimageKey(hash common.Hash) []byte {
 	return append(PreimagePrefix, hash.Bytes()...)
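For concreteness, a skeleton header key is the one-byte "S" prefix followed by the 8-byte big-endian block number. A standalone sketch of the layout, assuming encodeBlockNumber is the usual 8-byte big-endian encoding used by the other key helpers in this file:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	prefix := []byte("S") // skeletonHeaderPrefix
	num := make([]byte, 8)
	binary.BigEndian.PutUint64(num, 1024) // block number 1024
	key := append(prefix, num...)
	fmt.Printf("%x\n", key) // 530000000000000400: 0x53 is 'S', then the block number
}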
@ -233,3 +243,8 @@ func IsCodeKey(key []byte) (bool, []byte) {
 func configKey(hash common.Hash) []byte {
 	return append(configPrefix, hash.Bytes()...)
 }
+
+// genesisKey = genesisPrefix + hash
+func genesisKey(hash common.Hash) []byte {
+	return append(genesisPrefix, hash.Bytes()...)
+}
core/rawdb/table.go
@ -74,6 +74,12 @@ func (t *table) Ancients() (uint64, error) {
 	return t.db.Ancients()
 }
 
+// Tail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) Tail() (uint64, error) {
+	return t.db.Tail()
+}
+
 // AncientSize is a noop passthrough that just forwards the request to the underlying
 // database.
 func (t *table) AncientSize(kind string) (uint64, error) {
@ -89,10 +95,16 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err err
 	return t.db.ReadAncients(fn)
 }
 
-// TruncateAncients is a noop passthrough that just forwards the request to the underlying
+// TruncateHead is a noop passthrough that just forwards the request to the underlying
 // database.
-func (t *table) TruncateAncients(items uint64) error {
-	return t.db.TruncateAncients(items)
+func (t *table) TruncateHead(items uint64) error {
+	return t.db.TruncateHead(items)
+}
+
+// TruncateTail is a noop passthrough that just forwards the request to the underlying
+// database.
+func (t *table) TruncateTail(items uint64) error {
+	return t.db.TruncateTail(items)
 }
 
 // Sync is a noop passthrough that just forwards the request to the underlying
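The rename from TruncateAncients to TruncateHead pairs with the new TruncateTail: the ancient store can now be shortened from either end. As a toy model of the intended semantics (a hedged reading of the API; the real freezer tracks offsets and deletes files rather than reslicing):

// items models the ancient store as a slice, oldest entry first.
func truncateHead(items []uint64, n uint64) []uint64 {
	return items[:n] // keep only the first n entries, dropping the newest
}

func truncateTail(items []uint64, n uint64) []uint64 {
	return items[n:] // drop the first n entries, the oldest ones
}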
@ -101,6 +113,12 @@ func (t *table) Sync() error {
 	return t.db.Sync()
 }
 
+// MigrateTable processes the entries in a given table in sequence
+// converting them to a new format if they're of an old format.
+func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
+	return t.db.MigrateTable(kind, convert)
+}
+
 // Put inserts the given value into the database at a prefixed version of the
 // provided key.
 func (t *table) Put(key []byte, value []byte) error {
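The context lines are a reminder of what table is for: every key is namespaced by prepending a fixed prefix before hitting the underlying database, which is why all of the new methods can be plain passthroughs. A minimal sketch of the prefixing idea (hypothetical helper, not from this diff):

// prefixedKey namespaces a key under a fixed table prefix.
func prefixedKey(prefix string, key []byte) []byte {
	return append([]byte(prefix), key...)
}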
@ -172,6 +190,18 @@ func (t *table) NewBatch() ethdb.Batch {
 	return &tableBatch{t.db.NewBatch(), t.prefix}
 }
 
+// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
+func (t *table) NewBatchWithSize(size int) ethdb.Batch {
+	return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
+}
+
+// NewSnapshot creates a database snapshot based on the current state.
+// The created snapshot will not be affected by any mutations that happen
+// on the database afterwards.
+func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
+	return t.db.NewSnapshot()
+}
+
 // tableBatch is a wrapper around a database batch that prefixes each key access
 // with a pre-configured string.
 type tableBatch struct {
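NewBatchWithSize mirrors NewBatch but lets the caller pre-allocate the write buffer, avoiding regrowth when the write volume is roughly known up front. A hedged usage sketch, assuming the ethdb.Batcher interface exposes NewBatchWithSize after this change (ethdb.IdealBatchSize is an existing constant in that package):

// writeAll flushes a set of key-value pairs through a pre-sized batch.
func writeAll(db ethdb.Batcher, kvs map[string][]byte) error {
	batch := db.NewBatchWithSize(ethdb.IdealBatchSize)
	for k, v := range kvs {
		if err := batch.Put([]byte(k), v); err != nil {
			return err
		}
	}
	return batch.Write()
}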
Some files were not shown because too many files have changed in this diff.