Compare commits


21 Commits

Author SHA1 Message Date
Guillaume Ballet
a3437cc17c call onleaf in verkle commit 2021-12-17 18:26:52 +01:00
Guillaume Ballet
fe75603d0b remove outdated comment 2021-12-06 11:09:25 +01:00
jwasinger
5bac5b3262 consensus/ethash: move accumulation of coinbase witness before coinbase account is credited (#41) 2021-12-02 09:41:11 +01:00
jwasinger
fa753db9e8 consensus/ethash: ensure uncle accounts are included in block witness (#40) 2021-12-02 09:39:51 +01:00
Guillaume Ballet
86bdc3fb39 Remove access witness from the signature of Process (#38) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
909049c5fe use the witness in statedb, revert applyTx signature (#36)
* use the witness in statedb, revert applyTx signature

* fix miner tests

* fix catalyst build
2021-11-26 16:38:20 +01:00
Guillaume Ballet
7360d168c8 fix calculation in get_tree_key_for_storage_slot (#35) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
361a328cb7 upgrade go version (#34) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
41c2f754cc remove unnecessary cancun block declaration in tests (#33) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
7cb1add36a add circleci support (#32)
* add circleci support

* disable linter, which is broken again

* actually run tests
2021-11-26 16:38:20 +01:00
Guillaume Ballet
03dbc0a210 fix boundary condition check in PUSH32 2021-11-26 16:38:20 +01:00
Guillaume Ballet
6d40e11fe3 fix bound check in code chunking 2021-11-26 16:38:20 +01:00
Guillaume Ballet
5ca990184f fix boundary check in PUSH 2021-11-26 16:38:20 +01:00
Guillaume Ballet
15d98607f3 initialize the new access witness if not already present 2021-11-26 16:38:20 +01:00
Guillaume Ballet
ef08e51e40 merge undefined instead of panicking (#30) 2021-11-26 16:38:20 +01:00
Guillaume Ballet
e1144745a7 fix linter issue 2021-11-26 16:38:20 +01:00
Guillaume Ballet
bc06d2c740 fix rebase issues 2021-11-26 16:38:20 +01:00
jwasinger
97a79f50e8 enable verkle on cancun block: take 2 (#28)
* enable verkle on cancun block: take 2

* fix typo.  make unreachable line panic message more clear
2021-11-26 16:38:17 +01:00
Guillaume Ballet
9f9c03a94c fixes for the IPA testnet
upgrade to latest go-verkle

update go-verkle to get more fixes

simplify code by removing all stateless references (#25)

fix verkle proof test by enforcing values alignment to 32 bytes

remove unneeded KZG tag

fix the stateless test

Move AccessWitness into StateDB (#27)

* move AccessWitness into StateDB

* set Accesses in TxContext constructor

* Ensures that a statedb is initialized with a witness

* copy AccessWitness in StateDB.Copy.  use copied state in miner worker.commit.

* remove redundant line

Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com>

Fix contract creation issue
2021-11-26 16:30:06 +01:00
Guillaume Ballet
719bf47354 Upgrade go-verkle to its IPA version (#24) 2021-11-26 16:30:06 +01:00
Guillaume Ballet
162780515a all: implement EIP-compliant verkle trees
verkle: Implement Trie, NodeIterator and Database ifs

Fix crash in TestDump

Fix TestDump

Fix TrieCopy

remove unnecessary traces

fix: Error() returned errIteratorEnd in verkle node iterator

rewrite the iterator and change the signature of OpenStorageTrie

add the adapter to reuse the account trie for storage

don't try to deserialize a storage leaf into an account

Fix statedb unit tests (#14)

* debug code

* Fix more unit tests

* remove traces

* Go back to the full range

One tree to rule them all

remove updateRoot, there is no root to update

store code inside the account leaf

fix build

save current state for Sina

Update go-verkle to latest

Charge WITNESS_*_COST gas on storage loads

Add witness costs for SSTORE as well

Charge witness gas in the case of code execution

corresponding code deletion

add a --verkle flag to separate verkle experiments from regular geth operations

use the snapshot to get data

stateless execution from block witness

AccessWitness functions

Add block generation test + genesis snapshot generation

test stateless block execution (#18)

* test stateless block execution

* Force tree resolution before generating the proof

increased coverage in stateless test execution (#19)

* test stateless block execution

* Force tree resolution before generating the proof

* increase coverage in stateless test execution

ensure geth compiles

fix issues in tests with verkle trees deactivated

Ensure stateless data is available when executing statelessly (#20)

* Ensure stateless data is available when executing statelessly

* Actual execution of a statless block

* bugfixes in stateless block execution

* code cleanup

 - Reduce PR footprint by reverting NewEVM to its original signature
 - Move the access witness to the block context
 - prepare for a change in AW semantics
   Need to store the initial values.
 - Use the touch helper function, DRY

* revert the signature of MustCommit to its original form (#21)

fix leaf proofs in stateless execution (#22)

* Fixes in witness pre-state

* Add the recipient's nonce to the witness

* reduce PR footprint and investigate issue in root state calculation

* quick build fix

cleanup: Remove extra parameter in ToBlock

revert ToBlock to its older signature

fix import cycle in vm tests

fix linter issue

fix appveyor build

fix nil pointers in tests

Add indices, yis and Cis to the block's Verkle proof

upgrade geth dependency to drop geth's common dep

fix cmd/devp2p tests

fix rebase issues

quell an appveyor warning

fix address touching in SLOAD and SSTORE

fix access witness for code size

touch target account data before calling

make sure the proper locations get touched in (ext)codecopy

touch all code pages in execution

add pushdata to witness

remove useless code in genesis snapshot generation

testnet: fix some of the rebase/drift issues

Fix verkle proof generation in block

fix an issue occuring when chunking past the code size

fix: ensure the code copy doesn't extend past the code size
2021-11-26 16:30:03 +01:00
352 changed files with 6473 additions and 17385 deletions

.circleci/config.yml (new file, 45 lines)

@@ -0,0 +1,45 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
  build:
    working_directory: ~/repo
    # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
    # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
    docker:
      - image: circleci/golang:1.16.10
    # Add steps to the job
    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
    steps:
      - checkout
      - restore_cache:
          keys:
            - go-mod-v4-{{ checksum "go.sum" }}
      - run:
          name: Install Dependencies
          command: go mod download
      - save_cache:
          key: go-mod-v4-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"
      #- run:
      #    name: Run linter
      #    command: |
      #      go run build/ci.go lint
      - run:
          name: Run tests
          command: |
            go run build/ci.go test -coverage
      - store_test_results:
          path: /tmp/test-reports

# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
  sample: # This is the name of the workflow, feel free to change it to better match your workflow.
    # Inside the workflow, you define the jobs you want to run.
    jobs:
      - build

.gitmodules (vendored, 4 lines removed)

@@ -2,7 +2,3 @@
	path = tests/testdata
	url = https://github.com/ethereum/tests
	shallow = true
-[submodule "evm-benchmarks"]
-	path = tests/evm-benchmarks
-	url = https://github.com/ipsilon/evm-benchmarks
-	shallow = true

.golangci.yml

@@ -1,7 +1,7 @@
# This file configures github.com/golangci/golangci-lint.

run:
-  timeout: 20m
+  timeout: 5m
  tests: true
  # default is true. Enables skipping of directories:
  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$

.travis.yml

@@ -16,7 +16,7 @@ jobs:
- stage: lint
os: linux
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- lint
git:
@@ -31,7 +31,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- docker
services:
@@ -48,7 +48,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- docker
services:
@@ -65,7 +65,7 @@ jobs:
if: type = push
os: linux
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- ubuntu-ppa
- GO111MODULE=on
@@ -90,7 +90,7 @@ jobs:
os: linux
dist: bionic
sudo: required
-go: 1.18.x
+go: 1.17.x
env:
- azure-linux
- GO111MODULE=on
@@ -148,7 +148,7 @@ jobs:
- sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
# Install Go to allow building with
-- curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
+- curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
@@ -162,7 +162,7 @@ jobs:
- stage: build
if: type = push
os: osx
-go: 1.18.x
+go: 1.17.x
env:
- azure-osx
- azure-ios
@@ -194,7 +194,7 @@ jobs:
os: linux
arch: amd64
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -205,7 +205,7 @@ jobs:
os: linux
arch: arm64
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- GO111MODULE=on
script:
@@ -214,7 +214,7 @@ jobs:
- stage: build
os: linux
dist: bionic
-go: 1.17.x
+go: 1.16.x
env:
- GO111MODULE=on
script:
@@ -225,7 +225,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- azure-purge
- GO111MODULE=on
@@ -239,7 +239,7 @@ jobs:
if: type = cron
os: linux
dist: bionic
-go: 1.18.x
+go: 1.17.x
env:
- GO111MODULE=on
script:

Dockerfile

@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM golang:1.18-alpine as builder
+FROM golang:1.17-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git

Dockerfile.alltools

@@ -4,7 +4,7 @@ ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
-FROM golang:1.18-alpine as builder
+FROM golang:1.17-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git

README.md

@@ -52,28 +52,12 @@ Going through all the possible command line flags is out of scope here (please c
but we've enumerated a few common parameter combos to get you up to speed quickly
on how you can run your own `geth` instance.
-### Hardware Requirements
-Minimum:
-* CPU with 2+ cores
-* 4GB RAM
-* 500GB free storage space to sync the Mainnet
-* 8 MBit/sec download Internet service
-Recommended:
-* Fast CPU with 4+ cores
-* 16GB+ RAM
-* High Performance SSD with at least 500GB free space
-* 25+ MBit/sec download Internet service
### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum
network: create accounts; transfer funds; deploy and interact with contracts. For this
particular use-case the user doesn't care about years-old historical data, so we can
-sync quickly to the current state of the network. To do so:
+fast-sync quickly to the current state of the network. To do so:
```shell
$ geth console
@@ -84,7 +68,7 @@ This command will:
causing it to download more data in exchange for avoiding processing the entire history
of the Ethereum network, which is very CPU intensive.
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
-(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md)
+(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/)
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
This tool is optional and if you leave it out you can always attach to an already running
@@ -175,13 +159,13 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
ethereum/client-go
```
-This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the
+This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the
above command does. It will also create a persistent volume in your home directory for
saving your blockchain as well as map the default ports. There is also an `alpine` tag
available for a slim version of the image.
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
-and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
+and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
accessible from the outside.
### Programmatically interfacing `geth` nodes
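As an illustration of that programmatic interface (a minimal sketch, not part of this diff: it assumes a locally running node with HTTP-RPC enabled on the default port 8545, e.g. the docker invocation above with `--http.addr 0.0.0.0`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/ethclient"
)

func main() {
	// Attach to the node's HTTP-RPC endpoint.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// Query the current chain head as a smoke test.
	head, err := client.BlockNumber(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("current block number:", head)
}
```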

SECURITY.md

@@ -19,7 +19,7 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
**Please do not file a public ticket** mentioning the vulnerability.
-To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
+To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
@@ -29,147 +29,92 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: SKS 1.1.6
-Comment: Hostname: pgp.mit.edu
+Version: GnuPG v1
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
=arte
-----END PGP PUBLIC KEY BLOCK------
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY
neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9
L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi
m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b
fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd
EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7
M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv
QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H
h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ
EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB
tDlFdGhlcmV1bSBGb3VuZGF0aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0
aGVyZXVtLm9yZz6JAj4EEwECACgCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA
BQJaCWH6BQkFo2BYAAoJEOiNMzT6X2oK+DEP/3H6dxkm0hvHZKoHLVuuxcu3EHYo
k5sd3MMWPrZSN8qzZnY7ayEDMxnarWOizc+2jfOxfJlzX/g8lR1/fsHdWPFPhPoV
Qk8ygrHn1H8U8+rpw/U03BqmqHpYCDzJ+CIis9UWROniqXw1nuqu/FtWOsdWxNKh
jUo6k/0EsaXsxRPzgJv7fEUcVcQ7as/C3x9sy3muc2gvgA4/BKoGPb1/U0GuA8lV
fDIDshAggmnSUAg+TuYSAAdoFQ1sKwFMPigcLJF2eyKuK3iUyixJrec/c4LSf3wA
cGghbeuqI8INP0Y2zvXDQN2cByxsFAuoZG+m0cyKGaDH2MVUvOKKYqn/03qvrf15
AWAsW0l0yQwOTCo3FbsNzemClm5Bj/xH0E4XuwXwChcMCMOWJrFoxyvCEI+keoQc
c08/a8/MtS7vBAABXwOziSmm6CNqmzpWrh/fDrjlJlba9U3MxzvqU3IFlTdMratv
6V+SgX+L25lCzW4NxxUavoB8fAlvo8lxpHKo24FP+RcLQ8XqkU3RiUsgRjQRFOqQ
TaJcsp8mimmiYyf24mNu6b48pi+a5c/eQR9w59emeEUZqsJU+nqv8BWIIp7o4Agh
NYnKjkhPlY5e1fLVfAHIADZFynWwRPkPMJSrBiP5EtcOFxQGHGjRxU/KjXkvE0hV
xYb1PB8pWMTu/beeiQI+BBMBAgAoBQJYJd7YAhsDBQkB4TOABgsJCAcDAgYVCAIJ
CgsEFgIDAQIeAQIXgAAKCRDojTM0+l9qCplDD/9IZ2i+m1cnqQKtiyHbyFGx32oL
fzqPylX2bOG5DPsSTorSUdJMGVfT04oVxXc4S/2DVnNvi7RAbSiLapCWSplgtBOj
j1xlblOoXxT3m7s1XHGCX5tENxI9fVSSPVKJn+fQaWpPB2MhBA+1lUI6GJ+11T7K
J8LrP/fiw1/nOb7rW61HW44Gtyox23sA/d1+DsFVaF8hxJlNj5coPKr8xWzQ8pQl
juzdjHDukjevuw4rRmRq9vozvj9keEU9XJ5dldyEVXFmdDk7KT0p0Rla9nxYhzf/
r/Bv8Bzy0HCWRb2D31BjXXGG05oVnYmNGxGFxYja4MwgrMmne3ilEVjfUJsapsqi
w41BAyQgIdfREulYN7ahsF5PrjVAqBd9IGtE8ULelF2SQxEBQBngEkP0ahP6tRAL
i7/CBjPKOyKijtqVny7qrGOnU2ygcA88/WDibexDhrjz0Gx8WmErU7rIWZiZ5u4Y
vJYVRo0+6rBCXRPeSJfiP5h1p17Anr2l42boAYslfcrzquB8MHtrNcyn650OLtHG
nbxgIdniKrpuzGN6Opw+O2id2JhD1/1p4SOemwAmthplr1MIyOHNP3q93rEj2J7h
5zPS/AJuKkMDFUpslPNLQjCOwPXtdzL7/kUZGBSyez1T3TaW1uY6l9XaJJRaSn+v
1zPgfp4GJ3lPs4AlAbQ0RXRoZXJldW0gRm91bmRhdGlvbiBCdWcgQm91bnR5IDxi
b3VudHlAZXRoZXJldW0ub3JnPokCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagoENg/+LnSaVeMxiGVtcjWl
b7Xd73yrEy4uxiESS1AalW9mMf7oZzfI05f7QIQlaLAkNac74vZDJbPKjtb7tpMO
RFhRZMCveq6CPKU6pd1SI8IUVUKwpEe6AJP3lHdVP57dquieFE2HlYKm6uHbCGWU
0cjyTA+uu2KbgCHGmofsPY/xOcZLGEHTHqa5w60JJAQm+BSDKnw8wTyrxGvA3EK/
ePSvOZMYa+iw6vYuZeBIMbdiXR/A2keBi3GuvqB8tDMj7P22TrH5mVDm3zNqGYD6
amDPeiWp4cztY3aZyLcgYotqXPpDceZzDn+HopBPzAb/llCdE7bVswKRhphVMw4b
bhL0R/TQY7Sf6TK2LKSBrjv0DWOSijikE71SJcBnJvHU7EpKrQQ0lMGclm3ynyji
Nf0YTPXQt4I+fwTmOew2GFeK3UytNWbWI7oXX7Nm4bj9bhf3IJ0kmZb/Gs73+xII
e7Rz52Mby436tWyQIQiF9ITYNGvNf53TwBBZMn0pKPiTyr3Ur7FHEotkEOFNh1//
4zQY10XxuBdLrYGyZ4V8xHJM+oKre8Eg2R9qHXVbjvErHE+7CvgnV7YUip0criPr
BlKRvuoJaSliH2JFhSjWVrkPmFGrWN0BAx10yIqMnEplfKeHf4P9Elek3oInS8WP
G1zJG6s/t5+hQK0X37+TB+6rd3GJAj4EEwECACgFAlgl4TsCGwMFCQHhM4AGCwkI
BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOiNMzT6X2oKzf8P/iIKd77WHTbp4pMN
8h52HyZJtDJmjA1DPZrbGl1TesW/Z9uTd12txlgqZnbG2GfN9+LSP6EOPzR6v2xC
OVhR+RdWhZDJJuQCVS7lJIqQrZgmeTZG0TyQPZdLjVFBOrrhVwYX+HXbu429IzHr
URf5InyR1QgqOXyElDYS6e28HFqvaoA0DWTWDDqOLPVl+U5fuceIE2XXdv3AGLeP
Yf8J5MPobjPiZtBqI6S6iENY2Yn35qLX+axeC/iYSCHVtFuCCIdb/QYR1ZZV8Ps/
aI9DwC7LU+YfPw7iqCIoqxSeA3o1PORkdSigEg3jtfRv5UqVo9a0oBb9jdoADsat
F/gW0E7mto3XGOiaR0eB9SSdsM3x7Bz4A0HIGNaxpZo1RWqlO91leP4c13Px7ISv
5OGXfLg+M8qb+qxbGd1HpitGi9s1y1aVfEj1kOtZ0tN8eu+Upg5WKwPNBDX3ar7J
9NCULgVSL+E79FG+zXw62gxiQrLfKzm4wU/9L5wVkwQnm29hLJ0tokrSBZFnc/1l
7OC+GM63tYicKkY4rqmoWUeYx7IwFH9mtDtvR1RxO85RbQhZizwpZpdpRkH0DqZu
ZJRmRa5r7rPqmfa7d+VIFhz2Xs8pJMLVqxTsLKcLglmjw7aOrYG0SWeH7YraXWGD
N3SlvSBiVwcK7QUKzLLvpadLwxfsuQINBFgl3tgBEACbgq6HTN5gEBi0lkD/MafI
nmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4hYontkMaKRlCg2Rvgjvk3Zve0
PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT19BdeAQRFvcfd+8w8
f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj26bf+2+1
DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66i
PsR99MQ7FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A
4tGkHl08KZ2N9o6GrfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8gr
eW8xB4zuf9Mkuou+RHNmo8PebHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0
VRxdPImKun+4LOXbfOxArOSkY6i35+gsgkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9
IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/bM1ACUtipMiIVeUs2uFiRjpz
A1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJaCWIIBQkFo2BYAAoJ
EOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg3IHMGxDM
b/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0I
Q1UKKXvzZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0
K9lneidcqtBDvlggJTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0T
NOOE8fXlvu8iuIAMBSDL9ep6sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd
5MTi0MDRNTij431kn8T/D0LCgmoUmYYMBgbwFhXr67axPZlKjrqR0z3F/Elv0ZPP
cVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1qScl9HiMxjt/H6aPastH63/7w
cN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4/Lih6Z1TlwcFVap+
cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1pM6AOQPpZ
85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4
=r6KK
-----END PGP PUBLIC KEY BLOCK-----
```

accounts/abi/argument.go

@@ -81,7 +81,13 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(arguments) != 0 {
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
}
-return make([]interface{}, 0), nil
+// Nothing to unmarshal, return default variables
+nonIndexedArgs := arguments.NonIndexed()
+defaultVars := make([]interface{}, len(nonIndexedArgs))
+for index, arg := range nonIndexedArgs {
+defaultVars[index] = reflect.New(arg.Type.GetType())
+}
+return defaultVars, nil
}
return arguments.UnpackValues(data)
}
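To make the behavioral difference in this hunk concrete, here is a hedged sketch of the empty-input path that both sides guard: with no expected (non-indexed) arguments and empty return data, the master side hands back an empty slice, while the verkle-branch side builds one zero value per non-indexed argument via `reflect.New` (which is again an empty slice in this degenerate case):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// A method with no outputs: unpacking its empty return data
	// must succeed rather than error out.
	var args abi.Arguments
	vals, err := args.Unpack(nil)
	fmt.Println(len(vals), err) // 0 <nil> on either side of this diff
}
```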

accounts/abi/bind/backends/simulated.go

@@ -230,9 +230,6 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
defer b.mu.Unlock()
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
-if receipt == nil {
-return nil, ethereum.NotFound
-}
return receipt, nil
}
@@ -642,6 +639,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
}
// SendTransaction updates the pending block to include the given transaction.
+// It panics if the transaction is invalid.
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -649,17 +647,17 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
// Get the last block
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
if err != nil {
return fmt.Errorf("could not fetch parent")
panic("could not fetch parent")
}
// Check transaction validity
signer := types.MakeSigner(b.blockchain.Config(), block.Number())
sender, err := types.Sender(signer, tx)
if err != nil {
return fmt.Errorf("invalid transaction: %v", err)
panic(fmt.Errorf("invalid transaction: %v", err))
}
nonce := b.pendingState.GetNonce(sender)
if tx.Nonce() != nonce {
return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
}
// Include tx in chain
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
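The visible difference is how a caller observes an invalid transaction: the master side returns an error from `SendTransaction`, while the verkle branch panics. A minimal, hypothetical usage sketch of the happy path (API names as in this era of the `backends` package):

```go
package main

import (
	"context"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	sim := backends.NewSimulatedBackend(
		core.GenesisAlloc{addr: {Balance: big.NewInt(1e18)}}, 8_000_000)
	defer sim.Close()

	ctx := context.Background()
	head, _ := sim.HeaderByNumber(ctx, nil)

	// Nonce 0 is correct for a fresh account; a wrong nonce is where the
	// two sides diverge (error on master, panic on the verkle branch).
	tx := types.NewTransaction(0, addr, big.NewInt(1), 21000, head.BaseFee, nil)
	signed, _ := types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), key)
	if err := sim.SendTransaction(ctx, signed); err != nil {
		panic(err)
	}
	sim.Commit() // mine the pending block
}
```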

accounts/abi/bind/backends/simulated_test.go

@@ -496,7 +496,7 @@ func TestEstimateGas(t *testing.T) {
GasPrice: big.NewInt(0),
Value: nil,
Data: common.Hex2Bytes("b9b046f9"),
}, 0, errors.New("invalid opcode: INVALID"), nil},
}, 0, errors.New("invalid opcode: opcode 0xfe not defined"), nil},
{"Valid", ethereum.CallMsg{
From: addr,

accounts/abi/bind/bind.go

@@ -88,13 +88,6 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
transactIdentifiers = make(map[string]bool)
eventIdentifiers = make(map[string]bool)
)
-for _, input := range evmABI.Constructor.Inputs {
-if hasStruct(input.Type) {
-bindStructType[lang](input.Type, structs)
-}
-}
for _, original := range evmABI.Methods {
// Normalize the method for capital cases and non-anonymous inputs/outputs
normalized := original

accounts/abi/bind/bind_test.go

@@ -1911,50 +1911,6 @@ var bindTests = []struct {
nil,
nil,
},
{
name: `ConstructorWithStructParam`,
contract: `
pragma solidity >=0.8.0 <0.9.0;
contract ConstructorWithStructParam {
struct StructType {
uint256 field;
}
constructor(StructType memory st) {}
}
`,
bytecode: []string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`},
abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`},
imports: `
"math/big"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/ethconfig"
`,
tester: `
var (
key, _ = crypto.GenerateKey()
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
)
defer sim.Close()
_, tx, _, err := DeployConstructorWithStructParam(user, sim, ConstructorWithStructParamStructType{Field: big.NewInt(42)})
if err != nil {
t.Fatalf("DeployConstructorWithStructParam() got err %v; want nil err", err)
}
sim.Commit()
if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
t.Logf("Deployment tx: %+v", tx)
t.Errorf("bind.WaitDeployed(nil, %T, <deployment tx>) got err %v; want nil err", sim, err)
}
`,
},
}
// Tests that packages generated by the binder can be successfully compiled and
@@ -1978,23 +1934,22 @@ func TestGolangBindings(t *testing.T) {
}
// Generate the test suite for all the contracts
for i, tt := range bindTests {
-t.Run(tt.name, func(t *testing.T) {
-var types []string
-if tt.types != nil {
-types = tt.types
-} else {
-types = []string{tt.name}
-}
-// Generate the binding and create a Go source file in the workspace
-bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases)
-if err != nil {
-t.Fatalf("test %d: failed to generate binding: %v", i, err)
-}
-if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
-t.Fatalf("test %d: failed to write binding: %v", i, err)
-}
-// Generate the test file with the injected test code
-code := fmt.Sprintf(`
+var types []string
+if tt.types != nil {
+types = tt.types
+} else {
+types = []string{tt.name}
+}
+// Generate the binding and create a Go source file in the workspace
+bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases)
+if err != nil {
+t.Fatalf("test %d: failed to generate binding: %v", i, err)
+}
+if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
+t.Fatalf("test %d: failed to write binding: %v", i, err)
+}
+// Generate the test file with the injected test code
+code := fmt.Sprintf(`
package bindtest
import (
@@ -2006,10 +1961,9 @@ func TestGolangBindings(t *testing.T) {
%s
}
`, tt.imports, tt.name, tt.tester)
-if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
-t.Fatalf("test %d: failed to write tests: %v", i, err)
-}
-})
+if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
+t.Fatalf("test %d: failed to write tests: %v", i, err)
+}
}
// Convert the package to go modules and use the current source for go-ethereum
moder := exec.Command(gocmd, "mod", "init", "bindtest")

accounts/abi/bind/util.go

@@ -21,7 +21,6 @@ import (
"errors"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
@@ -36,16 +35,14 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
logger := log.New("hash", tx.Hash())
for {
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
-if err == nil {
+if receipt != nil {
return receipt, nil
}
-if errors.Is(err, ethereum.NotFound) {
-logger.Trace("Transaction not yet mined")
-} else {
+if err != nil {
logger.Trace("Receipt retrieval failed", "err", err)
+} else {
+logger.Trace("Transaction not yet mined")
}
// Wait for the next round.
select {
case <-ctx.Done():
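The master side leans on the `ethereum.NotFound` sentinel to separate "not mined yet" from real transport failures. A hedged standalone sketch of the same polling pattern against any RPC client (`waitMined` here is an illustrative helper, not the library function):

```go
package txwait

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
)

// waitMined polls for a receipt, treating ethereum.NotFound as
// "keep waiting" and anything else as a reportable error.
func waitMined(ctx context.Context, ec *ethclient.Client, hash common.Hash) (*types.Receipt, error) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		receipt, err := ec.TransactionReceipt(ctx, hash)
		switch {
		case err == nil:
			return receipt, nil
		case errors.Is(err, ethereum.NotFound):
			log.Println("transaction not yet mined")
		default:
			log.Println("receipt retrieval failed:", err)
		}
		// Wait for the next round.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-ticker.C:
		}
	}
}
```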

accounts/abi/selector_parser.go

@@ -1,152 +0,0 @@
package abi
import (
"fmt"
)
type SelectorMarshaling struct {
Name string `json:"name"`
Type string `json:"type"`
Inputs []ArgumentMarshaling `json:"inputs"`
}
func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
func isAlpha(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
func isIdentifierSymbol(c byte) bool {
return c == '$' || c == '_'
}
func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
if len(unescapedSelector) == 0 {
return "", "", fmt.Errorf("empty token")
}
firstChar := unescapedSelector[0]
position := 1
if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
return "", "", fmt.Errorf("invalid token start: %c", firstChar)
}
for position < len(unescapedSelector) {
char := unescapedSelector[position]
if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
break
}
position++
}
return unescapedSelector[:position], unescapedSelector[position:], nil
}
func parseIdentifier(unescapedSelector string) (string, string, error) {
return parseToken(unescapedSelector, true)
}
func parseElementaryType(unescapedSelector string) (string, string, error) {
parsedType, rest, err := parseToken(unescapedSelector, false)
if err != nil {
return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
}
// handle arrays
for len(rest) > 0 && rest[0] == '[' {
parsedType = parsedType + string(rest[0])
rest = rest[1:]
for len(rest) > 0 && isDigit(rest[0]) {
parsedType = parsedType + string(rest[0])
rest = rest[1:]
}
if len(rest) == 0 || rest[0] != ']' {
return "", "", fmt.Errorf("failed to parse array: expected ']', got %c", unescapedSelector[0])
}
parsedType = parsedType + string(rest[0])
rest = rest[1:]
}
return parsedType, rest, nil
}
func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
return nil, "", fmt.Errorf("expected '(', got %c", unescapedSelector[0])
}
parsedType, rest, err := parseType(unescapedSelector[1:])
if err != nil {
return nil, "", fmt.Errorf("failed to parse type: %v", err)
}
result := []interface{}{parsedType}
for len(rest) > 0 && rest[0] != ')' {
parsedType, rest, err = parseType(rest[1:])
if err != nil {
return nil, "", fmt.Errorf("failed to parse type: %v", err)
}
result = append(result, parsedType)
}
if len(rest) == 0 || rest[0] != ')' {
return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
}
return result, rest[1:], nil
}
func parseType(unescapedSelector string) (interface{}, string, error) {
if len(unescapedSelector) == 0 {
return nil, "", fmt.Errorf("empty type")
}
if unescapedSelector[0] == '(' {
return parseCompositeType(unescapedSelector)
} else {
return parseElementaryType(unescapedSelector)
}
}
func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
arguments := make([]ArgumentMarshaling, 0)
for i, arg := range args {
// generate dummy name to avoid unmarshal issues
name := fmt.Sprintf("name%d", i)
if s, ok := arg.(string); ok {
arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
} else if components, ok := arg.([]interface{}); ok {
subArgs, err := assembleArgs(components)
if err != nil {
return nil, fmt.Errorf("failed to assemble components: %v", err)
}
arguments = append(arguments, ArgumentMarshaling{name, "tuple", "tuple", subArgs, false})
} else {
return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
}
}
return arguments, nil
}
// ParseSelector converts a method selector into a struct that can be JSON encoded
// and consumed by other functions in this package.
// Note, although uppercase letters are not part of the ABI spec, this function
// still accepts it as the general format is valid.
func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
name, rest, err := parseIdentifier(unescapedSelector)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
}
args := []interface{}{}
if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
rest = rest[2:]
} else {
args, rest, err = parseCompositeType(rest)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
}
}
if len(rest) > 0 {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
}
// Reassemble the fake ABI and constuct the JSON
fakeArgs, err := assembleArgs(args)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
}
return SelectorMarshaling{name, "function", fakeArgs}, nil
}
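For reference, `ParseSelector` (present only on the base side of this compare) is used like this (a short sketch; the JSON shape follows the `SelectorMarshaling` struct above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	sel, err := abi.ParseSelector("transfer(address,uint256)")
	if err != nil {
		panic(err)
	}
	// Inputs get synthetic names (name0, name1, ...) so the result
	// can round-trip through the regular ABI JSON unmarshalling.
	out, _ := json.Marshal(sel)
	fmt.Println(string(out))
}
```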

accounts/abi/selector_parser_test.go

@@ -1,54 +0,0 @@
package abi
import (
"fmt"
"log"
"reflect"
"testing"
)
func TestParseSelector(t *testing.T) {
mkType := func(types ...interface{}) []ArgumentMarshaling {
var result []ArgumentMarshaling
for i, typeOrComponents := range types {
name := fmt.Sprintf("name%d", i)
if typeName, ok := typeOrComponents.(string); ok {
result = append(result, ArgumentMarshaling{name, typeName, typeName, nil, false})
} else if components, ok := typeOrComponents.([]ArgumentMarshaling); ok {
result = append(result, ArgumentMarshaling{name, "tuple", "tuple", components, false})
} else {
log.Fatalf("unexpected type %T", typeOrComponents)
}
}
return result
}
tests := []struct {
input string
name string
args []ArgumentMarshaling
}{
{"noargs()", "noargs", []ArgumentMarshaling{}},
{"simple(uint256,uint256,uint256)", "simple", mkType("uint256", "uint256", "uint256")},
{"other(uint256,address)", "other", mkType("uint256", "address")},
{"withArray(uint256[],address[2],uint8[4][][5])", "withArray", mkType("uint256[]", "address[2]", "uint8[4][][5]")},
{"singleNest(bytes32,uint8,(uint256,uint256),address)", "singleNest", mkType("bytes32", "uint8", mkType("uint256", "uint256"), "address")},
{"multiNest(address,(uint256[],uint256),((address,bytes32),uint256))", "multiNest",
mkType("address", mkType("uint256[]", "uint256"), mkType(mkType("address", "bytes32"), "uint256"))},
}
for i, tt := range tests {
selector, err := ParseSelector(tt.input)
if err != nil {
t.Errorf("test %d: failed to parse selector '%v': %v", i, tt.input, err)
}
if selector.Name != tt.name {
t.Errorf("test %d: unexpected function name: '%s' != '%s'", i, selector.Name, tt.name)
}
if selector.Type != "function" {
t.Errorf("test %d: unexpected type: '%s' != '%s'", i, selector.Type, "function")
}
if !reflect.DeepEqual(selector.Inputs, tt.args) {
t.Errorf("test %d: unexpected args: '%v' != '%v'", i, selector.Inputs, tt.args)
}
}
}

accounts/abi/unpack.go

@@ -290,7 +290,7 @@ func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := big.NewInt(0).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output)))
-if offset.Cmp(outputLen) > 0 {
+if offset.Cmp(big.NewInt(int64(len(output)))) > 0 {
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
}
if offset.BitLen() > 63 {

accounts/accounts.go

@@ -46,7 +46,7 @@ const (
// accounts (derived from the same seed).
type Wallet interface {
// URL retrieves the canonical path under which this wallet is reachable. It is
-// used by upper layers to define a sorting order over all wallets from multiple
+// user by upper layers to define a sorting order over all wallets from multiple
// backends.
URL() URL
@@ -89,7 +89,7 @@ type Wallet interface {
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to descending into a child path to allow discovering accounts starting
+// opposed to decending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
@@ -105,7 +105,7 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
-// a password to decrypt the account, or a PIN code to verify the transaction),
+// a password to decrypt the account, or a PIN code o verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
@@ -124,13 +124,13 @@ type Wallet interface {
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
-// a password to decrypt the account, or a PIN code to verify the transaction),
+// a password to decrypt the account, or a PIN code o verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
//
-// This method should return the signature in 'canonical' format, with v 0 or 1.
+// This method should return the signature in 'canonical' format, with v 0 or 1
SignText(account Account, text []byte) ([]byte, error)
// SignTextWithPassphrase is identical to Signtext, but also takes a password
@@ -176,7 +176,7 @@ type Backend interface {
// TextHash is a helper function that calculates a hash for the given message that can be
// safely used to calculate a signature from.
//
-// The hash is calculated as
+// The hash is calulcated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
@@ -188,7 +188,7 @@ func TextHash(data []byte) []byte {
// TextAndHash is a helper function that calculates a hash for the given message that can be
// safely used to calculate a signature from.
//
-// The hash is calculated as
+// The hash is calulcated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
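The documented format is easy to verify directly; `TextHash` is exported from this package on both sides of the diff (a small illustrative snippet):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	// keccak256("\x19Ethereum Signed Message:\n" + "5" + "hello")
	hash := accounts.TextHash([]byte("hello"))
	fmt.Printf("0x%x\n", hash)
}
```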

accounts/errors.go

@@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
var ErrWalletAlreadyOpen = errors.New("wallet already open")
// ErrWalletClosed is returned if a wallet is attempted to be opened the
-// second time.
+// secodn time.
var ErrWalletClosed = errors.New("wallet closed")
// AuthNeededError is returned by backends for signing requests where the user

accounts/scwallet/wallet.go

@@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to descending into a child path to allow discovering accounts starting
+// opposed to decending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so

accounts/usbwallet/wallet.go

@@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
// accounts.
//
// Note, self derivation will increment the last component of the specified path
-// opposed to descending into a child path to allow discovering accounts starting
+// opposed to decending into a child path to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so

appveyor.yml

@@ -13,7 +13,7 @@ environment:
GETH_MINGW: 'C:\msys64\mingw32'
install:
-- git submodule update --init --depth 1 --recursive
+- git submodule update --init --depth 1
- go version
for:

build/checksums.txt

@@ -1,58 +1,37 @@
# This file contains sha256 checksums of optional build dependencies.
38f423db4cc834883f2b52344282fa7a39fbb93650dc62a11fdf0be6409bdad6 go1.18.src.tar.gz
70bb4a066997535e346c8bfa3e0dfe250d61100b17ccc5676274642447834969 go1.18.darwin-amd64.tar.gz
9cab6123af9ffade905525d79fc9ee76651e716c85f1f215872b5f2976782480 go1.18.darwin-arm64.tar.gz
e63492d4f38487331518eb4b50e670d853bb8d67e88596269af84bb9aca0b381 go1.18.freebsd-386.tar.gz
01cd67bbc12e659ff236ecebde1806f76452f7ca145c172d5ecdbf4f4803daae go1.18.freebsd-amd64.tar.gz
1c04cf4440b323a66328e0df95d409f955b9b475e58eae235fdd3d1f1cf02f4f go1.18.linux-386.tar.gz
e85278e98f57cdb150fe8409e6e5df5343ecb13cebf03a5d5ff12bd55a80264f go1.18.linux-amd64.tar.gz
7ac7b396a691e588c5fb57687759e6c4db84a2a3bbebb0765f4b38e5b1c5b00e go1.18.linux-arm64.tar.gz
a80fa43d1f4575fb030adbfbaa94acd860c6847820764eecb06c63b7c103612b go1.18.linux-armv6l.tar.gz
070351edac192483c074b38d08ec19251a83f8210765a532a84c3dcf8aec04d8 go1.18.linux-ppc64le.tar.gz
ea265f5e62fcaf941d53f0cdb81222d9668e1672a0d39d992f16ff0e87c0ee6b go1.18.linux-s390x.tar.gz
e23fd2a0509690fe7e63b2b1bcd4c39ed57b46ccde76f35dc0d16ca7fdbc5aaa go1.18.windows-386.zip
65c5c0c709a7ca1b357091b10b795b439d8b50e579d3893edab4c7e9b384f435 go1.18.windows-amd64.zip
1c454eb60c64d481965a165c623ff1ed6cf32d68c6b31f36069c8768d908f093 go1.18.windows-arm64.zip
2255eb3e4e824dd7d5fcdc2e7f84534371c186312e546fb1086a34c17752f431 go1.17.2.src.tar.gz
7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94 go1.17.2.darwin-amd64.tar.gz
ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904 go1.17.2.darwin-arm64.tar.gz
8cea5b8d1f8e8cbb58069bfed58954c71c5b1aca2f3c857765dae83bf724d0d7 go1.17.2.freebsd-386.tar.gz
c96e57218fb03e74d683ad63b1684d44c89d5e5b994f36102b33dce21b58499a go1.17.2.freebsd-amd64.tar.gz
8617f2e40d51076983502894181ae639d1d8101bfbc4d7463a2b442f239f5596 go1.17.2.linux-386.tar.gz
f242a9db6a0ad1846de7b6d94d507915d14062660616a61ef7c808a76e4f1676 go1.17.2.linux-amd64.tar.gz
a5a43c9cdabdb9f371d56951b14290eba8ce2f9b0db48fb5fc657943984fd4fc go1.17.2.linux-arm64.tar.gz
04d16105008230a9763005be05606f7eb1c683a3dbf0fbfed4034b23889cb7f2 go1.17.2.linux-armv6l.tar.gz
12e2dc7e0ffeebe77083f267ef6705fec1621cdf2ed6489b3af04a13597ed68d go1.17.2.linux-ppc64le.tar.gz
c4b2349a8d11350ca038b8c57f3cc58dc0b31284bcbed4f7fca39aeed28b4a51 go1.17.2.linux-s390x.tar.gz
8a85257a351996fdf045fe95ed5fdd6917dd48636d562dd11dedf193005a53e0 go1.17.2.windows-386.zip
fa6da0b829a66f5fab7e4e312fd6aa1b2d8f045c7ecee83b3d00f6fe5306759a go1.17.2.windows-amd64.zip
00575c85dc7a129ba892685a456b27a3f3670f71c8bfde1c5ad151f771d55df7 go1.17.2.windows-arm64.zip
03c181fc1bb29ea3e73cbb23399c43b081063833a7cf7554b94e5a98308df53e golangci-lint-1.45.2-linux-riscv64.deb
08a50bbbf451ede6d5354179eb3e14a5634e156dfa92cb9a2606f855a637e35b golangci-lint-1.45.2-linux-ppc64le.rpm
0d12f6ec1296b5a70e392aa88cd2295cceef266165eb7028e675f455515dd1c9 golangci-lint-1.45.2-linux-armv7.deb
10f2846e2e50e4ea8ae426ee62dcd2227b23adddd8e991aa3c065927ac948735 golangci-lint-1.45.2-linux-ppc64le.deb
1463049b744871168095e3e8f687247d6040eeb895955b869889ea151e0603ab golangci-lint-1.45.2-linux-arm64.tar.gz
15720f9c4c6f9324af695f081dc189adc7751b255759e78d7b2df1d7e9192533 golangci-lint-1.45.2-linux-amd64.deb
166d922e4d3cfe3d47786c590154a9c8ea689dff0aa92b73d2f5fc74fc570c29 golangci-lint-1.45.2-linux-arm64.rpm
1a3754c69f7cc19ab89cbdcc2550da4cf9abb3120383c6b3bd440c1ec22da2e6 golangci-lint-1.45.2-freebsd-386.tar.gz
1dec0aa46d4f0d241863b573f70129bdf1de9c595cf51172a840a588a4cd9fc5 golangci-lint-1.45.2-windows-amd64.zip
3198453806517c1ad988229f5e758ef850e671203f46d6905509df5bdf4dc24b golangci-lint-1.45.2-freebsd-armv7.tar.gz
46a3cd1749d7b98adc2dc01510ddbe21abe42689c8a53fb0e81662713629f215 golangci-lint-1.45.2-linux-386.deb
4e28bfb593d464b9e160f2acd5b71993836a183270bf8299b78ad31f7a168c0d golangci-lint-1.45.2-linux-arm64.deb
5157a58c8f9ab85c33af2e46f0d7c57a3b1e8953b81d61130e292e09f545cfab golangci-lint-1.45.2-linux-mips64le.tar.gz
518cd027644129fbf8ec4f02bd6f9ad7278aae826f92b63c80d4d0819ddde49a golangci-lint-1.45.2-linux-armv6.rpm
595ad6c6dade4c064351bc309f411703e457f8ffbb7a1806b3d8ee713333427f golangci-lint-1.45.2-linux-amd64.tar.gz
6994d6c80f0730751090986184a3481b4be2e6b6e84416238a2b857910045a4f golangci-lint-1.45.2-windows-arm64.zip
6c81652fc340118811b487f713c441fc6f527800bf5fd11b8929d08124efa015 golangci-lint-1.45.2-linux-armv7.tar.gz
726cb045559b7518bafdd3459de70a0647c087eb1b4634627a4b2e95b1258580 golangci-lint-1.45.2-freebsd-amd64.tar.gz
77df3774cdfda49b956d4a0e676da9a9b883f496ee37293c530770fef6b1d24e golangci-lint-1.45.2-linux-mips64.deb
7a9840f279a7d5d405bb434e101c2290964b3729630ac2add29280b962b7b9a5 golangci-lint-1.45.2-windows-armv6.zip
7d4bf9a5d80ec467aaaf66e78dbdcab567bbc6ba8151334c714eee58766aae32 golangci-lint-1.45.2-windows-armv7.zip
7e5f8821d39bb11d273b0841b34355f56bd5a45a2d5179f0d09e614e0efc0482 golangci-lint-1.45.2-linux-s390x.rpm
828de1bde796b23d8656b17a8885fbd879ef612795d62d1e4618126b419728b5 golangci-lint-1.45.2-linux-mips64.rpm
879a52107a797678a03c175cc7cf441411a14a01f66dc87f70bdfa304a4129a6 golangci-lint-1.45.2-windows-386.zip
87b6c7e3a3769f7d9abeb3bb82119b3c91e3c975300f6834fdeef8b2e37c98ff golangci-lint-1.45.2-linux-amd64.rpm
8b605c6d686c8af53ecc4ef39544541eeb1644d34cc10f9ffc5087808210c4ff golangci-lint-1.45.2-linux-s390x.deb
9427dbf51d0ac6f73a0f992838bd40c817470cc5bf6c8e2e2bea6fac46d7af6e golangci-lint-1.45.2-linux-ppc64le.tar.gz
995e509e895ca6a64ffc7395ac884d5961bdec98423cb896b17f345a9b4a19cf golangci-lint-1.45.2-darwin-amd64.tar.gz
a3f36278f2ea5516341e9071a2df6e65df272be80230b5406a12b72c6d425bee golangci-lint-1.45.2-linux-armv7.rpm
a5e12c50c23e87ac1deffc872f92ae85427b1198604969399805ae47cfe43f08 golangci-lint-1.45.2-linux-riscv64.tar.gz
aa8fa1be0729dbc2fbc4e01e82027097613eee74bd686ebef20f860b01fff8b3 golangci-lint-1.45.2-freebsd-armv6.tar.gz
c2b9669decc1b638cf2ee9060571af4e255f6dfcbb225c293e3a7ee4bb2c7217 golangci-lint-1.45.2-darwin-arm64.tar.gz
dfa8bdaf0387aec1cd5c1aa8857f67b2bbdfc2e42efce540c8fb9bbe3e8af302 golangci-lint-1.45.2-linux-armv6.tar.gz
eb8b8539dd017eee5c131ea9b875893ab2cebeeca41e8c6624907fb02224d643 golangci-lint-1.45.2-linux-386.rpm
ed6c7e17a857f30d715c5302fa250d95936936b277024bffea201187a257d7a7 golangci-lint-1.45.2-linux-armv6.deb
ef4d0154ace4001f01b288baeb118176242efb4fd163e178763e3213b77ef30b golangci-lint-1.45.2-linux-mips64le.deb
ef7002a2229f5ff5ba201a715fcf877664ea88decbe58e69d163293913024955 golangci-lint-1.45.2-linux-s390x.tar.gz
f13ecbd09228632e6bbe91a8324bd675c406eed22eb6d2c1e8192eed9ec4f914 golangci-lint-1.45.2-linux-386.tar.gz
f4cd9cfb09252f51699407277512263cae8409b665dd764f55a34738d0e89edc golangci-lint-1.45.2-linux-riscv64.rpm
fb1945dc59d37c9d14bf0a4aea11ea8651fa0e1d582ea80c4c44d0a536c08893 golangci-lint-1.45.2-linux-mips64.tar.gz
fe542c22738010f453c735a3c410decfd3784d1bd394b395c298ee298fc4c606 golangci-lint-1.45.2-linux-mips64le.rpm
d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
14d912a3fa856830339472fc4dc341933adf15f37bdb7130bbbfcf960ecf4809 golangci-lint-1.42.0-freebsd-386.tar.gz
337257fccc9baeb5ee1cd7e70c153e9d9f59d3afde46d631659500048afbdf80 golangci-lint-1.42.0-freebsd-amd64.tar.gz
6debcc266b629359fdd8eef4f4abb05a621604079d27016265afb5b4593b0eff golangci-lint-1.42.0-freebsd-armv6.tar.gz
878f0e190169db2ce9dde8cefbd99adc4fe28b90b68686bbfcfcc2085e6d693e golangci-lint-1.42.0-freebsd-armv7.tar.gz
42c78e31faf62b225363eff1b1d2aa74f9dbcb75686c8914aa3e90d6af65cece golangci-lint-1.42.0-linux-386.tar.gz
6937f62f8e2329e94822dc11c10b871ace5557ae1fcc4ee2f9980cd6aecbc159 golangci-lint-1.42.0-linux-amd64.tar.gz
2cf8d23d96cd854a537b355dab2962b960b88a06b615232599f066afd233f246 golangci-lint-1.42.0-linux-arm64.tar.gz
08b003d1ed61367473886defc957af5301066e62338e5d96a319c34dadc4c1d1 golangci-lint-1.42.0-linux-armv6.tar.gz
c7c00ec4845e806a1f32685f5b150219e180bd6d6a9d584be8d27f0c41d7a1bf golangci-lint-1.42.0-linux-armv7.tar.gz
3650fcf29eb3d8ee326d77791a896b15259eb2d5bf77437dc72e7efe5af6bd40 golangci-lint-1.42.0-linux-mips64.tar.gz
f51ae003fdbca4fef78ba73e2eb736a939c8eaa178cd452234213b489da5a420 golangci-lint-1.42.0-linux-mips64le.tar.gz
1b0bb7b8b22cc4ea7da44fd5ad5faaf6111d0677e01cc6f961b62a96537de2c6 golangci-lint-1.42.0-linux-ppc64le.tar.gz
8cb56927eb75e572450efbe0ff0f9cf3f56dc9faa81d9e8d30d6559fc1d06e6d golangci-lint-1.42.0-linux-riscv64.tar.gz
5ac41cd31825a176b21505a371a7b307cd9cdf17df0f35bbb3bf1466f9356ccc golangci-lint-1.42.0-linux-s390x.tar.gz
e1cebd2af621ac4b64c20937df92c3819264f2174c92f51e196db1e64ae097e0 golangci-lint-1.42.0-windows-386.zip
7e70fcde8e87a17cae0455df07d257ebc86669f3968d568e12727fa24bbe9883 golangci-lint-1.42.0-windows-amd64.zip
59da7ce1bda432616bfc28ae663e52c3675adee8d9bf5959fafd657c159576ab golangci-lint-1.42.0-windows-armv6.zip
65f62dda937bfcede0326ac77abe947ce1548931e6e13298ca036cb31f224db5 golangci-lint-1.42.0-windows-armv7.zip

View File

@@ -130,14 +130,13 @@ var (
// Distros for which packages are created.
// Note: vivid is unsupported because there is no golang-1.6 package for it.
// Note: the following Ubuntu releases have been officially deprecated on Launchpad:
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy, hirsute
// wily, yakkety, zesty, artful, cosmic, disco, eoan, groovy
debDistroGoBoots = map[string]string{
"trusty": "golang-1.11", // EOL: 04/2024
"xenial": "golang-go", // EOL: 04/2026
"bionic": "golang-go", // EOL: 04/2028
"focal": "golang-go", // EOL: 04/2030
"impish": "golang-go", // EOL: 07/2022
// "jammy": "golang-go", // EOL: 04/2027
"trusty": "golang-1.11",
"xenial": "golang-go",
"bionic": "golang-go",
"focal": "golang-go",
"hirsute": "golang-go",
}
debGoBootPaths = map[string]string{
@@ -148,7 +147,7 @@ var (
// This is the version of go that will be downloaded by
//
// go run ci.go install -dlgo
dlgoVersion = "1.18"
dlgoVersion = "1.17.2"
)
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
@@ -332,21 +331,12 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
const version = "1.45.2"
const version = "1.42.0"
csdb := build.MustLoadChecksums("build/checksums.txt")
arch := runtime.GOARCH
ext := ".tar.gz"
if runtime.GOOS == "windows" {
ext = ".zip"
}
if arch == "arm" {
arch += "v" + os.Getenv("GOARM")
}
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
archivePath := filepath.Join(cachedir, base+ext)
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
archivePath := filepath.Join(cachedir, base+".tar.gz")
if err := csdb.DownloadFile(url, archivePath); err != nil {
log.Fatal(err)
}
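
The download above is checksum-gated: build.MustLoadChecksums loads the `<sha256>  <filename>` entries from build/checksums.txt (the list at the top of this compare) and DownloadFile rejects any artifact whose digest does not match. A minimal standalone sketch of that verification step; the paths in main are illustrative, not geth's actual cache layout.

package main

import (
	"bufio"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
)

// verify re-derives the SHA-256 of the file at path and compares it against
// the digest recorded for name in a checksums.txt-style database.
func verify(checksums, path, name string) error {
	db, err := os.Open(checksums)
	if err != nil {
		return err
	}
	defer db.Close()

	var want string
	scanner := bufio.NewScanner(db)
	for scanner.Scan() {
		if fields := strings.Fields(scanner.Text()); len(fields) == 2 && fields[1] == name {
			want = fields[0]
			break
		}
	}
	if want == "" {
		return fmt.Errorf("no checksum entry for %s", name)
	}

	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != want {
		return fmt.Errorf("checksum mismatch for %s: got %s, want %s", name, got, want)
	}
	return nil
}

func main() {
	// Hypothetical locations, mirroring the base+ext naming in downloadLinter.
	fmt.Println(verify("build/checksums.txt",
		"cache/golangci-lint-1.42.0-linux-amd64.tar.gz",
		"golangci-lint-1.42.0-linux-amd64.tar.gz"))
}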
@@ -1243,21 +1233,21 @@ func doPurge(cmdline []string) {
// Iterate over the blobs, collect and sort all unstable builds
for i := 0; i < len(blobs); i++ {
if !strings.Contains(*blobs[i].Name, "unstable") {
if !strings.Contains(blobs[i].Name, "unstable") {
blobs = append(blobs[:i], blobs[i+1:]...)
i--
}
}
for i := 0; i < len(blobs); i++ {
for j := i + 1; j < len(blobs); j++ {
if blobs[i].Properties.LastModified.After(*blobs[j].Properties.LastModified) {
if blobs[i].Properties.LastModified.After(blobs[j].Properties.LastModified) {
blobs[i], blobs[j] = blobs[j], blobs[i]
}
}
}
// Filter out all archives more recent than the given threshold
for i, blob := range blobs {
if time.Since(*blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
if time.Since(blob.Properties.LastModified) < time.Duration(*limit)*24*time.Hour {
blobs = blobs[:i]
break
}
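
Aside from the SDK pointer changes, the purge logic is the same on both sides: a quadratic swap sort by LastModified followed by a retention cutoff. For readers untangling the interleaved lines, an equivalent idiomatic sketch with a stand-in blob type (the real type comes from the Azure SDK and differs between the two sides of this diff):

package main

import (
	"fmt"
	"sort"
	"time"
)

// blob is a stand-in for the Azure SDK's blob item; only the fields the
// purge logic needs are modeled here.
type blob struct {
	Name         string
	LastModified time.Time
}

func main() {
	now := time.Now()
	blobs := []blob{
		{"geth-unstable-b", now.Add(-48 * time.Hour)},
		{"geth-unstable-a", now.Add(-240 * time.Hour)},
	}
	// Oldest first, matching the swap loop in doPurge.
	sort.Slice(blobs, func(i, j int) bool {
		return blobs[i].LastModified.Before(blobs[j].LastModified)
	})
	// Keep only blobs older than the retention limit (e.g. 7 days).
	limit := 7
	for i, b := range blobs {
		if time.Since(b.LastModified) < time.Duration(limit)*24*time.Hour {
			blobs = blobs[:i]
			break
		}
	}
	fmt.Println(blobs) // only the 10-day-old unstable build remains
}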

View File

@@ -661,7 +661,7 @@ func signer(c *cli.Context) error {
if err != nil {
utils.Fatalf("Could not register API: %w", err)
}
handler := node.NewHTTPHandlerStack(srv, cors, vhosts, nil)
handler := node.NewHTTPHandlerStack(srv, cors, vhosts)
// set port
port := c.Int(rpcPortFlag.Name)
@@ -898,7 +898,7 @@ func testExternalUI(api *core.SignerAPI) {
addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899")
data := `{"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"test","type":"uint8"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":"1","verifyingContract":"0xCCCcccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","test":"3","wallet":"0xcD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB","test":"2"},"contents":"Hello, Bob!"}}`
//_, err := api.SignData(ctx, accounts.MimetypeTypedData, *addr, hexutil.Encode([]byte(data)))
var typedData apitypes.TypedData
var typedData core.TypedData
json.Unmarshal([]byte(data), &typedData)
_, err := api.SignTypedData(ctx, *addr, typedData)
expectApprove("sign 712 typed data", err)
@@ -1025,7 +1025,7 @@ func GenDoc(ctx *cli.Context) {
"of the work in canonicalizing and making sense of the data, and it's up to the UI to present" +
"the user with the contents of the `message`"
sighash, msg := accounts.TextAndHash([]byte("hello world"))
messages := []*apitypes.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}}
messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}}
add("SignDataRequest", desc, &core.SignDataRequest{
Address: common.NewMixedcaseAddress(a),

View File

@@ -26,7 +26,6 @@ import (
"os"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
"github.com/ethereum/go-ethereum/core/types"
@@ -68,13 +67,6 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int {
return sum
}
func (c *Chain) RootAt(height int) common.Hash {
if height < c.Len() {
return c.blocks[height].Root()
}
return common.Hash{}
}
// ForkID gets the fork id of the chain.
func (c *Chain) ForkID() forkid.ID {
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))

View File

@@ -96,19 +96,6 @@ func (s *Suite) dial66() (*Conn, error) {
return conn, nil
}
// dialSnap attempts to dial the given node and perform a handshake,
// returning the created Conn with additional snap/1 capabilities if
// successful.
func (s *Suite) dialSnap() (*Conn, error) {
conn, err := s.dial66()
if err != nil {
return nil, fmt.Errorf("dial failed: %v", err)
}
conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
conn.ourHighestSnapProtoVersion = 1
return conn, nil
}
// peer performs both the protocol handshake and the status message
// exchange with the node in order to peer with it.
func (c *Conn) peer(chain *Chain, status *Status) error {
@@ -144,11 +131,7 @@ func (c *Conn) handshake() error {
}
c.negotiateEthProtocol(msg.Caps)
if c.negotiatedProtoVersion == 0 {
return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
}
// If we require snap, verify that it was negotiated
if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
}
return nil
default:
@@ -160,21 +143,15 @@ func (c *Conn) handshake() error {
// advertised capability from peer.
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
var highestEthVersion uint
var highestSnapVersion uint
for _, capability := range caps {
switch capability.Name {
case "eth":
if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
highestEthVersion = capability.Version
}
case "snap":
if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
highestSnapVersion = capability.Version
}
if capability.Name != "eth" {
continue
}
if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
highestEthVersion = capability.Version
}
}
c.negotiatedProtoVersion = highestEthVersion
c.negotiatedSnapProtoVersion = highestSnapVersion
}
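
Both sides of this hunk apply the same negotiation rule: take the highest version the peer advertises for a capability, capped at the highest version we support. A condensed sketch of that rule with a local stand-in for p2p.Cap; the helper name is ours, not geth's.

package main

import "fmt"

// Cap mirrors the (name, version) capability pairs exchanged in the
// devp2p Hello message (p2p.Cap in go-ethereum).
type Cap struct {
	Name    string
	Version uint
}

// highestSupported picks the highest peer-advertised version of a named
// capability that we also support, or 0 if nothing overlaps.
func highestSupported(caps []Cap, name string, ourMax uint) uint {
	var best uint
	for _, c := range caps {
		if c.Name == name && c.Version > best && c.Version <= ourMax {
			best = c.Version
		}
	}
	return best
}

func main() {
	caps := []Cap{{"eth", 65}, {"eth", 66}, {"snap", 1}}
	fmt.Println(highestSupported(caps, "eth", 66)) // 66
	fmt.Println(highestSupported(caps, "snap", 1)) // 1
	fmt.Println(highestSupported(caps, "les", 4))  // 0: cannot negotiate
}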
// statusExchange performs a `Status` message exchange with the given node.
@@ -348,15 +325,6 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo
}
}
func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
defer c.SetReadDeadline(time.Time{})
c.SetReadDeadline(time.Now().Add(5 * time.Second))
if err := c.Write(msg); err != nil {
return nil, fmt.Errorf("could not write to connection: %v", err)
}
return c.ReadSnap(id)
}
// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
// write request

View File

@@ -1,675 +0,0 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethtest
import (
"bytes"
"errors"
"fmt"
"math/rand"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth/protocols/snap"
"github.com/ethereum/go-ethereum/internal/utesting"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
func (s *Suite) TestSnapStatus(t *utesting.T) {
conn, err := s.dialSnap()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err := conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
}
type accRangeTest struct {
nBytes uint64
root common.Hash
origin common.Hash
limit common.Hash
expAccounts int
expFirst common.Hash
expLast common.Hash
}
// TestSnapGetAccountRange tests various forms of GetAccountRange requests.
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
var (
root = s.chain.RootAt(999)
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
zero = common.Hash{}
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b")
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790")
)
for i, tc := range []accRangeTest{
// Tests decreasing the number of bytes
{4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
{3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")},
{2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")},
{1, root, zero, ffHash, 1, firstKey, firstKey},
// Tests variations of the range
//
// [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds
{4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey},
// [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds)
{4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey},
{4000, root, zero, zero, 1, firstKey, firstKey},
{4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
{4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")},
// Test different root hashes
//
// A stateroot that does not exist
{4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero},
// The genesis stateroot (we expect it to not be served)
{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
// A 127 block old stateroot, expected to be served
{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
// A root which is not actually an account root, but a storage root
{4000, storageRoot, zero, ffHash, 0, zero, zero},
// And some nonsensical requests
//
// range from [0xFF to 0x00], wrong order. Expect not to be serviced
{4000, root, ffHash, zero, 0, zero, zero},
// range from [firstkey, firstkey-1], wrong order. Expect to get first key.
{4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey},
// range from [firstkey, 0], wrong order. Expect to get first key.
{4000, root, firstKey, zero, 1, firstKey, firstKey},
// Max bytes: 0. Expect to deliver one account.
{0, root, zero, ffHash, 1, firstKey, firstKey},
} {
if err := s.snapGetAccountRange(t, &tc); err != nil {
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
}
}
}
type stRangesTest struct {
root common.Hash
accounts []common.Hash
origin []byte
limit []byte
nBytes uint64
expSlots int
}
// TestSnapGetStorageRanges tests various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
var (
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
zero = common.Hash{}
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
)
for i, tc := range []stRangesTest{
{
root: s.chain.RootAt(999),
accounts: []common.Hash{secondKey, firstKey},
origin: zero[:],
limit: ffHash[:],
nBytes: 500,
expSlots: 0,
},
/*
Some tests against this account:
{
"balance": "0",
"nonce": 1,
"root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
"storage": {
"0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
},
"key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
}
*/
{ // [:] -> [slot1, slot2, slot3]
root: s.chain.RootAt(999),
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
origin: zero[:],
limit: ffHash[:],
nBytes: 500,
expSlots: 3,
},
{ // [slot1:] -> [slot1, slot2, slot3]
root: s.chain.RootAt(999),
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
limit: ffHash[:],
nBytes: 500,
expSlots: 3,
},
{ // [slot1+ :] -> [slot2, slot3]
root: s.chain.RootAt(999),
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"),
limit: ffHash[:],
nBytes: 500,
expSlots: 2,
},
{ // [slot1:slot2] -> [slot1, slot2]
root: s.chain.RootAt(999),
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
nBytes: 500,
expSlots: 2,
},
{ // [slot1+:slot2+] -> [slot2, slot3]
root: s.chain.RootAt(999),
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"),
nBytes: 500,
expSlots: 2,
},
} {
if err := s.snapGetStorageRanges(t, &tc); err != nil {
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
}
}
}
type byteCodesTest struct {
nBytes uint64
hashes []common.Hash
expHashes int
}
var (
// emptyRoot is the known root hash of an empty trie.
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
// emptyCode is the known hash of the empty EVM bytecode.
emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
)
// TestSnapGetByteCodes tests various forms of GetByteCodes requests.
func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
// The halfchain import should yield these bytecodes
var hcBytecodes []common.Hash
for _, s := range []string{
"0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317",
"0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3",
"0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44",
"0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6",
"0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c",
"0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04",
"0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49",
"0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142",
"0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1",
"0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb",
"0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d",
"0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732",
"0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77",
"0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f",
"0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373",
"0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb",
"0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e",
"0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6",
"0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35",
"0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b",
"0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f",
"0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce",
"0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b",
"0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a",
"0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49",
} {
hcBytecodes = append(hcBytecodes, common.HexToHash(s))
}
for i, tc := range []byteCodesTest{
// A few stateroots
{
nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)},
expHashes: 0,
},
{
nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)},
expHashes: 0,
},
// Empties
{
nBytes: 10000, hashes: []common.Hash{emptyRoot},
expHashes: 0,
},
{
nBytes: 10000, hashes: []common.Hash{emptyCode},
expHashes: 1,
},
{
nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
expHashes: 3,
},
// The existing bytecodes
{
nBytes: 10000, hashes: hcBytecodes,
expHashes: len(hcBytecodes),
},
// The existing, with limited byte arg
{
nBytes: 1, hashes: hcBytecodes,
expHashes: 1,
},
{
nBytes: 0, hashes: hcBytecodes,
expHashes: 1,
},
{
nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]},
expHashes: 4,
},
} {
if err := s.snapGetByteCodes(t, &tc); err != nil {
t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
}
}
}
type trieNodesTest struct {
root common.Hash
paths []snap.TrieNodePathSet
nBytes uint64
expHashes []common.Hash
expReject bool
}
func decodeNibbles(nibbles []byte, bytes []byte) {
for bi, ni := 0, 0; ni < len(nibbles); bi, ni = bi+1, ni+2 {
bytes[bi] = nibbles[ni]<<4 | nibbles[ni+1]
}
}
// hasTerm returns whether a hex key has the terminator flag.
func hasTerm(s []byte) bool {
return len(s) > 0 && s[len(s)-1] == 16
}
func keybytesToHex(str []byte) []byte {
l := len(str)*2 + 1
var nibbles = make([]byte, l)
for i, b := range str {
nibbles[i*2] = b / 16
nibbles[i*2+1] = b % 16
}
nibbles[l-1] = 16
return nibbles
}
func hexToCompact(hex []byte) []byte {
terminator := byte(0)
if hasTerm(hex) {
terminator = 1
hex = hex[:len(hex)-1]
}
buf := make([]byte, len(hex)/2+1)
buf[0] = terminator << 5 // the flag byte
if len(hex)&1 == 1 {
buf[0] |= 1 << 4 // odd flag
buf[0] |= hex[0] // first nibble is contained in the first byte
hex = hex[1:]
}
decodeNibbles(hex, buf[1:])
return buf
}
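
These helpers are local copies of the trie package's hex-prefix ("compact") encoding: a key is expanded to nibbles with an optional terminator, then packed two nibbles per byte behind a flag byte recording parity and termination. A self-contained sketch reproducing the two functions so the flag-byte behaviour can be run directly (used by pathTo below); the sample key is arbitrary.

package main

import "fmt"

// keybytesToHex and hexToCompact as above, reproduced so this sketch
// compiles on its own.
func keybytesToHex(str []byte) []byte {
	l := len(str)*2 + 1
	nibbles := make([]byte, l)
	for i, b := range str {
		nibbles[i*2] = b / 16
		nibbles[i*2+1] = b % 16
	}
	nibbles[l-1] = 16 // terminator nibble
	return nibbles
}

func hexToCompact(hex []byte) []byte {
	terminator := byte(0)
	if len(hex) > 0 && hex[len(hex)-1] == 16 {
		terminator = 1
		hex = hex[:len(hex)-1]
	}
	buf := make([]byte, len(hex)/2+1)
	buf[0] = terminator << 5 // 0x20 marks a terminated (leaf) path
	if len(hex)&1 == 1 {
		buf[0] |= 1 << 4 // odd-length flag
		buf[0] |= hex[0] // first nibble rides in the flag byte
		hex = hex[1:]
	}
	for bi, ni := 1, 0; ni < len(hex); bi, ni = bi+1, ni+2 {
		buf[bi] = hex[ni]<<4 | hex[ni+1]
	}
	return buf
}

func main() {
	hex := keybytesToHex([]byte{0x12, 0x34})  // [1 2 3 4 16]
	fmt.Printf("%x\n", hexToCompact(hex))     // 201234: terminated, even length
	fmt.Printf("%x\n", hexToCompact(hex[:3])) // 1123: not terminated, odd length
}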
// TestSnapTrieNodes tests various forms of GetTrieNodes requests.
func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
// helper function to iterate the key, and generate the compact-encoded
// trie paths along the way.
pathTo := func(length int) snap.TrieNodePathSet {
hex := keybytesToHex(key)[:length]
hex[len(hex)-1] = 0 // remove term flag
hKey := hexToCompact(hex)
return snap.TrieNodePathSet{hKey}
}
var accPaths []snap.TrieNodePathSet
for i := 1; i <= 65; i++ {
accPaths = append(accPaths, pathTo(i))
}
empty := emptyCode
for i, tc := range []trieNodesTest{
{
root: s.chain.RootAt(999),
paths: nil,
nBytes: 500,
expHashes: nil,
},
{
root: s.chain.RootAt(999),
paths: []snap.TrieNodePathSet{
snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off
snap.TrieNodePathSet{[]byte{0}},
},
nBytes: 5000,
expHashes: []common.Hash{},
expReject: true,
},
{
root: s.chain.RootAt(999),
paths: []snap.TrieNodePathSet{
snap.TrieNodePathSet{[]byte{0}},
snap.TrieNodePathSet{[]byte{1}, []byte{0}},
},
nBytes: 5000,
//0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf
expHashes: []common.Hash{s.chain.RootAt(999)},
},
{ // nonsensically long path
root: s.chain.RootAt(999),
paths: []snap.TrieNodePathSet{
snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
},
nBytes: 5000,
expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")},
},
{
root: s.chain.RootAt(0),
paths: []snap.TrieNodePathSet{
snap.TrieNodePathSet{[]byte{0}},
snap.TrieNodePathSet{[]byte{1}, []byte{0}},
},
nBytes: 5000,
expHashes: []common.Hash{},
},
{
// The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
root: s.chain.RootAt(999),
paths: accPaths,
nBytes: 5000,
expHashes: []common.Hash{
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
empty, empty, empty},
},
{
// Basically the same as above, with different ordering
root: s.chain.RootAt(999),
paths: []snap.TrieNodePathSet{
accPaths[10], accPaths[1], accPaths[0],
},
nBytes: 5000,
expHashes: []common.Hash{
empty,
common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
},
},
} {
if err := s.snapGetTrieNodes(t, &tc); err != nil {
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
}
}
}
func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
conn, err := s.dialSnap()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
// write request
req := &GetAccountRange{
ID: uint64(rand.Int63()),
Root: tc.root,
Origin: tc.origin,
Limit: tc.limit,
Bytes: tc.nBytes,
}
resp, err := conn.snapRequest(req, req.ID, s.chain)
if err != nil {
return fmt.Errorf("account range request failed: %v", err)
}
var res *snap.AccountRangePacket
if r, ok := resp.(*AccountRange); !ok {
return fmt.Errorf("account range response wrong: %T %v", resp, resp)
} else {
res = (*snap.AccountRangePacket)(r)
}
if exp, got := tc.expAccounts, len(res.Accounts); exp != got {
return fmt.Errorf("expected %d accounts, got %d", exp, got)
}
// Check that the encoding order is correct
for i := 1; i < len(res.Accounts); i++ {
if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
}
}
var (
hashes []common.Hash
accounts [][]byte
proof = res.Proof
)
hashes, accounts, err = res.Unpack()
if err != nil {
return err
}
if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
return nil
}
if len(hashes) > 0 {
if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
}
if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
}
}
// Reconstruct a partial trie from the response and verify it
keys := make([][]byte, len(hashes))
for i, key := range hashes {
keys[i] = common.CopyBytes(key[:])
}
nodes := make(light.NodeList, len(proof))
for i, node := range proof {
nodes[i] = node
}
proofdb := nodes.NodeSet()
var end []byte
if len(keys) > 0 {
end = keys[len(keys)-1]
}
_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
return err
}
func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error {
conn, err := s.dialSnap()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
// write request
req := &GetStorageRanges{
ID: uint64(rand.Int63()),
Root: tc.root,
Accounts: tc.accounts,
Origin: tc.origin,
Limit: tc.limit,
Bytes: tc.nBytes,
}
resp, err := conn.snapRequest(req, req.ID, s.chain)
if err != nil {
return fmt.Errorf("account range request failed: %v", err)
}
var res *snap.StorageRangesPacket
if r, ok := resp.(*StorageRanges); !ok {
return fmt.Errorf("account range response wrong: %T %v", resp, resp)
} else {
res = (*snap.StorageRangesPacket)(r)
}
gotSlots := 0
// Ensure the ranges are monotonically increasing
for i, slots := range res.Slots {
gotSlots += len(slots)
for j := 1; j < len(slots); j++ {
if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
}
}
}
if exp, got := tc.expSlots, gotSlots; exp != got {
return fmt.Errorf("expected %d slots, got %d", exp, got)
}
return nil
}
func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
conn, err := s.dialSnap()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
// write request
req := &GetByteCodes{
ID: uint64(rand.Int63()),
Hashes: tc.hashes,
Bytes: tc.nBytes,
}
resp, err := conn.snapRequest(req, req.ID, s.chain)
if err != nil {
return fmt.Errorf("getBytecodes request failed: %v", err)
}
var res *snap.ByteCodesPacket
if r, ok := resp.(*ByteCodes); !ok {
return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp)
} else {
res = (*snap.ByteCodesPacket)(r)
}
if exp, got := tc.expHashes, len(res.Codes); exp != got {
for i, c := range res.Codes {
fmt.Printf("%d. %#x\n", i, c)
}
return fmt.Errorf("expected %d bytecodes, got %d", exp, got)
}
// Cross reference the requested bytecodes with the response to find gaps
// that the serving node is missing
var (
bytecodes = res.Codes
hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
hash = make([]byte, 32)
codes = make([][]byte, len(req.Hashes))
)
for i, j := 0, 0; i < len(bytecodes); i++ {
// Find the next hash that we've been served, leaving misses with nils
hasher.Reset()
hasher.Write(bytecodes[i])
hasher.Read(hash)
for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) {
j++
}
if j < len(req.Hashes) {
codes[j] = bytecodes[i]
j++
continue
}
// We've either run out of hashes, or got unrequested data
return errors.New("unexpected bytecode")
}
return nil
}
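
The cross-referencing loop above identifies each served blob by its Keccak-256 hash and walks the request list in order, leaving nil gaps for hashes the peer skipped. A stripped-down sketch of the hash-then-match step using the same x/crypto sha3 primitive; the bytecode blob here is a made-up two-byte example.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// keccak256 hashes a blob the same way the test hashes served bytecodes
// before matching them against the requested hashes.
func keccak256(blob []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(blob)
	return h.Sum(nil)
}

func main() {
	code := []byte{0x60, 0x00}             // hypothetical bytecode (PUSH1 0x00)
	requested := [][]byte{keccak256(code)} // hashes we asked the peer for
	served := [][]byte{code}               // blobs the peer returned

	for _, blob := range served {
		got := keccak256(blob)
		ok := false
		for _, want := range requested {
			if bytes.Equal(got, want) {
				ok = true
				break
			}
		}
		fmt.Println("requested:", ok) // true: the served blob was asked for
	}
}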
func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
conn, err := s.dialSnap()
if err != nil {
t.Fatalf("dial failed: %v", err)
}
defer conn.Close()
if err = conn.peer(s.chain, nil); err != nil {
t.Fatalf("peering failed: %v", err)
}
// write request
req := &GetTrieNodes{
ID: uint64(rand.Int63()),
Root: tc.root,
Paths: tc.paths,
Bytes: tc.nBytes,
}
resp, err := conn.snapRequest(req, req.ID, s.chain)
if err != nil {
if tc.expReject {
return nil
}
return fmt.Errorf("trienodes request failed: %v", err)
}
var res *snap.TrieNodesPacket
if r, ok := resp.(*TrieNodes); !ok {
return fmt.Errorf("trienodes response wrong: %T %v", resp, resp)
} else {
res = (*snap.TrieNodesPacket)(r)
}
// Check the correctness
// Cross reference the requested trienodes with the response to find gaps
// that the serving node is missing
hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
hash := make([]byte, 32)
trienodes := res.Nodes
if got, want := len(trienodes), len(tc.expHashes); got != want {
return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
}
for i, trienode := range trienodes {
hasher.Reset()
hasher.Write(trienode)
hasher.Read(hash)
if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) {
fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want)
err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want)
}
}
return err
}

View File

@@ -1,36 +0,0 @@
package ethtest
import "github.com/ethereum/go-ethereum/eth/protocols/snap"
// GetAccountRange represents an account range query.
type GetAccountRange snap.GetAccountRangePacket
func (g GetAccountRange) Code() int { return 33 }
type AccountRange snap.AccountRangePacket
func (g AccountRange) Code() int { return 34 }
type GetStorageRanges snap.GetStorageRangesPacket
func (g GetStorageRanges) Code() int { return 35 }
type StorageRanges snap.StorageRangesPacket
func (g StorageRanges) Code() int { return 36 }
type GetByteCodes snap.GetByteCodesPacket
func (g GetByteCodes) Code() int { return 37 }
type ByteCodes snap.ByteCodesPacket
func (g ByteCodes) Code() int { return 38 }
type GetTrieNodes snap.GetTrieNodesPacket
func (g GetTrieNodes) Code() int { return 39 }
type TrieNodes snap.TrieNodesPacket
func (g TrieNodes) Code() int { return 40 }

View File

@@ -125,16 +125,6 @@ func (s *Suite) Eth66Tests() []utesting.Test {
}
}
func (s *Suite) SnapTests() []utesting.Test {
return []utesting.Test{
{Name: "TestSnapStatus", Fn: s.TestSnapStatus},
{Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange},
{Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes},
{Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes},
{Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges},
}
}
var (
eth66 = true // indicates whether suite should negotiate eth66 connection
eth65 = false // indicates whether suite should negotiate eth65 connection or below.

View File

@@ -55,27 +55,6 @@ func TestEthSuite(t *testing.T) {
}
}
func TestSnapSuite(t *testing.T) {
geth, err := runGeth()
if err != nil {
t.Fatalf("could not run geth: %v", err)
}
defer geth.Close()
suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
if err != nil {
t.Fatalf("could not create new test suite: %v", err)
}
for _, test := range suite.SnapTests() {
t.Run(test.Name, func(t *testing.T) {
result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
if result[0].Failed {
t.Fatal()
}
})
}
}
// runGeth creates and starts a geth node
func runGeth() (*node.Node, error) {
stack, err := node.New(&node.Config{

View File

@@ -19,7 +19,6 @@ package ethtest
import (
"crypto/ecdsa"
"fmt"
"time"
"github.com/ethereum/go-ethereum/eth/protocols/eth"
"github.com/ethereum/go-ethereum/p2p"
@@ -127,12 +126,10 @@ func (pt PooledTransactions) Code() int { return 26 }
// Conn represents an individual connection with a peer
type Conn struct {
*rlpx.Conn
ourKey *ecdsa.PrivateKey
negotiatedProtoVersion uint
negotiatedSnapProtoVersion uint
ourHighestProtoVersion uint
ourHighestSnapProtoVersion uint
caps []p2p.Cap
ourKey *ecdsa.PrivateKey
negotiatedProtoVersion uint
ourHighestProtoVersion uint
caps []p2p.Cap
}
// Read reads an eth packet from the connection.
@@ -262,7 +259,12 @@ func (c *Conn) Read66() (uint64, Message) {
// Write writes a eth packet to the connection.
func (c *Conn) Write(msg Message) error {
payload, err := rlp.EncodeToBytes(msg)
// check if message is eth protocol message
var (
payload []byte
err error
)
payload, err = rlp.EncodeToBytes(msg)
if err != nil {
return err
}
@@ -279,43 +281,3 @@ func (c *Conn) Write66(req eth.Packet, code int) error {
_, err = c.Conn.Write(uint64(code), payload)
return err
}
// ReadSnap reads a snap/1 response with the given id from the connection.
func (c *Conn) ReadSnap(id uint64) (Message, error) {
respId := id + 1
start := time.Now()
for respId != id && time.Since(start) < timeout {
code, rawData, _, err := c.Conn.Read()
if err != nil {
return nil, fmt.Errorf("could not read from connection: %v", err)
}
var snpMsg interface{}
switch int(code) {
case (GetAccountRange{}).Code():
snpMsg = new(GetAccountRange)
case (AccountRange{}).Code():
snpMsg = new(AccountRange)
case (GetStorageRanges{}).Code():
snpMsg = new(GetStorageRanges)
case (StorageRanges{}).Code():
snpMsg = new(StorageRanges)
case (GetByteCodes{}).Code():
snpMsg = new(GetByteCodes)
case (ByteCodes{}).Code():
snpMsg = new(ByteCodes)
case (GetTrieNodes{}).Code():
snpMsg = new(GetTrieNodes)
case (TrieNodes{}).Code():
snpMsg = new(TrieNodes)
default:
//return nil, fmt.Errorf("invalid message code: %d", code)
continue
}
if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
return nil, fmt.Errorf("could not rlp decode message: %v", err)
}
return snpMsg.(Message), nil
}
return nil, fmt.Errorf("request timed out")
}

View File

@@ -36,7 +36,6 @@ var (
Subcommands: []cli.Command{
rlpxPingCommand,
rlpxEthTestCommand,
rlpxSnapTestCommand,
},
}
rlpxPingCommand = cli.Command{
@@ -54,16 +53,6 @@ var (
testTAPFlag,
},
}
rlpxSnapTestCommand = cli.Command{
Name: "snap-test",
Usage: "Runs tests against a node",
ArgsUsage: "<node> <chain.rlp> <genesis.json>",
Action: rlpxSnapTest,
Flags: []cli.Flag{
testPatternFlag,
testTAPFlag,
},
}
)
func rlpxPing(ctx *cli.Context) error {
@@ -117,15 +106,3 @@ func rlpxEthTest(ctx *cli.Context) error {
}
return runTests(ctx, suite.AllEthTests())
}
// rlpxSnapTest runs the snap protocol test suite.
func rlpxSnapTest(ctx *cli.Context) error {
if ctx.NArg() < 3 {
exit("missing path to chain.rlp as command-line argument")
}
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
if err != nil {
exit(err)
}
return runTests(ctx, suite.SnapTests())
}

View File

@@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
// signHash is a helper function that calculates a hash for the given message
// that can be safely used to calculate a signature from.
//
// The hash is calculated as
// The hash is calulcated as
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
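
For reference, the digest described by this comment is the standard personal-message ("\x19Ethereum Signed Message") hash, the same scheme accounts.TextAndHash produces in the GenDoc hunk earlier in this compare. A minimal standalone sketch, assuming Keccak-256 from x/crypto:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// signHash computes keccak256("\x19Ethereum Signed Message:\n" + len(msg) + msg),
// matching the formula in the comment above.
func signHash(msg []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	fmt.Fprintf(h, "\x19Ethereum Signed Message:\n%d", len(msg))
	h.Write(msg)
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%x\n", signHash([]byte("hello world")))
}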

View File

@@ -67,7 +67,6 @@ type ommer struct {
type stEnv struct {
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
Difficulty *big.Int `json:"currentDifficulty"`
Random *big.Int `json:"currentRandom"`
ParentDifficulty *big.Int `json:"parentDifficulty"`
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
Number uint64 `json:"currentNumber" gencodec:"required"`
@@ -82,7 +81,6 @@ type stEnv struct {
type stEnvMarshaling struct {
Coinbase common.UnprefixedAddress
Difficulty *math.HexOrDecimal256
Random *math.HexOrDecimal256
ParentDifficulty *math.HexOrDecimal256
GasLimit math.HexOrDecimal64
Number math.HexOrDecimal64
@@ -141,11 +139,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
if pre.Env.BaseFee != nil {
vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
}
// If random is defined, add it to the vmContext.
if pre.Env.Random != nil {
rnd := common.BigToHash(pre.Env.Random)
vmContext.Random = &rnd
}
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
// done in StateProcessor.Process(block, ...), right before transactions are applied.
if chainConfig.DAOForkSupport &&

View File

@@ -18,7 +18,6 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
type stEnv struct {
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
@@ -32,7 +31,6 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
var enc stEnv
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
enc.Random = (*math.HexOrDecimal256)(s.Random)
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
enc.Number = math.HexOrDecimal64(s.Number)
@@ -50,7 +48,6 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
type stEnv struct {
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
Random *math.HexOrDecimal256 `json:"currentRandom"`
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
@@ -72,9 +69,6 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
if dec.Difficulty != nil {
s.Difficulty = (*big.Int)(dec.Difficulty)
}
if dec.Random != nil {
s.Random = (*big.Int)(dec.Random)
}
if dec.ParentDifficulty != nil {
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
}

View File

@@ -252,10 +252,6 @@ func Transition(ctx *cli.Context) error {
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
}
}
// Sanity check, to not `panic` in state_transition
if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules"))
}
if env := prestate.Env; env.Difficulty == nil {
// If difficulty was not provided by caller, we need to calculate it.
switch {

View File

@@ -21,19 +21,19 @@
"error": "transaction type not supported"
},
{
"error": "typed transaction too short"
"error": "rlp: expected List"
},
{
"error": "typed transaction too short"
"error": "rlp: expected List"
},
{
"error": "typed transaction too short"
"error": "rlp: expected List"
},
{
"error": "typed transaction too short"
"error": "rlp: expected List"
},
{
"error": "typed transaction too short"
"error": "rlp: expected List"
},
{
"error": "rlp: expected input list for types.AccessListTx"

View File

@@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
t.Error(err)
}
geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile)
geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile)
defer geth.ExpectExit()
geth.Expect(expected)
}

View File

@@ -32,7 +32,6 @@ import (
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
@@ -160,25 +159,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
}
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
}
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Warn users to migrate if they have a legacy freezer format.
if eth != nil {
firstIdx := uint64(0)
// Hack to speed up check for mainnet because we know
// the first non-empty block.
ghash := rawdb.ReadCanonicalHash(eth.ChainDb(), 0)
if cfg.Eth.NetworkId == 1 && ghash == params.MainnetGenesisHash {
firstIdx = 46147
}
isLegacy, _, err := dbHasLegacyReceipts(eth.ChainDb(), firstIdx)
if err != nil {
log.Error("Failed to check db for legacy receipts", "err", err)
} else if isLegacy {
log.Warn("Database has receipts with a legacy format. Please run `geth db freezer-migrate`.")
}
cfg.Eth.Genesis.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
}
backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
// Configure GraphQL if requested
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {

View File

@@ -77,13 +77,13 @@ func localConsole(ctx *cli.Context) error {
// Create and start the node based on the CLI flags
prepare(ctx)
stack, backend := makeFullNode(ctx)
startNode(ctx, stack, backend, true)
startNode(ctx, stack, backend)
defer stack.Close()
// Attach to the newly started node and create the JavaScript console.
// Attach to the newly started node and start the JavaScript console
client, err := stack.Attach()
if err != nil {
return fmt.Errorf("Failed to attach to the inproc geth: %v", err)
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
}
config := console.Config{
DataDir: utils.MakeDataDir(ctx),
@@ -91,34 +91,29 @@ func localConsole(ctx *cli.Context) error {
Client: client,
Preload: utils.MakeConsolePreloads(ctx),
}
console, err := console.New(config)
if err != nil {
return fmt.Errorf("Failed to start the JavaScript console: %v", err)
utils.Fatalf("Failed to start the JavaScript console: %v", err)
}
defer console.Stop(false)
// If only a short execution was requested, evaluate and return.
// If only a short execution was requested, evaluate and return
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
console.Evaluate(script)
return nil
}
// Track node shutdown and stop the console when it goes down.
// This happens when SIGTERM is sent to the process.
go func() {
stack.Wait()
console.StopInteractive()
}()
// Print the welcome screen and enter interactive mode.
// Otherwise print the welcome screen and enter interactive mode
console.Welcome()
console.Interactive()
return nil
}
// remoteConsole will connect to a remote geth instance, attaching a JavaScript
// console to it.
func remoteConsole(ctx *cli.Context) error {
// Attach to a remotely running geth instance and start the JavaScript console
endpoint := ctx.Args().First()
if endpoint == "" {
path := node.DefaultDataDir()
@@ -155,6 +150,7 @@ func remoteConsole(ctx *cli.Context) error {
Client: client,
Preload: utils.MakeConsolePreloads(ctx),
}
console, err := console.New(config)
if err != nil {
utils.Fatalf("Failed to start the JavaScript console: %v", err)
@@ -169,6 +165,7 @@ func remoteConsole(ctx *cli.Context) error {
// Otherwise print the welcome screen and enter interactive mode
console.Welcome()
console.Interactive()
return nil
}
@@ -192,13 +189,13 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
func ephemeralConsole(ctx *cli.Context) error {
// Create and start the node based on the CLI flags
stack, backend := makeFullNode(ctx)
startNode(ctx, stack, backend, false)
startNode(ctx, stack, backend)
defer stack.Close()
// Attach to the newly started node and start the JavaScript console
client, err := stack.Attach()
if err != nil {
return fmt.Errorf("Failed to attach to the inproc geth: %v", err)
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
}
config := console.Config{
DataDir: utils.MakeDataDir(ctx),
@@ -209,24 +206,22 @@ func ephemeralConsole(ctx *cli.Context) error {
console, err := console.New(config)
if err != nil {
return fmt.Errorf("Failed to start the JavaScript console: %v", err)
utils.Fatalf("Failed to start the JavaScript console: %v", err)
}
defer console.Stop(false)
// Interrupt the JS interpreter when node is stopped.
// Evaluate each of the specified JavaScript files
for _, file := range ctx.Args() {
if err = console.Execute(file); err != nil {
utils.Fatalf("Failed to execute %s: %v", file, err)
}
}
go func() {
stack.Wait()
console.Stop(false)
}()
// Evaluate each of the specified JavaScript files.
for _, file := range ctx.Args() {
if err = console.Execute(file); err != nil {
return fmt.Errorf("Failed to execute %s: %v", file, err)
}
}
// The main script is now done, but keep running timers/callbacks.
console.Stop(true)
return nil
}

View File

@@ -34,12 +34,9 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/console/prompt"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/trie"
"github.com/olekukonko/tablewriter"
"gopkg.in/urfave/cli.v1"
)
@@ -72,8 +69,6 @@ Remove blockchain and state databases`,
dbDumpFreezerIndex,
dbImportCmd,
dbExportCmd,
dbMetadataCmd,
dbMigrateFreezerCmd,
},
}
dbInspectCmd = cli.Command{
@@ -238,38 +233,6 @@ WARNING: This is a low-level operation which may cause database corruption!`,
},
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
}
dbMetadataCmd = cli.Command{
Action: utils.MigrateFlags(showMetaData),
Name: "metadata",
Usage: "Shows metadata about the chain status.",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: "Shows metadata about the chain status.",
}
dbMigrateFreezerCmd = cli.Command{
Action: utils.MigrateFlags(freezerMigrate),
Name: "freezer-migrate",
Usage: "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
ArgsUsage: "",
Flags: []cli.Flag{
utils.DataDirFlag,
utils.SyncModeFlag,
utils.MainnetFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
},
Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
}
)
func removeDB(ctx *cli.Context) error {
@@ -576,7 +539,7 @@ func freezerInspect(ctx *cli.Context) error {
defer stack.Close()
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
log.Info("Opening freezer", "location", path, "name", kind)
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
return err
} else {
f.DumpIndex(start, end)
@@ -722,138 +685,3 @@ func exportChaindata(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
func showMetaData(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, true)
ancients, err := db.Ancients()
if err != nil {
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
}
pp := func(val *uint64) string {
if val == nil {
return "<nil>"
}
return fmt.Sprintf("%d (0x%x)", *val, *val)
}
data := [][]string{
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
if b := rawdb.ReadHeadBlock(db); b != nil {
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
}
if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
data = append(data, []string{"SkeletonSyncStatus", string(b)})
}
if h := rawdb.ReadHeadHeader(db); h != nil {
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
}
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
}...)
table := tablewriter.NewWriter(os.Stdout)
table.SetHeader([]string{"Field", "Value"})
table.AppendBulk(data)
table.Render()
return nil
}
func freezerMigrate(ctx *cli.Context) error {
stack, _ := makeConfigNode(ctx)
defer stack.Close()
db := utils.MakeChainDatabase(ctx, stack, false)
defer db.Close()
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return err
}
if numAncients < 1 {
log.Info("No receipts in freezer to migrate")
return nil
}
isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
if err != nil {
return err
}
if !isFirstLegacy {
log.Info("No legacy receipts to migrate")
return nil
}
log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
start := time.Now()
if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
return err
}
if err := db.Close(); err != nil {
return err
}
log.Info("Migration finished", "duration", time.Since(start))
return nil
}
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
// Check first block for legacy receipt format
numAncients, err := db.Ancients()
if err != nil {
return false, 0, err
}
if numAncients < 1 {
return false, 0, nil
}
if firstIdx >= numAncients {
return false, firstIdx, nil
}
var (
legacy bool
blob []byte
emptyRLPList = []byte{192}
)
// Find first block with non-empty receipt, only if
// the index is not already provided.
if firstIdx == 0 {
for i := uint64(0); i < numAncients; i++ {
blob, err = db.Ancient("receipts", i)
if err != nil {
return false, 0, err
}
if len(blob) == 0 {
continue
}
if !bytes.Equal(blob, emptyRLPList) {
firstIdx = i
break
}
}
}
// Is first non-empty receipt legacy?
first, err := db.Ancient("receipts", firstIdx)
if err != nil {
return false, 0, err
}
legacy, err = types.IsLegacyStoredReceipts(first)
return legacy, firstIdx, err
}
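
The emptyRLPList sentinel works because RLP encodes an empty list as the single byte 0xC0 (decimal 192), so a receipts entry equal to that byte simply means the block produced no receipts and cannot be used to judge the storage format. A one-line check with geth's own rlp package:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// An empty list RLP-encodes to the single byte 0xC0 (192), which is
	// exactly what dbHasLegacyReceipts skips as "no receipts in this block".
	enc, err := rlp.EncodeToBytes([]interface{}{})
	fmt.Printf("%x %v\n", enc, err) // c0 <nil>
}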

View File

@@ -107,8 +107,7 @@ var (
utils.UltraLightFractionFlag,
utils.UltraLightOnlyAnnounceFlag,
utils.LightNoSyncServeFlag,
utils.EthPeerRequiredBlocksFlag,
utils.LegacyWhitelistFlag,
utils.WhitelistFlag,
utils.BloomFilterSizeFlag,
utils.CacheFlag,
utils.CacheDatabaseFlag,
@@ -119,7 +118,6 @@ var (
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.FDLimitFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
@@ -148,7 +146,6 @@ var (
utils.SepoliaFlag,
utils.RinkebyFlag,
utils.GoerliFlag,
utils.KilnFlag,
utils.VMEnableDebugFlag,
utils.NetworkIdFlag,
utils.EthStatsURLFlag,
@@ -160,6 +157,7 @@ var (
utils.GpoIgnoreGasPriceFlag,
utils.MinerNotifyFullFlag,
configFileFlag,
utils.CatalystFlag,
}
rpcFlags = []cli.Flag{
@@ -167,10 +165,6 @@ var (
utils.HTTPListenAddrFlag,
utils.HTTPPortFlag,
utils.HTTPCORSDomainFlag,
utils.AuthListenFlag,
utils.AuthPortFlag,
utils.AuthVirtualHostsFlag,
utils.JWTSecretFlag,
utils.HTTPVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
@@ -214,7 +208,7 @@ func init() {
// Initialize the CLI app and start Geth
app.Action = geth
app.HideVersion = true // we have a command to print the version
app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
app.Copyright = "Copyright 2013-2021 The go-ethereum Authors"
app.Commands = []cli.Command{
// See chaincmd.go:
initCommand,
@@ -280,9 +274,6 @@ func prepare(ctx *cli.Context) {
case ctx.GlobalIsSet(utils.RopstenFlag.Name):
log.Info("Starting Geth on Ropsten testnet...")
case ctx.GlobalIsSet(utils.SepoliaFlag.Name):
log.Info("Starting Geth on Sepolia testnet...")
case ctx.GlobalIsSet(utils.RinkebyFlag.Name):
log.Info("Starting Geth on Rinkeby testnet...")
@@ -298,11 +289,7 @@ func prepare(ctx *cli.Context) {
// If we're a full node on mainnet without --cache specified, bump default cache allowance
if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) {
// Make sure we're not on any supported preconfigured testnet either
if !ctx.GlobalIsSet(utils.RopstenFlag.Name) &&
!ctx.GlobalIsSet(utils.SepoliaFlag.Name) &&
!ctx.GlobalIsSet(utils.RinkebyFlag.Name) &&
!ctx.GlobalIsSet(utils.GoerliFlag.Name) &&
!ctx.GlobalIsSet(utils.DeveloperFlag.Name) {
if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) {
// Nope, we're really on mainnet. Bump that cache up!
log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096)
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096))
@@ -333,7 +320,7 @@ func geth(ctx *cli.Context) error {
stack, backend := makeFullNode(ctx)
defer stack.Close()
startNode(ctx, stack, backend, false)
startNode(ctx, stack, backend)
stack.Wait()
return nil
}
@@ -341,11 +328,11 @@ func geth(ctx *cli.Context) error {
// startNode boots up the system node and all registered protocols, after which
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
// miner.
func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) {
func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) {
debug.Memsize.Add("node", stack)
// Start up the node itself
utils.StartNode(ctx, stack, isConsole)
utils.StartNode(ctx, stack)
// Unlock any account specifically requested
unlockAccounts(ctx, stack)

View File

@@ -220,7 +220,7 @@ func verifyState(ctx *cli.Context) error {
log.Error("Failed to load head block")
return errors.New("no head block")
}
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false, false)
if err != nil {
log.Error("Failed to open snapshot tree", "err", err)
return err
@@ -314,7 +314,8 @@ func traverseState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
if len(code) == 0 {
log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
return errors.New("missing code")
}
@@ -385,10 +386,11 @@ func traverseRawState(ctx *cli.Context) error {
nodes += 1
node := accIter.Hash()
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
if node != (common.Hash{}) {
if !rawdb.HasTrieNode(chaindb, node) {
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
log.Error("Missing trie node(account)", "hash", node)
return errors.New("missing account")
}
@@ -416,7 +418,8 @@ func traverseRawState(ctx *cli.Context) error {
// Check the presence of non-empty hash nodes (embedded nodes don't
// have their own hash).
if node != (common.Hash{}) {
if !rawdb.HasTrieNode(chaindb, node) {
blob := rawdb.ReadTrieNode(chaindb, node)
if len(blob) == 0 {
log.Error("Missing trie node(storage)", "hash", node)
return errors.New("missing storage")
}
@@ -432,7 +435,8 @@ func traverseRawState(ctx *cli.Context) error {
}
}
if !bytes.Equal(acc.CodeHash, emptyCode) {
if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
if len(code) == 0 {
log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
return errors.New("missing code")
}
@@ -468,7 +472,7 @@ func dumpState(ctx *cli.Context) error {
if err != nil {
return err
}
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false, false)
if err != nil {
return err
}

View File

@@ -46,7 +46,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.RinkebyFlag,
utils.RopstenFlag,
utils.SepoliaFlag,
utils.KilnFlag,
utils.SyncModeFlag,
utils.ExitWhenSyncedFlag,
utils.GCModeFlag,
@@ -54,7 +53,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.EthStatsURLFlag,
utils.IdentityFlag,
utils.LightKDFFlag,
utils.EthPeerRequiredBlocksFlag,
utils.WhitelistFlag,
},
},
{
@@ -120,7 +119,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.CacheSnapshotFlag,
utils.CacheNoPrefetchFlag,
utils.CachePreimagesFlag,
utils.FDLimitFlag,
},
},
{
@@ -150,10 +148,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.WSApiFlag,
utils.WSPathPrefixFlag,
utils.WSAllowedOriginsFlag,
utils.JWTSecretFlag,
utils.AuthListenFlag,
utils.AuthPortFlag,
utils.AuthVirtualHostsFlag,
utils.GraphQLEnabledFlag,
utils.GraphQLCORSDomainFlag,
utils.GraphQLVirtualHostsFlag,
@@ -227,7 +221,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
Name: "ALIASED (deprecated)",
Flags: []cli.Flag{
utils.NoUSBFlag,
utils.LegacyWhitelistFlag,
},
},
{
@@ -236,6 +229,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{
utils.SnapshotFlag,
utils.BloomFilterSizeFlag,
cli.HelpFlag,
utils.CatalystFlag,
},
},
}

View File

@@ -25,8 +25,6 @@ import (
"strconv"
"strings"
"testing"
"github.com/jedisct1/go-minisign"
)
func TestVerification(t *testing.T) {
@@ -130,39 +128,3 @@ func TestMatching(t *testing.T) {
}
}
}
func TestGethPubKeysParseable(t *testing.T) {
for _, pubkey := range gethPubKeys {
_, err := minisign.NewPublicKey(pubkey)
if err != nil {
t.Errorf("Should be parseable")
}
}
}
func TestKeyID(t *testing.T) {
type args struct {
id [8]byte
}
tests := []struct {
name string
args args
want string
}{
{"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"},
{"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"},
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := keyID(tt.args.id); got != tt.want {
t.Errorf("keyID() = %v, want %v", got, tt.want)
}
})
}
}
func extractKeyId(pubkey string) [8]byte {
p, _ := minisign.NewPublicKey(pubkey)
return p.KeyId
}

View File

@@ -68,7 +68,7 @@ func Fatalf(format string, args ...interface{}) {
os.Exit(1)
}
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
func StartNode(ctx *cli.Context, stack *node.Node) {
if err := stack.Start(); err != nil {
Fatalf("Error starting protocol stack: %v", err)
}
@@ -87,33 +87,17 @@ func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
}
shutdown := func() {
log.Info("Got interrupt, shutting down...")
go stack.Close()
for i := 10; i > 0; i-- {
<-sigc
if i > 1 {
log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
}
}
debug.Exit() // ensure trace and CPU profile data is flushed.
debug.LoudPanic("boom")
}
if isConsole {
// In JS console mode, SIGINT is ignored because it's handled by the console.
// However, SIGTERM still shuts down the node.
for {
sig := <-sigc
if sig == syscall.SIGTERM {
shutdown()
return
}
}
} else {
<-sigc
log.Info("Got interrupt, shutting down...")
go stack.Close()
for i := 10; i > 0; i-- {
<-sigc
shutdown()
if i > 1 {
log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
}
}
debug.Exit() // ensure trace and CPU profile data is flushed.
debug.LoudPanic("boom")
}()
}

View File

@@ -45,7 +45,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/eth"
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/catalyst"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/eth/ethconfig"
"github.com/ethereum/go-ethereum/eth/gasprice"
@@ -56,7 +56,6 @@ import (
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/les"
lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/metrics/exp"
@@ -161,10 +160,6 @@ var (
Name: "sepolia",
Usage: "Sepolia network: pre-configured proof-of-work test network",
}
KilnFlag = cli.BoolFlag{
Name: "kiln",
Usage: "Kiln network: pre-configured proof-of-work to proof-of-stake test network",
}
DeveloperFlag = cli.BoolFlag{
Name: "dev",
Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled",
@@ -241,13 +236,9 @@ var (
Name: "lightkdf",
Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength",
}
EthPeerRequiredBlocksFlag = cli.StringFlag{
Name: "eth.requiredblocks",
Usage: "Comma separated block number-to-hash mappings to require for peering (<number>=<hash>)",
}
LegacyWhitelistFlag = cli.StringFlag{
WhitelistFlag = cli.StringFlag{
Name: "whitelist",
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>) (deprecated in favor of --peer.requiredblocks)",
Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)",
}
BloomFilterSizeFlag = cli.Uint64Flag{
Name: "bloomfilter.size",
@@ -441,10 +432,6 @@ var (
Name: "cache.preimages",
Usage: "Enable recording the SHA3/keccak preimages of trie keys",
}
FDLimitFlag = cli.IntFlag{
Name: "fdlimit",
Usage: "Raise the open file descriptor resource limit (default = system fd limit)",
}
// Miner settings
MiningEnabledFlag = cli.BoolFlag{
Name: "mine",
@@ -530,26 +517,6 @@ var (
Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)",
Value: ethconfig.Defaults.RPCTxFeeCap,
}
// Authenticated RPC HTTP settings
AuthListenFlag = cli.StringFlag{
Name: "authrpc.addr",
Usage: "Listening address for authenticated APIs",
Value: node.DefaultConfig.AuthAddr,
}
AuthPortFlag = cli.IntFlag{
Name: "authrpc.port",
Usage: "Listening port for authenticated APIs",
Value: node.DefaultConfig.AuthPort,
}
AuthVirtualHostsFlag = cli.StringFlag{
Name: "authrpc.vhosts",
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
Value: strings.Join(node.DefaultConfig.AuthVirtualHosts, ","),
}
JWTSecretFlag = cli.StringFlag{
Name: "authrpc.jwtsecret",
Usage: "Path to a JWT secret to use for authenticated RPC endpoints",
}
// Logging and debug settings
EthStatsURLFlag = cli.StringFlag{
Name: "ethstats",
@@ -822,6 +789,11 @@ var (
Usage: "InfluxDB organization name (v2 only)",
Value: metrics.DefaultConfig.InfluxDBOrganization,
}
CatalystFlag = cli.BoolFlag{
Name: "catalyst",
Usage: "Catalyst mode (eth2 integration testing)",
}
)
// MakeDataDir retrieves the currently requested data directory, terminating
@@ -843,9 +815,6 @@ func MakeDataDir(ctx *cli.Context) string {
if ctx.GlobalBool(SepoliaFlag.Name) {
return filepath.Join(path, "sepolia")
}
if ctx.GlobalBool(KilnFlag.Name) {
return filepath.Join(path, "kiln")
}
return path
}
Fatalf("Cannot determine default data directory, please set manually (--datadir)")
@@ -900,8 +869,6 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
urls = params.RinkebyBootnodes
case ctx.GlobalBool(GoerliFlag.Name):
urls = params.GoerliBootnodes
case ctx.GlobalBool(KilnFlag.Name):
urls = params.KilnBootnodes
case cfg.BootstrapNodes != nil:
return // already set, don't apply defaults.
}
@@ -988,18 +955,6 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name)
}
if ctx.GlobalIsSet(AuthListenFlag.Name) {
cfg.AuthAddr = ctx.GlobalString(AuthListenFlag.Name)
}
if ctx.GlobalIsSet(AuthPortFlag.Name) {
cfg.AuthPort = ctx.GlobalInt(AuthPortFlag.Name)
}
if ctx.GlobalIsSet(AuthVirtualHostsFlag.Name) {
cfg.AuthVirtualHosts = SplitAndTrim(ctx.GlobalString(AuthVirtualHostsFlag.Name))
}
if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) {
cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name))
}
@@ -1106,24 +1061,11 @@ func setLes(ctx *cli.Context, cfg *ethconfig.Config) {
// MakeDatabaseHandles raises the number of allowed file handles per process
// for Geth and returns half of the allowance to assign to the database.
func MakeDatabaseHandles(max int) int {
func MakeDatabaseHandles() int {
limit, err := fdlimit.Maximum()
if err != nil {
Fatalf("Failed to retrieve file descriptor allowance: %v", err)
}
switch {
case max == 0:
// User didn't specify a meaningful value, use system limits
case max < 128:
// User specified something unhealthy, just use system defaults
log.Error("File descriptor limit invalid (<128)", "had", max, "updated", limit)
case max > limit:
// User requested more than the OS allows, notify that we can't allocate it
log.Warn("Requested file descriptors denied by OS", "req", max, "limit", limit)
default:
// User limit is meaningful and within allowed range, use that
limit = max
}
raised, err := fdlimit.Raise(uint64(limit))
if err != nil {
Fatalf("Failed to raise file descriptor allowance: %v", err)
@@ -1280,10 +1222,6 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
setDataDir(ctx, cfg)
setSmartCard(ctx, cfg)
if ctx.GlobalIsSet(JWTSecretFlag.Name) {
cfg.JWTSecret = ctx.GlobalString(JWTSecretFlag.Name)
}
if ctx.GlobalIsSet(ExternalSignerFlag.Name) {
cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name)
}
@@ -1352,8 +1290,6 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) {
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli")
case ctx.GlobalBool(SepoliaFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "sepolia")
case ctx.GlobalBool(KilnFlag.Name) && cfg.DataDir == node.DefaultDataDir():
cfg.DataDir = filepath.Join(node.DefaultDataDir(), "kiln")
}
}
@@ -1472,33 +1408,26 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) {
}
}
func setPeerRequiredBlocks(ctx *cli.Context, cfg *ethconfig.Config) {
peerRequiredBlocks := ctx.GlobalString(EthPeerRequiredBlocksFlag.Name)
if peerRequiredBlocks == "" {
if ctx.GlobalIsSet(LegacyWhitelistFlag.Name) {
log.Warn("The flag --rpc is deprecated and will be removed, please use --peer.requiredblocks")
peerRequiredBlocks = ctx.GlobalString(LegacyWhitelistFlag.Name)
} else {
return
}
func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) {
whitelist := ctx.GlobalString(WhitelistFlag.Name)
if whitelist == "" {
return
}
cfg.PeerRequiredBlocks = make(map[uint64]common.Hash)
for _, entry := range strings.Split(peerRequiredBlocks, ",") {
cfg.Whitelist = make(map[uint64]common.Hash)
for _, entry := range strings.Split(whitelist, ",") {
parts := strings.Split(entry, "=")
if len(parts) != 2 {
Fatalf("Invalid peer required block entry: %s", entry)
Fatalf("Invalid whitelist entry: %s", entry)
}
number, err := strconv.ParseUint(parts[0], 0, 64)
if err != nil {
Fatalf("Invalid peer required block number %s: %v", parts[0], err)
Fatalf("Invalid whitelist block number %s: %v", parts[0], err)
}
var hash common.Hash
if err = hash.UnmarshalText([]byte(parts[1])); err != nil {
Fatalf("Invalid peer required block hash %s: %v", parts[1], err)
Fatalf("Invalid whitelist hash %s: %v", parts[1], err)
}
cfg.PeerRequiredBlocks[number] = hash
cfg.Whitelist[number] = hash
}
}
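Both variants above accept the same comma-separated <number>=<hash> entry format. A self-contained sketch of the parsing step, without the geth helpers (parseRequiredBlocks is a hypothetical re-implementation, not code from either side of the diff):

package main

import (
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
)

// parseRequiredBlocks parses comma-separated "number=hash" pairs, the format
// both setWhitelist and setPeerRequiredBlocks accept.
func parseRequiredBlocks(s string) (map[uint64][32]byte, error) {
	out := make(map[uint64][32]byte)
	for _, entry := range strings.Split(s, ",") {
		parts := strings.Split(entry, "=")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid entry: %s", entry)
		}
		// Base 0 accepts decimal or 0x-prefixed numbers, matching the
		// ParseUint call in the diff above.
		number, err := strconv.ParseUint(parts[0], 0, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid block number %s: %v", parts[0], err)
		}
		raw, err := hex.DecodeString(strings.TrimPrefix(parts[1], "0x"))
		if err != nil || len(raw) != 32 {
			return nil, fmt.Errorf("invalid hash %s", parts[1])
		}
		var hash [32]byte
		copy(hash[:], raw)
		out[number] = hash
	}
	return out, nil
}

func main() {
	m, err := parseRequiredBlocks("12965000=0x" + strings.Repeat("00", 32))
	fmt.Println(len(m), err) // 1 <nil>
}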
@@ -1546,7 +1475,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) {
// SetEthConfig applies eth-related command line flags to the config.
func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Avoid conflicting network flags
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag, KilnFlag)
CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, SepoliaFlag)
CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light")
CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer
if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 {
@@ -1565,7 +1494,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
setTxPool(ctx, &cfg.TxPool)
setEthash(ctx, cfg)
setMiner(ctx, &cfg.Miner)
setPeerRequiredBlocks(ctx, cfg)
setWhitelist(ctx, cfg)
setLes(ctx, cfg)
// Cap the cache allowance and tune the garbage collector
@@ -1597,7 +1526,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) {
cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
}
cfg.DatabaseHandles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
cfg.DatabaseHandles = MakeDatabaseHandles()
if ctx.GlobalIsSet(AncientFlag.Name) {
cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name)
}
@@ -1708,12 +1637,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
}
cfg.Genesis = core.DefaultGoerliGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash)
case ctx.GlobalBool(KilnFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337802
}
cfg.Genesis = core.DefaultKilnGenesisBlock()
SetDNSDiscoveryDefaults(cfg, params.KilnGenesisHash)
case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337
@@ -1750,15 +1673,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
// Create a new developer genesis block or reuse existing one
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
if ctx.GlobalIsSet(DataDirFlag.Name) {
// If datadir doesn't exist we need to open db in write-mode
// so leveldb can create files.
readonly := true
if !common.FileExist(stack.ResolvePath("chaindata")) {
readonly = false
}
// Check if we have an already initialized chain and fall back to
// that if so. Otherwise we need to generate a new genesis spec.
chaindb := MakeChainDatabase(ctx, stack, readonly)
chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
cfg.Genesis = nil // fallback to db content
}
@@ -1793,15 +1710,15 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
// RegisterEthService adds an Ethereum client to the stack.
// The second return value is the full node instance, which may be nil if the
// node is running as a light client.
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
if cfg.SyncMode == downloader.LightSync {
backend, err := les.New(stack, cfg)
if err != nil {
Fatalf("Failed to register the Ethereum service: %v", err)
}
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
if err := lescatalyst.Register(stack, backend); err != nil {
if isCatalyst {
if err := catalyst.RegisterLight(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
@@ -1817,8 +1734,8 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend
Fatalf("Failed to create the LES server: %v", err)
}
}
if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
if err := ethcatalyst.Register(stack, backend); err != nil {
if isCatalyst {
if err := catalyst.Register(stack, backend); err != nil {
Fatalf("Failed to register the catalyst service: %v", err)
}
}
@@ -1921,7 +1838,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string {
func MakeChainDatabase(ctx *cli.Context, stack *node.Node, readonly bool) ethdb.Database {
var (
cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100
handles = MakeDatabaseHandles(ctx.GlobalInt(FDLimitFlag.Name))
handles = MakeDatabaseHandles()
err error
chainDb ethdb.Database
@@ -1952,8 +1869,6 @@ func MakeGenesis(ctx *cli.Context) *core.Genesis {
genesis = core.DefaultRinkebyGenesisBlock()
case ctx.GlobalBool(GoerliFlag.Name):
genesis = core.DefaultGoerliGenesisBlock()
case ctx.GlobalBool(KilnFlag.Name):
genesis = core.DefaultKilnGenesisBlock()
case ctx.GlobalBool(DeveloperFlag.Name):
Fatalf("Developer chains are ephemeral")
}

View File

@@ -26,7 +26,6 @@ import (
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
)
@@ -43,6 +42,7 @@ var (
// error types into the consensus package.
var (
errTooManyUncles = errors.New("too many uncles")
errInvalidMixDigest = errors.New("invalid mix digest")
errInvalidNonce = errors.New("invalid nonce")
errInvalidUncleHash = errors.New("invalid uncle hash")
)
@@ -181,7 +181,10 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
if len(header.Extra) > 32 {
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
}
// Verify the seal parts. Ensure the nonce and uncle hash are the expected value.
// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
if header.MixDigest != (common.Hash{}) {
return errInvalidMixDigest
}
if header.Nonce != beaconNonce {
return errInvalidNonce
}
@@ -193,8 +196,9 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, beaconDifficulty)
}
// Verify that the gas limit is <= 2^63-1
if header.GasLimit > params.MaxGasLimit {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
cap := uint64(0x7fffffffffffffff)
if header.GasLimit > cap {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
}
// Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
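Both spellings of the cap denote the same bound: params.MaxGasLimit on the newer side and the inline constant on the older side are 2^63 - 1, so gas values always fit in a signed 64-bit integer. A minimal check:

// Both sides of the diff enforce the same limit, 2^63 - 1 (math.MaxInt64).
const maxGasLimit = uint64(0x7fffffffffffffff)

func validGasLimit(gasLimit uint64) bool {
	return gasLimit <= maxGasLimit // gas arithmetic must fit a signed 64-bit int
}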

View File

@@ -295,8 +295,9 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
}
}
// Verify that the gas limit is <= 2^63-1
if header.GasLimit > params.MaxGasLimit {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
cap := uint64(0x7fffffffffffffff)
if header.GasLimit > cap {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
}
// If all checks passed, validate any special fields for hard forks
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {

View File

@@ -34,6 +34,7 @@ import (
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/utils"
"golang.org/x/crypto/sha3"
)
@@ -281,8 +282,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
}
// Verify that the gas limit is <= 2^63-1
if header.GasLimit > params.MaxGasLimit {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
cap := uint64(0x7fffffffffffffff)
if header.GasLimit > cap {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
}
// Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
@@ -659,10 +661,14 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header
r.Sub(r, header.Number)
r.Mul(r, blockReward)
r.Div(r, big8)
uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes())
state.Witness().TouchAddress(uncleCoinbase, state.GetBalance(uncle.Coinbase).Bytes())
state.AddBalance(uncle.Coinbase, r)
r.Div(blockReward, big32)
reward.Add(reward, r)
}
coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes())
state.Witness().TouchAddress(coinbase, state.GetBalance(header.Coinbase).Bytes())
state.AddBalance(header.Coinbase, reward)
}
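The pattern added here derives the balance leaf key, records the pre-credit balance in the access witness, and only then credits the account. A sketch of that ordering as a helper (touchAndCredit is hypothetical; the identifiers it calls are the ones used above):

// Sketch (hypothetical helper): record the current balance leaf in the
// access witness before crediting the account, mirroring the order above.
func touchAndCredit(statedb *state.StateDB, addr common.Address, amount *big.Int) {
	key := utils.GetTreeKeyBalance(addr.Bytes())
	statedb.Witness().TouchAddress(key, statedb.GetBalance(addr).Bytes())
	statedb.AddBalance(addr, amount)
}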

View File

@@ -549,11 +549,6 @@ func NewShared() *Ethash {
// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
return ethash.StopRemoteSealer()
}
// StopRemoteSealer stops the remote sealer
func (ethash *Ethash) StopRemoteSealer() error {
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
if ethash.remote == nil {

View File

@@ -17,7 +17,6 @@
package console
import (
"errors"
"fmt"
"io"
"io/ioutil"
@@ -27,7 +26,6 @@ import (
"regexp"
"sort"
"strings"
"sync"
"syscall"
"github.com/dop251/goja"
@@ -76,13 +74,6 @@ type Console struct {
histPath string // Absolute path to the console scrollback history
history []string // Scroll history maintained by the console
printer io.Writer // Output writer to serialize any display strings to
interactiveStopped chan struct{}
stopInteractiveCh chan struct{}
signalReceived chan struct{}
stopped chan struct{}
wg sync.WaitGroup
stopOnce sync.Once
}
// New initializes a JavaScript interpreted runtime environment and sets defaults
@@ -101,16 +92,12 @@ func New(config Config) (*Console, error) {
// Initialize the console and return
console := &Console{
client: config.Client,
jsre: jsre.New(config.DocRoot, config.Printer),
prompt: config.Prompt,
prompter: config.Prompter,
printer: config.Printer,
histPath: filepath.Join(config.DataDir, HistoryFile),
interactiveStopped: make(chan struct{}),
stopInteractiveCh: make(chan struct{}),
signalReceived: make(chan struct{}, 1),
stopped: make(chan struct{}),
client: config.Client,
jsre: jsre.New(config.DocRoot, config.Printer),
prompt: config.Prompt,
prompter: config.Prompter,
printer: config.Printer,
histPath: filepath.Join(config.DataDir, HistoryFile),
}
if err := os.MkdirAll(config.DataDir, 0700); err != nil {
return nil, err
@@ -118,10 +105,6 @@ func New(config Config) (*Console, error) {
if err := console.init(config.Preload); err != nil {
return nil, err
}
console.wg.Add(1)
go console.interruptHandler()
return console, nil
}
@@ -354,63 +337,9 @@ func (c *Console) Evaluate(statement string) {
}
}()
c.jsre.Evaluate(statement, c.printer)
// Avoid exiting Interactive when jsre was interrupted by SIGINT.
c.clearSignalReceived()
}
// interruptHandler runs in its own goroutine and waits for signals.
// When a signal is received, it interrupts the JS interpreter.
func (c *Console) interruptHandler() {
defer c.wg.Done()
// During Interactive, liner inhibits the signal while it is prompting for
// input. However, the signal will be received while evaluating JS.
//
// On unsupported terminals, SIGINT can also happen while prompting.
// Unfortunately, it is not possible to abort the prompt in this case and
// the c.readLines goroutine leaks.
sig := make(chan os.Signal, 1)
signal.Notify(sig, syscall.SIGINT)
defer signal.Stop(sig)
for {
select {
case <-sig:
c.setSignalReceived()
c.jsre.Interrupt(errors.New("interrupted"))
case <-c.stopInteractiveCh:
close(c.interactiveStopped)
c.jsre.Interrupt(errors.New("interrupted"))
case <-c.stopped:
return
}
}
}
func (c *Console) setSignalReceived() {
select {
case c.signalReceived <- struct{}{}:
default:
}
}
func (c *Console) clearSignalReceived() {
select {
case <-c.signalReceived:
default:
}
}
// StopInteractive causes Interactive to return as soon as possible.
func (c *Console) StopInteractive() {
select {
case c.stopInteractiveCh <- struct{}{}:
case <-c.stopped:
}
}
// Interactive starts an interactive user session, where input is prompted from
// Interactive starts an interactive user session, where input is propted from
// the configured user prompter.
func (c *Console) Interactive() {
var (
@@ -420,11 +349,15 @@ func (c *Console) Interactive() {
inputLine = make(chan string, 1) // receives user input
inputErr = make(chan error, 1) // receives liner errors
requestLine = make(chan string) // requests a line of input
interrupt = make(chan os.Signal, 1)
)
defer func() {
c.writeHistory()
}()
// Monitor Ctrl-C. While liner does turn on the relevant terminal mode bits to avoid
// the signal, a signal can still be received for unsupported terminals. Unfortunately
// there is no way to cancel the line reader when this happens. The readLines
// goroutine will be leaked in this case.
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
// The line reader runs in a separate goroutine.
go c.readLines(inputLine, inputErr, requestLine)
@@ -435,14 +368,7 @@ func (c *Console) Interactive() {
requestLine <- prompt
select {
case <-c.interactiveStopped:
fmt.Fprintln(c.printer, "node is down, exiting console")
return
case <-c.signalReceived:
// SIGINT received while prompting for input -> unsupported terminal.
// I'm not sure if the best choice would be to leave the console running here.
// Bash keeps running in this case. node.js does not.
case <-interrupt:
fmt.Fprintln(c.printer, "caught interrupt, exiting")
return
@@ -550,19 +476,12 @@ func (c *Console) Execute(path string) error {
// Stop cleans up the console and terminates the runtime environment.
func (c *Console) Stop(graceful bool) error {
c.stopOnce.Do(func() {
// Stop the interrupt handler.
close(c.stopped)
c.wg.Wait()
})
c.jsre.Stop(graceful)
return nil
}
func (c *Console) writeHistory() error {
if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
return err
}
return os.Chmod(c.histPath, 0600) // Force 0600, even if it was different previously
if err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously
return err
}
c.jsre.Stop(graceful)
return nil
}

View File

@@ -68,10 +68,10 @@ func (it tokenType) String() string {
var stringtokenTypes = []string{
eof: "EOF",
lineStart: "new line",
lineEnd: "end of line",
invalidStatement: "invalid statement",
element: "element",
lineEnd: "end of line",
lineStart: "new line",
label: "label",
labelDef: "label definition",
number: "number",

View File

@@ -1,50 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package beacon
import "github.com/ethereum/go-ethereum/rpc"
var (
// VALID is returned by the engine API in the following calls:
// - newPayloadV1: if the payload was already known or was just validated and executed
// - forkchoiceUpdateV1: if the chain accepted the reorg (might ignore if it's stale)
VALID = "VALID"
// INVALID is returned by the engine API in the following calls:
// - newPayloadV1: if the payload failed to execute on top of the local chain
// - forkchoiceUpdateV1: if the new head is unknown, pre-merge, or reorg to it fails
INVALID = "INVALID"
// SYNCING is returned by the engine API in the following calls:
// - newPayloadV1: if the payload was accepted on top of an active sync
// - forkchoiceUpdateV1: if the new head was seen before, but not part of the chain
SYNCING = "SYNCING"
// ACCEPTED is returned by the engine API in the following calls:
// - newPayloadV1: if the payload was accepted, but not processed (side chain)
ACCEPTED = "ACCEPTED"
INVALIDBLOCKHASH = "INVALID_BLOCK_HASH"
INVALIDTERMINALBLOCK = "INVALID_TERMINAL_BLOCK"
GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
STATUS_INVALID = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: INVALID}, PayloadID: nil}
STATUS_SYNCING = ForkChoiceResponse{PayloadStatus: PayloadStatusV1{Status: SYNCING}, PayloadID: nil}
)
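For orientation, the prebuilt STATUS_* values at the bottom combine the status strings with the response types shown in a later file of this comparison. A handler answering forkchoiceUpdateV1 for a locally unknown head would return the SYNCING combination (sketch; forkchoiceUnknownHead is a hypothetical name):

// Sketch: respond to forkchoiceUpdateV1 when the new head is not yet known.
func forkchoiceUnknownHead() ForkChoiceResponse {
	return ForkChoiceResponse{
		PayloadStatus: PayloadStatusV1{Status: SYNCING},
		PayloadID:     nil, // no payload build was started
	}
}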

View File

@@ -1,53 +0,0 @@
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
package beacon
import (
"encoding/json"
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
)
var _ = (*payloadAttributesMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
type PayloadAttributesV1 struct {
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
}
var enc PayloadAttributesV1
enc.Timestamp = hexutil.Uint64(p.Timestamp)
enc.Random = p.Random
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
return json.Marshal(&enc)
}
// UnmarshalJSON unmarshals from JSON.
func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
type PayloadAttributesV1 struct {
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
Random *common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
}
var dec PayloadAttributesV1
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
if dec.Timestamp == nil {
return errors.New("missing required field 'timestamp' for PayloadAttributesV1")
}
p.Timestamp = uint64(*dec.Timestamp)
if dec.Random == nil {
return errors.New("missing required field 'prevRandao' for PayloadAttributesV1")
}
p.Random = *dec.Random
if dec.SuggestedFeeRecipient == nil {
return errors.New("missing required field 'suggestedFeeRecipient' for PayloadAttributesV1")
}
p.SuggestedFeeRecipient = *dec.SuggestedFeeRecipient
return nil
}
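Given the hexutil overrides, the generated marshaller serializes the timestamp as a hex string. A sketch of the resulting shape (exampleAttributesJSON is hypothetical; it assumes the same package and imports):

// Sketch: the generated MarshalJSON hex-encodes Timestamp via hexutil.Uint64,
// e.g. {"timestamp":"0x61a8","prevRandao":"0x00...","suggestedFeeRecipient":"0x00..."}.
func exampleAttributesJSON() ([]byte, error) {
	attrs := PayloadAttributesV1{
		Timestamp:             0x61a8, // serialized as the string "0x61a8"
		Random:                common.Hash{},
		SuggestedFeeRecipient: common.Address{},
	}
	return json.Marshal(&attrs)
}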

View File

@@ -1,194 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package beacon
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/trie"
)
//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
type PayloadAttributesV1 struct {
Timestamp uint64 `json:"timestamp" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
}
// JSON type overrides for PayloadAttributesV1.
type payloadAttributesMarshaling struct {
Timestamp hexutil.Uint64
}
//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
type ExecutableDataV1 struct {
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
Random common.Hash `json:"prevRandao" gencodec:"required"`
Number uint64 `json:"blockNumber" gencodec:"required"`
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
Timestamp uint64 `json:"timestamp" gencodec:"required"`
ExtraData []byte `json:"extraData" gencodec:"required"`
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
Transactions [][]byte `json:"transactions" gencodec:"required"`
}
// JSON type overrides for executableData.
type executableDataMarshaling struct {
Number hexutil.Uint64
GasLimit hexutil.Uint64
GasUsed hexutil.Uint64
Timestamp hexutil.Uint64
BaseFeePerGas *hexutil.Big
ExtraData hexutil.Bytes
LogsBloom hexutil.Bytes
Transactions []hexutil.Bytes
}
type PayloadStatusV1 struct {
Status string `json:"status"`
LatestValidHash *common.Hash `json:"latestValidHash"`
ValidationError *string `json:"validationError"`
}
type TransitionConfigurationV1 struct {
TerminalTotalDifficulty *hexutil.Big `json:"terminalTotalDifficulty"`
TerminalBlockHash common.Hash `json:"terminalBlockHash"`
TerminalBlockNumber hexutil.Uint64 `json:"terminalBlockNumber"`
}
// PayloadID is an identifier of the payload build process
type PayloadID [8]byte
func (b PayloadID) String() string {
return hexutil.Encode(b[:])
}
func (b PayloadID) MarshalText() ([]byte, error) {
return hexutil.Bytes(b[:]).MarshalText()
}
func (b *PayloadID) UnmarshalText(input []byte) error {
err := hexutil.UnmarshalFixedText("PayloadID", input, b[:])
if err != nil {
return fmt.Errorf("invalid payload id %q: %w", input, err)
}
return nil
}
type ForkChoiceResponse struct {
PayloadStatus PayloadStatusV1 `json:"payloadStatus"`
PayloadID *PayloadID `json:"payloadId"`
}
type ForkchoiceStateV1 struct {
HeadBlockHash common.Hash `json:"headBlockHash"`
SafeBlockHash common.Hash `json:"safeBlockHash"`
FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
}
func encodeTransactions(txs []*types.Transaction) [][]byte {
var enc = make([][]byte, len(txs))
for i, tx := range txs {
enc[i], _ = tx.MarshalBinary()
}
return enc
}
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
var txs = make([]*types.Transaction, len(enc))
for i, encTx := range enc {
var tx types.Transaction
if err := tx.UnmarshalBinary(encTx); err != nil {
return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
}
txs[i] = &tx
}
return txs, nil
}
// ExecutableDataToBlock constructs a block from executable data.
// It verifies the following fields:
// len(extraData) <= 32
// uncleHash = emptyUncleHash
// difficulty = 0
// and that the blockhash of the constructed block matches the parameters.
func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
txs, err := decodeTransactions(params.Transactions)
if err != nil {
return nil, err
}
if len(params.ExtraData) > 32 {
return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
}
header := &types.Header{
ParentHash: params.ParentHash,
UncleHash: types.EmptyUncleHash,
Coinbase: params.FeeRecipient,
Root: params.StateRoot,
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
ReceiptHash: params.ReceiptsRoot,
Bloom: types.BytesToBloom(params.LogsBloom),
Difficulty: common.Big0,
Number: new(big.Int).SetUint64(params.Number),
GasLimit: params.GasLimit,
GasUsed: params.GasUsed,
Time: params.Timestamp,
BaseFee: params.BaseFeePerGas,
Extra: params.ExtraData,
MixDigest: params.Random,
}
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
if block.Hash() != params.BlockHash {
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
}
return block, nil
}
// BlockToExecutableData constructs the executableDataV1 structure by filling the
// fields from the given block. It assumes the given block is a post-merge block.
func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
return &ExecutableDataV1{
BlockHash: block.Hash(),
ParentHash: block.ParentHash(),
FeeRecipient: block.Coinbase(),
StateRoot: block.Root(),
Number: block.NumberU64(),
GasLimit: block.GasLimit(),
GasUsed: block.GasUsed(),
BaseFeePerGas: block.BaseFee(),
Timestamp: block.Time(),
ReceiptsRoot: block.ReceiptHash(),
LogsBloom: block.Bloom().Bytes(),
Transactions: encodeTransactions(block.Transactions()),
Random: block.MixDigest(),
ExtraData: block.Extra(),
}
}
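The two conversions are designed to round-trip for post-merge blocks (no uncles): ExecutableDataToBlock rebuilds the header from the same fields BlockToExecutableData exports and already errors out on a hash mismatch. A sketch of that invariant (roundTrip is a hypothetical name):

// Sketch: converting a post-merge block to executable data and back
// reproduces a block with the same hash.
func roundTrip(block *types.Block) error {
	data := BlockToExecutableData(block)
	rebuilt, err := ExecutableDataToBlock(*data)
	if err != nil {
		return err
	}
	if rebuilt.Hash() != block.Hash() {
		return fmt.Errorf("hash mismatch: %x != %x", rebuilt.Hash(), block.Hash())
	}
	return nil
}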

View File

@@ -226,15 +226,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
futureBlocks, _ := lru.New(maxFutureBlocks)
bc := &BlockChain{
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
}),
chainConfig: chainConfig,
cacheConfig: cacheConfig,
db: db,
triegc: prque.New(nil),
quit: make(chan struct{}),
chainmu: syncx.NewClosableMutex(),
bodyCache: bodyCache,
@@ -283,6 +278,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
// Make sure the state associated with the block is available
head := bc.CurrentBlock()
bc.stateCache = state.NewDatabaseWithConfig(db, &trie.Config{
Cache: cacheConfig.TrieCleanLimit,
Journal: cacheConfig.TrieCleanJournal,
Preimages: cacheConfig.Preimages,
UseVerkle: chainConfig.IsCancun(head.Header().Number),
})
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
// Head state is missing, before the state recovery, find out the
// disk layer point of snapshot(if it's enabled). Make sure the
@@ -375,7 +377,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
recover = true
}
bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover, chainConfig.IsCancun(head.Header().Number))
}
// Start future block processor.
@@ -542,19 +544,6 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
}
}
if beyondRoot || newHeadBlock.NumberU64() == 0 {
if newHeadBlock.NumberU64() == 0 {
// Recommit the genesis state to disk in case the rewinding destination
// is the genesis block and the relevant state is gone. In the future this
// rewinding destination can be the earliest block stored in the chain
// if the historical chain pruning is enabled. In that case the logic
// needs to be improved here.
if !bc.HasState(bc.genesisBlock.Root()) {
if err := CommitGenesisState(bc.db, bc.genesisBlock.Hash()); err != nil {
log.Crit("Failed to commit genesis state", "err", err)
}
log.Debug("Recommitted genesis state to disk")
}
}
log.Debug("Rewound to block with state", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash())
break
}
@@ -567,7 +556,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
// Degrade the chain markers if they are explicitly reverted.
// In theory we should update all in-memory markers in the
// last step, however the direction of SetHead is from high
// to low, so it's safe to update in-memory markers directly.
// to low, so it's safe the update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
@@ -605,7 +594,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
if num+1 <= frozen {
// Truncate all relative data(header, total difficulty, body, receipt
// and canonical hash) from ancient store.
if err := bc.db.TruncateHead(num); err != nil {
if err := bc.db.TruncateAncients(num); err != nil {
log.Crit("Failed to truncate ancient data", "number", num, "err", err)
}
// Remove the hash <-> number mapping from the active store.
@@ -992,37 +981,38 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
// range. In this case, all tx indices of newly imported blocks should be
// generated.
var batch = bc.db.NewBatch()
for i, block := range blockChain {
for _, block := range blockChain {
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
rawdb.WriteTxLookupEntriesByBlock(batch, block)
}
stats.processed++
}
if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
size += int64(batch.ValueSize())
if err = batch.Write(); err != nil {
fastBlock := bc.CurrentFastBlock().NumberU64()
if err := bc.db.TruncateHead(fastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, err
}
batch.Reset()
// Flush all tx-lookup index data.
size += int64(batch.ValueSize())
if err := batch.Write(); err != nil {
// The tx index data could not be written.
// Roll back the ancient store update.
fastBlock := bc.CurrentFastBlock().NumberU64()
if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, err
}
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.Sync(); err != nil {
return 0, err
}
// Update the current fast block because all block data is now present in DB.
previousFastBlock := bc.CurrentFastBlock().NumberU64()
if !updateHead(blockChain[len(blockChain)-1]) {
// We end up here if the header chain has reorg'ed, and the blocks/receipts
// don't match the canonical chain.
if err := bc.db.TruncateHead(previousFastBlock + 1); err != nil {
if err := bc.db.TruncateAncients(previousFastBlock + 1); err != nil {
log.Error("Can't truncate ancient store after failed insert", "err", err)
}
return 0, errSideChainReceipts
@@ -1604,7 +1594,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
// Process block using the parent state as reference point
substart := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
var (
usedGas uint64
receipts types.Receipts
logs []*types.Log
)
receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig)
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
@@ -1659,16 +1654,12 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
blockWriteTimer.Update(time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits)
blockInsertTimer.UpdateSince(start)
// Report the import stats before returning the various results
stats.processed++
stats.usedGas += usedGas
dirty, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, it.index, dirty, setHead)
if !setHead {
return it.index, nil // Direct block insertion of a single block
// We did not setHead, so we don't have any stats to update
log.Info("Inserted block", "number", block.Number(), "hash", block.Hash(), "txs", len(block.Transactions()), "elapsed", common.PrettyDuration(time.Since(start)))
return it.index, nil
}
switch status {
case CanonStatTy:
log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(),
@@ -1695,6 +1686,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
}
stats.processed++
stats.usedGas += usedGas
dirty, _ := bc.stateCache.TrieDB().Size()
stats.report(chain, it.index, dirty)
}
// Any blocks remaining here? The only ones we care about are the future ones
@@ -2091,39 +2087,28 @@ func (bc *BlockChain) InsertBlockWithoutSetHead(block *types.Block) error {
// block. It's possible that after the reorg the relevant state of head
// is missing. It can be fixed by inserting a new block which triggers
// the re-execution.
func (bc *BlockChain) SetChainHead(head *types.Block) error {
func (bc *BlockChain) SetChainHead(newBlock *types.Block) error {
if !bc.chainmu.TryLock() {
return errChainStopped
}
defer bc.chainmu.Unlock()
// Run the reorg if necessary and set the given block as new head.
start := time.Now()
if head.ParentHash() != bc.CurrentBlock().Hash() {
if err := bc.reorg(bc.CurrentBlock(), head); err != nil {
if newBlock.ParentHash() != bc.CurrentBlock().Hash() {
if err := bc.reorg(bc.CurrentBlock(), newBlock); err != nil {
return err
}
}
bc.writeHeadBlock(head)
bc.writeHeadBlock(newBlock)
// Emit events
logs := bc.collectLogs(head.Hash(), false)
bc.chainFeed.Send(ChainEvent{Block: head, Hash: head.Hash(), Logs: logs})
logs := bc.collectLogs(newBlock.Hash(), false)
bc.chainFeed.Send(ChainEvent{Block: newBlock, Hash: newBlock.Hash(), Logs: logs})
if len(logs) > 0 {
bc.logsFeed.Send(logs)
}
bc.chainHeadFeed.Send(ChainHeadEvent{Block: head})
context := []interface{}{
"number", head.Number(),
"hash", head.Hash(),
"root", head.Root(),
"elapsed", time.Since(start),
}
if timestamp := time.Unix(int64(head.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
log.Info("Chain head was updated", context...)
bc.chainHeadFeed.Send(ChainHeadEvent{Block: newBlock})
log.Info("Set the chain head", "number", newBlock.Number(), "hash", newBlock.Hash())
return nil
}
@@ -2226,14 +2211,7 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
// If a previous indexing existed, make sure that we fill in any missing entries
if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
if *tail > 0 {
// It can happen when chain is rewound to a historical point which
// is even lower than the indexes tail, recap the indexing target
// to new head to avoid reading non-existent block bodies.
end := *tail
if end > head+1 {
end = head + 1
}
rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit)
}
return
}
@@ -2309,9 +2287,6 @@ Error: %v
// of the header retrieval mechanisms already need to verify nonces, as well as
// because nonces can be verified sparsely, not needing to check each.
func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
if len(chain) == 0 {
return 0, nil
}
start := time.Now()
if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
return i, err

View File

@@ -39,7 +39,7 @@ const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize) {
// Fetch the timings for the batch
var (
now = mclock.Now()
@@ -71,11 +71,8 @@ func (st *insertStats) report(chain []*types.Block, index int, dirty common.Stor
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
if setHead {
log.Info("Imported new chain segment", context...)
} else {
log.Info("Imported new potential chain segment", context...)
}
log.Info("Imported new chain segment", context...)
// Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1}
}

View File

@@ -73,12 +73,6 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
return bc.hc.GetHeaderByNumber(number)
}
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
return bc.hc.GetHeadersFrom(number, count)
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {

View File

@@ -1779,7 +1779,6 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
SnapshotLimit: 0, // Disable snapshot by default
}
)
defer engine.Close()
if snapshots {
config.SnapshotLimit = 256
config.SnapshotWait = true
@@ -1837,25 +1836,25 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
}
defer db.Close()
newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer newChain.Stop()
defer chain.Stop()
// Iterate over all the remaining blocks and ensure there are no gaps
verifyNoGaps(t, newChain, true, canonblocks)
verifyNoGaps(t, newChain, false, sideblocks)
verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
verifyNoGaps(t, chain, true, canonblocks)
verifyNoGaps(t, chain, false, sideblocks)
verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
}
if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
}
if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
}
if frozen, err := db.(freezer).Ancients(); err != nil {

View File

@@ -2987,10 +2987,10 @@ func TestDeleteRecreateSlots(t *testing.T) {
initCode := []byte{
byte(vm.PUSH1), 0x3, // value
byte(vm.PUSH1), 0x3, // location
byte(vm.SSTORE), // Set slot[3] = 3
byte(vm.SSTORE), // Set slot[3] = 1
byte(vm.PUSH1), 0x4, // value
byte(vm.PUSH1), 0x4, // location
byte(vm.SSTORE), // Set slot[4] = 4
byte(vm.SSTORE), // Set slot[4] = 1
// Slots are set, now return the code
byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
byte(vm.PUSH1), 0x0, // memory start on stack

View File

@@ -28,6 +28,7 @@ import (
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
// BlockGen creates blocks for testing.
@@ -284,6 +285,91 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
return blocks, receipts
}
func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
if config == nil {
config = params.TestChainConfig
}
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
chainreader := &fakeChainReader{config: config}
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
b.header = makeHeader(chainreader, parent, statedb, b.engine)
// Mutate the state and block according to any hard-fork specs
if daoBlock := config.DAOForkBlock; daoBlock != nil {
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 {
if config.DAOForkSupport {
b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
}
}
}
if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 {
misc.ApplyDAOHardFork(statedb)
}
// Execute any user modifications to the block
if gen != nil {
gen(i, b)
}
if b.engine != nil {
// Finalize and seal the block
block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
if err != nil {
panic(err)
}
// Write state changes to db
root, err := statedb.Commit(config.IsEIP158(b.header.Number))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
panic(fmt.Sprintf("trie write error: %v", err))
}
// Generate an associated verkle proof
if tr := statedb.GetTrie(); tr.IsVerkle() {
vtr := tr.(*trie.VerkleTrie)
// Generate the proof if we are using a verkle tree
// WORKAROUND: make sure all keys are resolved
// before building the proof. Ultimately, node
// resolution can be done with a prefetcher or
// from GetCommitmentsAlongPath.
keys := statedb.Witness().Keys()
for _, key := range keys {
out, err := vtr.TryGet(key)
if err != nil {
panic(err)
}
if len(out) == 0 {
panic(fmt.Sprintf("%x should be present in the tree", key))
}
}
vtr.Hash()
_, err := vtr.ProveAndSerialize(keys, statedb.Witness().KeyVals())
//block.SetVerkleProof(p)
if err != nil {
panic(err)
}
}
return block, b.receipts
}
return nil, nil
}
for i := 0; i < n; i++ {
statedb, err := state.New(parent.Root(), state.NewDatabaseWithConfig(db, &trie.Config{UseVerkle: true}), nil)
if err != nil {
panic(err)
}
block, receipt := genblock(i, parent, statedb)
blocks[i] = block
receipts[i] = receipt
parent = block
}
return blocks, receipts
}
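For orientation, a minimal usage sketch (not part of the diff) of how GenerateVerkleChain is expected to be driven, mirroring how GenerateChain is used in tests. DefaultVerkleGenesisBlock is introduced later in this change set; exampleVerkleChain is a hypothetical name, and ethash.NewFaker / rawdb.NewMemoryDatabase are the usual geth test helpers.
package core
import (
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
)
func exampleVerkleChain() {
// Commit a verkle-enabled genesis into a fresh in-memory database.
db := rawdb.NewMemoryDatabase()
gspec := DefaultVerkleGenesisBlock()
genesis := gspec.MustCommit(db)
// Build three empty blocks on top; the callback may add transactions.
blocks, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 3, func(i int, b *BlockGen) {})
_ = blocks
}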
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
var time uint64
if parent.Time() == 0 {

View File

@@ -40,7 +40,6 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
var (
beneficiary common.Address
baseFee *big.Int
random *common.Hash
)
// If we don't have an explicit author (i.e. not mining), extract from the header
@@ -52,9 +51,6 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
if header.BaseFee != nil {
baseFee = new(big.Int).Set(header.BaseFee)
}
if header.Difficulty.Cmp(common.Big0) == 0 {
random = &header.MixDigest
}
return vm.BlockContext{
CanTransfer: CanTransfer,
Transfer: Transfer,
@@ -65,7 +61,6 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
Difficulty: new(big.Int).Set(header.Difficulty),
BaseFee: baseFee,
GasLimit: header.GasLimit,
Random: random,
}
}
@@ -74,6 +69,7 @@ func NewEVMTxContext(msg Message) vm.TxContext {
return vm.TxContext{
Origin: msg.From(),
GasPrice: new(big.Int).Set(msg.GasPrice()),
Accesses: types.NewAccessWitness(),
}
}

View File

@@ -19,7 +19,6 @@ package forkid
import (
"bytes"
"math"
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -30,8 +29,6 @@ import (
// TestCreation tests that different genesis and fork rule combinations result in
// the correct fork ID.
func TestCreation(t *testing.T) {
mergeConfig := *params.MainnetChainConfig
mergeConfig.MergeForkBlock = big.NewInt(15000000)
type testcase struct {
head uint64
want ID
@@ -68,7 +65,7 @@ func TestCreation(t *testing.T) {
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // First Arrow Glacier block
{20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block
},
},
@@ -136,38 +133,6 @@ func TestCreation(t *testing.T) {
{6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block
},
},
// Merge test cases
{
&mergeConfig,
params.MainnetGenesisHash,
[]testcase{
{0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
{1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
{1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
{1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
{1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
{2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
{2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
{2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
{2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
{4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
{4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
{9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
{9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
{12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
{12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15000000}}, // First Arrow Glacier block
{15000000, ID{Hash: checksumToBytes(0xe3abe201), Next: 0}}, // First Merge Start block
{20000000, ID{Hash: checksumToBytes(0xe3abe201), Next: 0}}, // Future Merge Start block
},
},
}
for i, tt := range tests {
for j, ttt := range tt.cases {

View File

@@ -80,81 +80,6 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error {
return nil
}
// flush adds the allocated genesis accounts into a fresh statedb and
// commits the state changes into the given database handler.
func (ga *GenesisAlloc) flush(db ethdb.Database) (common.Hash, error) {
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
if err != nil {
return common.Hash{}, err
}
for addr, account := range *ga {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root, err := statedb.Commit(false)
if err != nil {
return common.Hash{}, err
}
err = statedb.Database().TrieDB().Commit(root, true, nil)
if err != nil {
return common.Hash{}, err
}
return root, nil
}
// write writes the json marshaled genesis state into database
// with the given block hash as the unique identifier.
func (ga *GenesisAlloc) write(db ethdb.KeyValueWriter, hash common.Hash) error {
blob, err := json.Marshal(ga)
if err != nil {
return err
}
rawdb.WriteGenesisState(db, hash, blob)
return nil
}
// CommitGenesisState loads the stored genesis state with the given block
// hash and commits it into the given database handler.
func CommitGenesisState(db ethdb.Database, hash common.Hash) error {
var alloc GenesisAlloc
blob := rawdb.ReadGenesisState(db, hash)
if len(blob) != 0 {
if err := alloc.UnmarshalJSON(blob); err != nil {
return err
}
} else {
// Genesis allocation is missing and there are several possibilities:
// either the node is legacy and never persisted the genesis allocation,
// or the persisted allocation was lost.
// - supported networks (mainnet, testnets): recover with the built-in allocations
// - private network: can't recover
var genesis *Genesis
switch hash {
case params.MainnetGenesisHash:
genesis = DefaultGenesisBlock()
case params.RopstenGenesisHash:
genesis = DefaultRopstenGenesisBlock()
case params.RinkebyGenesisHash:
genesis = DefaultRinkebyGenesisBlock()
case params.GoerliGenesisHash:
genesis = DefaultGoerliGenesisBlock()
case params.SepoliaGenesisHash:
genesis = DefaultSepoliaGenesisBlock()
}
if genesis != nil {
alloc = genesis.Alloc
} else {
return errors.New("not found")
}
}
_, err := alloc.flush(db)
return err
}
// GenesisAccount is an account in the state of the genesis block.
type GenesisAccount struct {
Code []byte `json:"code,omitempty"`
@@ -237,6 +162,7 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
// Just commit the new block if there is no stored genesis block.
stored := rawdb.ReadCanonicalHash(db, 0)
if (stored == common.Hash{}) {
@@ -252,13 +178,29 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
}
return genesis.Config, block.Hash(), nil
}
// We have the genesis block in the database (perhaps in the ancient store)
// but the corresponding state is missing.
header := rawdb.ReadHeader(db, stored, 0)
if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil {
if genesis == nil {
genesis = DefaultGenesisBlock()
var trieCfg *trie.Config
if genesis == nil {
storedcfg := rawdb.ReadChainConfig(db, stored)
if storedcfg == nil {
panic("unreachable: if genesis is nil, the stored config must already exist, since 'geth init' (the genesis != nil branch above) wrote it")
}
if storedcfg.CancunBlock != nil {
if storedcfg.CancunBlock.Cmp(big.NewInt(0)) != 0 {
panic("cancun block must be 0")
}
trieCfg = &trie.Config{UseVerkle: storedcfg.IsCancun(big.NewInt(header.Number.Int64()))}
}
}
if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, trieCfg), nil); err != nil {
// Ensure the stored genesis matches with the given one.
hash := genesis.ToBlock(nil).Hash()
if hash != stored {
@@ -294,19 +236,11 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
rawdb.WriteChainConfig(db, stored, newcfg)
return newcfg, stored, nil
}
// Special case: if a private network is being used (no genesis and also no
// mainnet hash in the database), we must not apply the `configOrDefault`
// chain config as that would be AllProtocolChanges (applying any new fork
// on top of an existing private network genesis block). In that case, only
// apply the overrides.
// Special case: don't change the existing config of a non-mainnet chain if no new
// config is supplied. These chains would get AllProtocolChanges (and a compat error)
// if we just continued here.
if genesis == nil && stored != params.MainnetGenesisHash {
newcfg = storedcfg
if overrideArrowGlacier != nil {
newcfg.ArrowGlacierBlock = overrideArrowGlacier
}
if overrideTerminalTotalDifficulty != nil {
newcfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty
}
return storedcfg, stored, nil
}
// Check config compatibility and write the config. Compatibility errors
// are returned to the caller unless we're already at block zero.
@@ -336,8 +270,6 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig {
return params.RinkebyChainConfig
case ghash == params.GoerliGenesisHash:
return params.GoerliChainConfig
case ghash == params.KilnGenesisHash:
return DefaultKilnGenesisBlock().Config
default:
return params.AllEthashProtocolChanges
}
@@ -349,10 +281,23 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if db == nil {
db = rawdb.NewMemoryDatabase()
}
root, err := g.Alloc.flush(db)
var trieCfg *trie.Config
if g.Config != nil {
trieCfg = &trie.Config{UseVerkle: g.Config.IsCancun(big.NewInt(int64(g.Number)))}
}
statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, trieCfg), nil)
if err != nil {
panic(err)
}
for addr, account := range g.Alloc {
statedb.AddBalance(addr, account.Balance)
statedb.SetCode(addr, account.Code)
statedb.SetNonce(addr, account.Nonce)
for key, value := range account.Storage {
statedb.SetState(addr, key, value)
}
}
root := statedb.IntermediateRoot(false)
head := &types.Header{
Number: new(big.Int).SetUint64(g.Number),
Nonce: types.EncodeNonce(g.Nonce),
@@ -370,7 +315,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
if g.GasLimit == 0 {
head.GasLimit = params.GenesisGasLimit
}
if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
if g.Difficulty == nil {
head.Difficulty = params.GenesisDifficulty
}
if g.Config != nil && g.Config.IsLondon(common.Big0) {
@@ -380,6 +325,12 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
}
}
statedb.Commit(false)
statedb.Database().TrieDB().Commit(root, true, nil)
if err := statedb.Cap(root); err != nil {
panic(err)
}
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
}
@@ -400,9 +351,6 @@ func (g *Genesis) Commit(db ethdb.Database) (*types.Block, error) {
if config.Clique != nil && len(block.Extra()) == 0 {
return nil, errors.New("can't start clique chain without signers")
}
if err := g.Alloc.write(db, block.Hash()); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
rawdb.WriteBlock(db, block)
rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), nil)
@@ -433,6 +381,20 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big
return g.MustCommit(db)
}
func DefaultVerkleGenesisBlock() *Genesis {
return &Genesis{
Config: params.VerkleChainConfig,
Nonce: 86,
GasLimit: 0x2fefd8,
Difficulty: big.NewInt(1),
Alloc: map[common.Address]GenesisAccount{
common.BytesToAddress([]byte{97, 118, 97, 209, 72, 165, 43, 239, 81, 162, 104, 199, 40, 179, 162, 27, 88, 249, 67, 6}): {
Balance: big.NewInt(0).Lsh(big.NewInt(1), 27),
},
},
}
}
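Worked values for the allocation above, as a sketch: the 20 raw bytes are simply the account address in hex, and the balance is a left shift, 1 << 27 wei.
package main
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
)
func main() {
addr := common.BytesToAddress([]byte{97, 118, 97, 209, 72, 165, 43, 239, 81, 162, 104, 199, 40, 179, 162, 27, 88, 249, 67, 6})
fmt.Printf("%x\n", addr.Bytes()) // 617661d148a52bef51a268c728b3a21b58f94306
fmt.Println(new(big.Int).Lsh(big.NewInt(1), 27)) // 134217728 wei
}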
// DefaultGenesisBlock returns the Ethereum main net genesis block.
func DefaultGenesisBlock() *Genesis {
return &Genesis{
@@ -494,15 +456,6 @@ func DefaultSepoliaGenesisBlock() *Genesis {
}
}
func DefaultKilnGenesisBlock() *Genesis {
g := new(Genesis)
reader := strings.NewReader(KilnAllocData)
if err := json.NewDecoder(reader).Decode(g); err != nil {
panic(err)
}
return g
}
// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one

File diff suppressed because one or more lines are too long

View File

@@ -213,33 +213,3 @@ func TestGenesis_Commit(t *testing.T) {
t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty())
}
}
func TestReadWriteGenesisAlloc(t *testing.T) {
var (
db = rawdb.NewMemoryDatabase()
alloc = &GenesisAlloc{
{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
{2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}},
}
hash = common.HexToHash("0xdeadbeef")
)
alloc.write(db, hash)
var reload GenesisAlloc
err := reload.UnmarshalJSON(rawdb.ReadGenesisState(db, hash))
if err != nil {
t.Fatalf("Failed to load genesis state %v", err)
}
if len(reload) != len(*alloc) {
t.Fatal("Unexpected genesis allocation")
}
for addr, account := range reload {
want, ok := (*alloc)[addr]
if !ok {
t.Fatal("Account is not found")
}
if !reflect.DeepEqual(want, account) {
t.Fatal("Unexpected account")
}
}
}

View File

@@ -33,7 +33,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
lru "github.com/hashicorp/golang-lru"
)
@@ -499,46 +498,6 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
return hc.GetHeader(hash, number)
}
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
// If the 'number' is higher than the highest local header, this method will
// return a best-effort response, containing the headers that we do have.
func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
// If the request is for future headers, we still return the portion of
// headers that we are able to serve
if current := hc.CurrentHeader().Number.Uint64(); current < number {
if count > number-current {
count -= number - current
number = current
} else {
return nil
}
}
var headers []rlp.RawValue
// If we have some of the headers in cache already, use that before going to db.
hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
if hash == (common.Hash{}) {
return nil
}
for count > 0 {
header, ok := hc.headerCache.Get(hash)
if !ok {
break
}
h := header.(*types.Header)
rlpData, _ := rlp.EncodeToBytes(h)
headers = append(headers, rlpData)
hash = h.ParentHash
count--
number--
}
// Read remaining from db
if count > 0 {
headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
}
return headers
}
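The cache-before-database pattern GetHeadersFrom relies on, reduced to a self-contained sketch with the same hashicorp/golang-lru package that headerchain imports:
package main
import (
"fmt"
lru "github.com/hashicorp/golang-lru"
)
func main() {
cache, _ := lru.New(16)
cache.Add("header-42", "rlp-bytes")
// Serve from the cache while possible, fall back to the slow path otherwise.
if v, ok := cache.Get("header-42"); ok {
fmt.Println("cache hit:", v)
} else {
fmt.Println("cache miss: read from database")
}
}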
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

View File

@@ -83,8 +83,8 @@ type NumberHash struct {
Hash common.Hash
}
// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// heights, both canonical and reorged forks included.
// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
// both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
var (
@@ -279,56 +279,6 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
}
}
// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
// backwards towards genesis. This method assumes that the caller already has
// placed a cap on count, to prevent DoS issues.
// Since this method operates in head-towards-genesis mode, it will return an empty
// slice in case the head ('number') is missing. Hence, the caller must ensure that
// the head ('number') argument is actually an existing header.
//
// N.B: Since the input is a number, as opposed to a hash, it's implicit that
// this method only operates on canon headers.
func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue {
var rlpHeaders []rlp.RawValue
if count == 0 {
return rlpHeaders
}
i := number
if count-1 > number {
// It's ok to request block 0, 1 item
count = number + 1
}
limit, _ := db.Ancients()
// First read live blocks
if i >= limit {
// If we need to read live blocks, we need to figure out the hash first
hash := ReadCanonicalHash(db, number)
for ; i >= limit && count > 0; i-- {
if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 {
rlpHeaders = append(rlpHeaders, data)
// Get the parent hash for next query
hash = types.HeaderParentHashFromRLP(data)
} else {
break // Maybe got moved to ancients
}
count--
}
}
if count == 0 {
return rlpHeaders
}
// read remaining from ancients
max := count * 700
data, err := db.AncientRange(freezerHeaderTable, i+1-count, count, max)
if err == nil && uint64(len(data)) == count {
// the data is on the order [h, h+1, .., n] -- reordering needed
for i := range data {
rlpHeaders = append(rlpHeaders, data[len(data)-1-i])
}
}
return rlpHeaders
}
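A worked example of the count capping at the top of ReadHeaderRange: a request that would reach below genesis is clamped so that at most number+1 headers can be returned.
package main
import "fmt"
func main() {
number, count := uint64(5), uint64(10)
if count-1 > number {
count = number + 1 // it's ok to request block 0, 1 item
}
fmt.Println(count) // 6: headers 5, 4, 3, 2, 1, 0
}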
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
var data []byte
@@ -447,11 +397,8 @@ func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
if len(data) > 0 {
return nil
}
// Block is not in ancients, read from leveldb by hash and number.
// Note: ReadCanonicalHash cannot be used here because it also
// calls ReadAncients internally.
hash, _ := db.Get(headerHashKey(number))
data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
// Get it by hash from leveldb
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
return nil
})
return data
@@ -717,7 +664,7 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.C
if logs := readLegacyLogs(db, hash, number, config); logs != nil {
return logs
}
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
log.Error("Invalid receipt array RLP", "hash", "err", err)
return nil
}
@@ -776,7 +723,7 @@ func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
WriteHeader(db, block.Header())
}
// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
var (
tdSum = new(big.Int).Set(td)

View File

@@ -883,67 +883,3 @@ func BenchmarkDecodeRLPLogs(b *testing.B) {
}
})
}
func TestHeadersRLPStorage(t *testing.T) {
// Have N headers in the freezer
frdir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("failed to create temp freezer dir: %v", err)
}
defer os.Remove(frdir)
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
if err != nil {
t.Fatalf("failed to create database with ancient backend")
}
defer db.Close()
// Create blocks
var chain []*types.Block
var pHash common.Hash
for i := 0; i < 100; i++ {
block := types.NewBlockWithHeader(&types.Header{
Number: big.NewInt(int64(i)),
Extra: []byte("test block"),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
ParentHash: pHash,
})
chain = append(chain, block)
pHash = block.Hash()
}
var receipts []types.Receipts = make([]types.Receipts, 100)
// Write first half to ancients
WriteAncientBlocks(db, chain[:50], receipts[:50], big.NewInt(100))
// Write second half to db
for i := 50; i < 100; i++ {
WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64())
WriteBlock(db, chain[i])
}
checkSequence := func(from, amount int) {
headersRlp := ReadHeaderRange(db, uint64(from), uint64(amount))
if have, want := len(headersRlp), amount; have != want {
t.Fatalf("have %d headers, want %d", have, want)
}
for i, headerRlp := range headersRlp {
var header types.Header
if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
t.Fatal(err)
}
if have, want := header.Number.Uint64(), uint64(from-i); have != want {
t.Fatalf("wrong number, have %d want %d", have, want)
}
}
}
checkSequence(99, 20) // Latest block and 19 parents
checkSequence(99, 50) // Latest block -> all db blocks
checkSequence(99, 51) // Latest block -> one from ancients
checkSequence(99, 52) // Latest blocks -> two from ancients
checkSequence(50, 2) // One from db, one from ancients
checkSequence(49, 1) // One from ancients
checkSequence(49, 50) // All ancient ones
checkSequence(99, 100) // All blocks
checkSequence(0, 1) // Only genesis
checkSequence(1, 1) // Only block 1
checkSequence(1, 2) // Genesis + block 1
}

View File

@@ -81,19 +81,6 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha
}
}
// ReadGenesisState retrieves the genesis state based on the given genesis hash.
func ReadGenesisState(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(genesisKey(hash))
return data
}
// WriteGenesisState writes the genesis state to disk.
func WriteGenesisState(db ethdb.KeyValueWriter, hash common.Hash, data []byte) {
if err := db.Put(genesisKey(hash), data); err != nil {
log.Crit("Failed to store genesis state", "err", err)
}
}
// crashList is a list of unclean-shutdown-markers, for rlp-encoding to the
// database
type crashList struct {
@@ -152,28 +139,6 @@ func PopUncleanShutdownMarker(db ethdb.KeyValueStore) {
}
}
// UpdateUncleanShutdownMarker updates the last marker's timestamp to now.
func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
var uncleanShutdowns crashList
// Read old data
if data, err := db.Get(uncleanShutdownKey); err != nil {
log.Warn("Error reading unclean shutdown markers", "error", err)
} else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
log.Warn("Error decoding unclean shutdown markers", "error", err)
}
// This shouldn't happen because we push a marker on Backend instantiation
count := len(uncleanShutdowns.Recent)
if count == 0 {
log.Warn("No unclean shutdown marker to update")
return
}
uncleanShutdowns.Recent[count-1] = uint64(time.Now().Unix())
data, _ := rlp.EncodeToBytes(uncleanShutdowns)
if err := db.Put(uncleanShutdownKey, data); err != nil {
log.Warn("Failed to write unclean-shutdown marker", "err", err)
}
}
// ReadTransitionStatus retrieves the eth2 transition status from the database
func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
data, _ := db.Get(transitionStatusKey)

View File

@@ -115,7 +115,7 @@ func DeleteStorageSnapshot(db ethdb.KeyValueWriter, accountHash, storageHash com
// IterateStorageSnapshots returns an iterator for walking the entire storage
// space of a specific account.
func IterateStorageSnapshots(db ethdb.Iteratee, accountHash common.Hash) ethdb.Iterator {
return NewKeyLengthIterator(db.NewIterator(storageSnapshotsKey(accountHash), nil), len(SnapshotStoragePrefix)+2*common.HashLength)
return db.NewIterator(storageSnapshotsKey(accountHash), nil)
}
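The wrapper being dropped here matters because a bare prefix iterator also yields any longer keys sharing the prefix; NewKeyLengthIterator keeps only keys of the exact storage-entry length. A sketch of that length, assuming a one-byte prefix and 32-byte hashes:
package main
import "fmt"
func main() {
prefixLen, hashLen := 1, 32 // assumed: len(SnapshotStoragePrefix) and common.HashLength
fmt.Println(prefixLen + 2*hashLen) // 65: prefix + account hash + storage slot hash
}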
// ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at

View File

@@ -28,58 +28,6 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
return data
}
// ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
// Try with the prefixed code scheme first, if not then try with legacy
// scheme.
data := ReadCodeWithPrefix(db, hash)
if len(data) != 0 {
return data
}
data, _ = db.Get(hash.Bytes())
return data
}
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
// The main difference between this function and ReadCode is this function
// will only check the existence with latest scheme(with prefix).
func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(codeKey(hash))
return data
}
// ReadTrieNode retrieves the trie node of the provided hash.
func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(hash.Bytes())
return data
}
// HasCode checks if the contract code corresponding to the
// provided code hash is present in the db.
func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
// Try with the prefixed code scheme first, if not then try with legacy
// scheme.
if ok := HasCodeWithPrefix(db, hash); ok {
return true
}
ok, _ := db.Has(hash.Bytes())
return ok
}
// HasCodeWithPrefix checks if the contract code corresponding to the
// provided code hash is present in the db. This function will only check
// presence using the prefix-scheme.
func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
ok, _ := db.Has(codeKey(hash))
return ok
}
// HasTrieNode checks if the trie node with the provided hash is present in db.
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
ok, _ := db.Has(hash.Bytes())
return ok
}
// WritePreimages writes the provided set of preimages to the database.
func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
for hash, preimage := range preimages {
@@ -91,6 +39,28 @@ func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
preimageHitCounter.Inc(int64(len(preimages)))
}
// ReadCode retrieves the contract code of the provided code hash.
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
// Try the legacy code scheme first and fall back to the current scheme if
// nothing is found, since most code is still stored under the legacy scheme.
//
// todo(rjl493456442) change the order when we forcibly upgrade the code
// scheme with snapshot.
data, _ := db.Get(hash[:])
if len(data) != 0 {
return data
}
return ReadCodeWithPrefix(db, hash)
}
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
// Unlike ReadCode, this function only checks for existence under the latest
// scheme (with prefix).
func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(codeKey(hash))
return data
}
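A sketch of the two key shapes this lookup order distinguishes; the one-byte "c" prefix is hard-coded here for illustration and stands in for whatever codeKey prepends.
package main
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
)
func main() {
hash := common.HexToHash("0x01")
legacyKey := hash.Bytes()                           // legacy scheme: the raw code hash
prefixedKey := append([]byte("c"), hash.Bytes()...) // prefixed scheme: prefix + hash
fmt.Println(len(legacyKey), len(prefixedKey))       // 32 33
}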
// WriteCode writes the provided contract code to the database.
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
if err := db.Put(codeKey(hash), code); err != nil {
@@ -98,13 +68,6 @@ func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
}
}
// WriteTrieNode writes the provided trie node into the database.
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
if err := db.Put(hash.Bytes(), node); err != nil {
log.Crit("Failed to store trie node", "err", err)
}
}
// DeleteCode deletes the specified contract code from the database.
func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(codeKey(hash)); err != nil {
@@ -112,6 +75,19 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
}
}
// ReadTrieNode retrieves the trie node of the provided hash.
func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
data, _ := db.Get(hash.Bytes())
return data
}
// WriteTrieNode writes the provided trie node into the database.
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
if err := db.Put(hash.Bytes(), node); err != nil {
log.Crit("Failed to store trie node", "err", err)
}
}
// DeleteTrieNode deletes the specified trie node from the database.
func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
if err := db.Delete(hash.Bytes()); err != nil {

View File

@@ -1,80 +0,0 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"bytes"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// ReadSkeletonSyncStatus retrieves the serialized sync status saved at shutdown.
func ReadSkeletonSyncStatus(db ethdb.KeyValueReader) []byte {
data, _ := db.Get(skeletonSyncStatusKey)
return data
}
// WriteSkeletonSyncStatus stores the serialized sync status to save at shutdown.
func WriteSkeletonSyncStatus(db ethdb.KeyValueWriter, status []byte) {
if err := db.Put(skeletonSyncStatusKey, status); err != nil {
log.Crit("Failed to store skeleton sync status", "err", err)
}
}
// DeleteSkeletonSyncStatus deletes the serialized sync status saved at the last
// shutdown
func DeleteSkeletonSyncStatus(db ethdb.KeyValueWriter) {
if err := db.Delete(skeletonSyncStatusKey); err != nil {
log.Crit("Failed to remove skeleton sync status", "err", err)
}
}
// ReadSkeletonHeader retrieves a block header from the skeleton sync store.
func ReadSkeletonHeader(db ethdb.KeyValueReader, number uint64) *types.Header {
data, _ := db.Get(skeletonHeaderKey(number))
if len(data) == 0 {
return nil
}
header := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
log.Error("Invalid skeleton header RLP", "number", number, "err", err)
return nil
}
return header
}
// WriteSkeletonHeader stores a block header into the skeleton sync store.
func WriteSkeletonHeader(db ethdb.KeyValueWriter, header *types.Header) {
data, err := rlp.EncodeToBytes(header)
if err != nil {
log.Crit("Failed to RLP encode header", "err", err)
}
key := skeletonHeaderKey(header.Number.Uint64())
if err := db.Put(key, data); err != nil {
log.Crit("Failed to store skeleton header", "err", err)
}
}
// DeleteSkeletonHeader removes all block header data associated with a number.
func DeleteSkeletonHeader(db ethdb.KeyValueWriter, number uint64) {
if err := db.Delete(skeletonHeaderKey(number)); err != nil {
log.Crit("Failed to delete skeleton header", "err", err)
}
}
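A minimal round-trip sketch using the same rlp calls as the skeleton-header helpers above:
package main
import (
"bytes"
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/rlp"
)
func main() {
in := &types.Header{Number: big.NewInt(42), Difficulty: big.NewInt(1)}
data, err := rlp.EncodeToBytes(in)
if err != nil {
panic(err)
}
out := new(types.Header)
if err := rlp.Decode(bytes.NewReader(data), out); err != nil {
panic(err)
}
fmt.Println(out.Number) // 42
}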

View File

@@ -247,8 +247,7 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan
}
}
// IndexTransactions creates txlookup indices of the specified block range. The from
// is included while to is excluded.
// IndexTransactions creates txlookup indices of the specified block range.
//
// This function iterates the canonical chain in reverse order; it has one main advantage:
// we can write the tx index tail flag periodically even without the whole indexing
@@ -340,7 +339,6 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch
}
// UnindexTransactions removes txlookup indices of the specified block range.
// The from is included while to is excluded.
//
// There is a passed channel, the whole procedure will be interrupted if any
// signal received.

View File

@@ -99,11 +99,6 @@ func (db *nofreezedb) Ancients() (uint64, error) {
return 0, errNotSupported
}
// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
return 0, errNotSupported
}
// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
return 0, errNotSupported
@@ -114,13 +109,8 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e
return 0, errNotSupported
}
// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) error {
return errNotSupported
}
// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
// TruncateAncients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateAncients(items uint64) error {
return errNotSupported
}
@@ -145,12 +135,6 @@ func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReader) error) (e
return fn(db)
}
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
return errNotSupported
}
// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
@@ -227,7 +211,7 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
// Block #1 is still in the database, so we're allowed to init a new freezer
}
// Otherwise, the head header is still the genesis, we're allowed to init a new
// freezer.
// feezer.
}
}
// Freezer is consistent with the key-value database, permit combining the two
@@ -337,7 +321,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
storageSnaps stat
preimages stat
bloomBits stat
beaconHeaders stat
cliqueSnaps stat
// Ancient store statistics
@@ -392,14 +375,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
preimages.Add(size)
case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
metadata.Add(size)
case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
metadata.Add(size)
case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
bloomBits.Add(size)
case bytes.HasPrefix(key, BloomBitsIndexPrefix):
bloomBits.Add(size)
case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
beaconHeaders.Add(size)
case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
cliqueSnaps.Add(size)
case bytes.HasPrefix(key, []byte("cht-")) ||
@@ -416,7 +395,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey,
fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
uncleanShutdownKey, badBlockKey, transitionStatusKey,
} {
if bytes.Equal(key, meta) {
metadata.Add(size)
@@ -462,7 +441,6 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},

View File

@@ -19,7 +19,6 @@ package rawdb
import (
"errors"
"fmt"
"io/ioutil"
"math"
"os"
"path/filepath"
@@ -67,7 +66,7 @@ const (
freezerTableSize = 2 * 1000 * 1000 * 1000
)
// freezer is a memory mapped append-only database to store immutable chain data
// freezer is an memory mapped append-only database to store immutable chain data
// into flat files:
//
// - The append only nature ensures that disk writes are minimized.
@@ -79,7 +78,6 @@ type freezer struct {
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
frozen uint64 // Number of blocks already frozen
tail uint64 // Number of the first stored item in the freezer
threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)
// This lock synchronizes writers and the truncate operation, as well as
@@ -135,7 +133,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
// Create the tables.
for name, disableSnappy := range tables {
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
if err != nil {
for _, table := range freezer.tables {
table.Close()
@@ -146,15 +144,8 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
freezer.tables[name] = table
}
if freezer.readonly {
// In readonly mode only validate, don't truncate.
// validate also sets `freezer.frozen`.
err = freezer.validate()
} else {
// Truncate all tables to common length.
err = freezer.repair()
}
if err != nil {
// Truncate all tables to common length.
if err := freezer.repair(); err != nil {
for _, table := range freezer.tables {
table.Close()
}
@@ -228,11 +219,6 @@ func (f *freezer) Ancients() (uint64, error) {
return atomic.LoadUint64(&f.frozen), nil
}
// Tail returns the number of first stored item in the freezer.
func (f *freezer) Tail() (uint64, error) {
return atomic.LoadUint64(&f.tail), nil
}
// AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) {
// This needs the write lock to avoid data races on table fields.
@@ -268,7 +254,7 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
if err != nil {
// The write operation has failed. Go back to the previous item position.
for name, table := range f.tables {
err := table.truncateHead(prevItem)
err := table.truncate(prevItem)
if err != nil {
log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
}
@@ -288,8 +274,8 @@ func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize
return writeSize, nil
}
// TruncateHead discards any recent data above the provided threshold number.
func (f *freezer) TruncateHead(items uint64) error {
// TruncateAncients discards any recent data above the provided threshold number.
func (f *freezer) TruncateAncients(items uint64) error {
if f.readonly {
return errReadOnly
}
@@ -300,7 +286,7 @@ func (f *freezer) TruncateHead(items uint64) error {
return nil
}
for _, table := range f.tables {
if err := table.truncateHead(items); err != nil {
if err := table.truncate(items); err != nil {
return err
}
}
@@ -308,26 +294,6 @@ func (f *freezer) TruncateHead(items uint64) error {
return nil
}
// TruncateTail discards any data below the provided threshold number.
func (f *freezer) TruncateTail(tail uint64) error {
if f.readonly {
return errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
if atomic.LoadUint64(&f.tail) >= tail {
return nil
}
for _, table := range f.tables {
if err := table.truncateTail(tail); err != nil {
return err
}
}
atomic.StoreUint64(&f.tail, tail)
return nil
}
// Sync flushes all data tables to disk.
func (f *freezer) Sync() error {
var errs []error
@@ -342,59 +308,21 @@ func (f *freezer) Sync() error {
return nil
}
// validate checks that every table has the same length.
// Used instead of `repair` in readonly mode.
func (f *freezer) validate() error {
if len(f.tables) == 0 {
return nil
}
var (
length uint64
name string
)
// Hack to get length of any table
for kind, table := range f.tables {
length = atomic.LoadUint64(&table.items)
name = kind
break
}
// Now check every table against that length
for kind, table := range f.tables {
items := atomic.LoadUint64(&table.items)
if length != items {
return fmt.Errorf("freezer tables %s and %s have differing lengths: %d != %d", kind, name, items, length)
}
}
atomic.StoreUint64(&f.frozen, length)
return nil
}
// repair truncates all data tables to the same length.
func (f *freezer) repair() error {
var (
head = uint64(math.MaxUint64)
tail = uint64(0)
)
min := uint64(math.MaxUint64)
for _, table := range f.tables {
items := atomic.LoadUint64(&table.items)
if head > items {
head = items
}
hidden := atomic.LoadUint64(&table.itemHidden)
if hidden > tail {
tail = hidden
if min > items {
min = items
}
}
for _, table := range f.tables {
if err := table.truncateHead(head); err != nil {
return err
}
if err := table.truncateTail(tail); err != nil {
if err := table.truncate(min); err != nil {
return err
}
}
atomic.StoreUint64(&f.frozen, head)
atomic.StoreUint64(&f.tail, tail)
atomic.StoreUint64(&f.frozen, min)
return nil
}
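The repair rule in isolation, as a runnable sketch: every table is truncated to the shortest table's item count, so the index and data files cannot disagree after a crash.
package main
import (
"fmt"
"math"
)
func main() {
items := map[string]uint64{"headers": 10, "bodies": 8, "receipts": 9}
min := uint64(math.MaxUint64)
for _, n := range items {
if n < min {
min = n
}
}
fmt.Println("truncate all tables to", min) // 8
}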
@@ -618,116 +546,3 @@ func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []
return hashes, err
}
// convertLegacyFn takes a raw freezer entry in an older format and
// returns it in the new format.
type convertLegacyFn = func([]byte) ([]byte, error)
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
func (f *freezer) MigrateTable(kind string, convert convertLegacyFn) error {
if f.readonly {
return errReadOnly
}
f.writeLock.Lock()
defer f.writeLock.Unlock()
table, ok := f.tables[kind]
if !ok {
return errUnknownTable
}
// forEach iterates every entry in the table serially and in order, calling `fn`
// with the item as argument. If `fn` returns an error the iteration stops
// and that error will be returned.
forEach := func(t *freezerTable, offset uint64, fn func(uint64, []byte) error) error {
var (
items = atomic.LoadUint64(&t.items)
batchSize = uint64(1024)
maxBytes = uint64(1024 * 1024)
)
for i := offset; i < items; {
if i+batchSize > items {
batchSize = items - i
}
data, err := t.RetrieveItems(i, batchSize, maxBytes)
if err != nil {
return err
}
for j, item := range data {
if err := fn(i+uint64(j), item); err != nil {
return err
}
}
i += uint64(len(data))
}
return nil
}
// TODO(s1na): This is a sanity-check since as of now no process does tail-deletion. But the migration
// process assumes no deletion at tail and needs to be modified to account for that.
if table.itemOffset > 0 || table.itemHidden > 0 {
return fmt.Errorf("migration not supported for tail-deleted freezers")
}
ancientsPath := filepath.Dir(table.index.Name())
// Set up a new dir for the migrated table; its contents will be moved
// over to the ancients dir at the end.
migrationPath := filepath.Join(ancientsPath, "migration")
newTable, err := NewFreezerTable(migrationPath, kind, FreezerNoSnappy[kind], false)
if err != nil {
return err
}
var (
batch = newTable.newBatch()
out []byte
start = time.Now()
logged = time.Now()
offset = newTable.items
)
if offset > 0 {
log.Info("found previous migration attempt", "migrated", offset)
}
// Iterate through entries and transform them
if err := forEach(table, offset, func(i uint64, blob []byte) error {
if i%10000 == 0 && time.Since(logged) > 16*time.Second {
log.Info("Processing legacy elements", "count", i, "elapsed", common.PrettyDuration(time.Since(start)))
logged = time.Now()
}
out, err = convert(blob)
if err != nil {
return err
}
if err := batch.AppendRaw(i, out); err != nil {
return err
}
return nil
}); err != nil {
return err
}
if err := batch.commit(); err != nil {
return err
}
log.Info("Replacing old table files with migrated ones", "elapsed", common.PrettyDuration(time.Since(start)))
// Release and delete old table files. Note this won't
// delete the index file.
table.releaseFilesAfter(0, true)
if err := newTable.Close(); err != nil {
return err
}
files, err := ioutil.ReadDir(migrationPath)
if err != nil {
return err
}
// Move migrated files to ancients dir.
for _, f := range files {
// This will replace the old index file as a side-effect.
if err := os.Rename(filepath.Join(migrationPath, f.Name()), filepath.Join(ancientsPath, f.Name())); err != nil {
return err
}
}
// Delete by now empty dir.
if err := os.Remove(migrationPath); err != nil {
return err
}
return nil
}
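The batched iteration at the heart of MigrateTable, reduced to a self-contained sketch; the table's RetrieveItems call is replaced by a plain callback for illustration.
package main
import "fmt"
// forEachBatch visits items [offset, items) in chunks of batchSize, stopping
// at the first error, just like the forEach closure above.
func forEachBatch(items, offset, batchSize uint64, fn func(uint64) error) error {
for i := offset; i < items; {
n := batchSize
if i+n > items {
n = items - i
}
for j := uint64(0); j < n; j++ {
if err := fn(i + j); err != nil {
return err
}
}
i += n
}
return nil
}
func main() {
_ = forEachBatch(10, 3, 4, func(i uint64) error {
fmt.Print(i, " ") // 3 4 5 6 7 8 9
return nil
})
fmt.Println()
}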

View File

@@ -191,7 +191,7 @@ func (batch *freezerTableBatch) commit() error {
dataSize := int64(len(batch.dataBuffer))
batch.dataBuffer = batch.dataBuffer[:0]
// Write indices.
// Write index.
_, err = batch.t.index.Write(batch.indexBuffer)
if err != nil {
return err

View File

@@ -1,109 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package rawdb
import (
"io"
"os"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
const freezerVersion = 1 // The initial version tag of freezer table metadata
// freezerTableMeta wraps all the metadata of the freezer table.
type freezerTableMeta struct {
// Version is the versioning descriptor of the freezer table.
Version uint16
// VirtualTail indicates how many items have been marked as deleted.
// Its value is equal to the number of items removed from the table
// plus the number of items hidden in the table, so it should never
// be lower than the "actual tail".
VirtualTail uint64
}
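A worked example of the invariant described in the comment (all numbers illustrative):
package main
import "fmt"
func main() {
removed := uint64(40) // items whose data files were already deleted from disk
hidden := uint64(10)  // items marked deleted but still inside a live data file
virtualTail := removed + hidden
fmt.Println(virtualTail) // 50: the first item still visible in the table
}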
// newMetadata initializes the metadata object with the given virtual tail.
func newMetadata(tail uint64) *freezerTableMeta {
return &freezerTableMeta{
Version: freezerVersion,
VirtualTail: tail,
}
}
// readMetadata reads the metadata of the freezer table from the
// given metadata file.
func readMetadata(file *os.File) (*freezerTableMeta, error) {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return nil, err
}
var meta freezerTableMeta
if err := rlp.Decode(file, &meta); err != nil {
return nil, err
}
return &meta, nil
}
// writeMetadata writes the metadata of the freezer table into the
// given metadata file.
func writeMetadata(file *os.File, meta *freezerTableMeta) error {
_, err := file.Seek(0, io.SeekStart)
if err != nil {
return err
}
return rlp.Encode(file, meta)
}
// loadMetadata loads the metadata from the given metadata file.
// Initializes the metadata file with the given "actual tail" if
// it's empty.
func loadMetadata(file *os.File, tail uint64) (*freezerTableMeta, error) {
stat, err := file.Stat()
if err != nil {
return nil, err
}
// Write the metadata with the given actual tail into metadata file
// if it's non-existent. There are two possible scenarios here:
// - the freezer table is empty
// - the freezer table is legacy
// In both cases, write the meta into the file with the actual tail
// as the virtual tail.
if stat.Size() == 0 {
m := newMetadata(tail)
if err := writeMetadata(file, m); err != nil {
return nil, err
}
return m, nil
}
m, err := readMetadata(file)
if err != nil {
return nil, err
}
// Update the virtual tail if it is lower than the given actual tail.
// In theory this should never happen; print a warning if it does.
if m.VirtualTail < tail {
log.Warn("Updated virtual tail", "have", m.VirtualTail, "now", tail)
m.VirtualTail = tail
if err := writeMetadata(file, m); err != nil {
return nil, err
}
}
return m, nil
}

View File

@@ -1,61 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
package rawdb
import (
"io/ioutil"
"os"
"testing"
)
func TestReadWriteFreezerTableMeta(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
err = writeMetadata(f, newMetadata(100))
if err != nil {
t.Fatalf("Failed to write metadata %v", err)
}
meta, err := readMetadata(f)
if err != nil {
t.Fatalf("Failed to read metadata %v", err)
}
if meta.Version != freezerVersion {
t.Fatalf("Unexpected version field")
}
if meta.VirtualTail != uint64(100) {
t.Fatalf("Unexpected virtual tail field")
}
}
func TestInitializeFreezerTableMeta(t *testing.T) {
f, err := ioutil.TempFile(os.TempDir(), "*")
if err != nil {
t.Fatalf("Failed to create file %v", err)
}
meta, err := loadMetadata(f, uint64(100))
if err != nil {
t.Fatalf("Failed to read metadata %v", err)
}
if meta.Version != freezerVersion {
t.Fatalf("Unexpected version field")
}
if meta.VirtualTail != uint64(100) {
t.Fatalf("Unexpected virtual tail field")
}
}

View File

@@ -47,19 +47,20 @@ var (
)
// indexEntry contains the number/id of the file that the data resides in, as well as the
// offset within the file to the end of the data.
// offset within the file to the end of the data
// In serialized form, the filenum is stored as uint16.
type indexEntry struct {
filenum uint32 // stored as uint16 ( 2 bytes )
offset uint32 // stored as uint32 ( 4 bytes )
filenum uint32 // stored as uint16 ( 2 bytes)
offset uint32 // stored as uint32 ( 4 bytes)
}
const indexEntrySize = 6
// unmarshalBinary deserializes binary b into the rawIndex entry.
func (i *indexEntry) unmarshalBinary(b []byte) {
func (i *indexEntry) unmarshalBinary(b []byte) error {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6])
return nil
}
// append adds the encoded entry to the end of b.
@@ -74,14 +75,14 @@ func (i *indexEntry) append(b []byte) []byte {
// bounds returns the start- and end- offsets, and the file number of where to
// read the data item marked by the two index entries. The two entries are
// assumed to be sequential.
func (i *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
if i.filenum != end.filenum {
func (start *indexEntry) bounds(end *indexEntry) (startOffset, endOffset, fileId uint32) {
if start.filenum != end.filenum {
// If a piece of data 'crosses' a data-file,
// it's actually in one piece on the second data-file.
// We return a zero-indexEntry for the second file as start
return 0, end.offset, end.filenum
}
return i.offset, end.offset, end.filenum
return start.offset, end.offset, end.filenum
}
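The 6-byte on-disk entry in isolation, as a runnable sketch mirroring the append/unmarshalBinary pair and indexEntrySize above:
package main
import (
"encoding/binary"
"fmt"
)
type indexEntry struct {
filenum uint32 // stored as uint16 (2 bytes)
offset  uint32 // stored as uint32 (4 bytes)
}
// append encodes the entry onto the end of b in big-endian form.
func (i *indexEntry) append(b []byte) []byte {
at := len(b)
out := append(b, make([]byte, 6)...)
binary.BigEndian.PutUint16(out[at:], uint16(i.filenum))
binary.BigEndian.PutUint32(out[at+2:], i.offset)
return out
}
// unmarshalBinary decodes the first 6 bytes of b back into the entry.
func (i *indexEntry) unmarshalBinary(b []byte) {
i.filenum = uint32(binary.BigEndian.Uint16(b[:2]))
i.offset = binary.BigEndian.Uint32(b[2:6])
}
func main() {
buf := (&indexEntry{filenum: 3, offset: 1024}).append(nil)
var e indexEntry
e.unmarshalBinary(buf)
fmt.Printf("%d bytes -> %+v\n", len(buf), e) // 6 bytes -> {filenum:3 offset:1024}
}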
// freezerTable represents a single chained data table within the freezer (e.g. blocks).
@@ -91,28 +92,22 @@ type freezerTable struct {
// WARNING: The `items` field is accessed atomically. On 32 bit platforms, only
// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
items uint64 // Number of items stored in the table (including items removed from tail)
itemOffset uint64 // Number of items removed from the table
items uint64 // Number of items stored in the table (including items removed from tail)
// itemHidden is the number of items marked as deleted. Tail deletion is
// only supported at file level which means the actual deletion will be
// delayed until the entire data file is marked as deleted. Before that
// these items will be hidden to prevent being visited again. The value
// should never be lower than itemOffset.
itemHidden uint64
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
readonly bool
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
maxFileSize uint32 // Max file size for data-files
name string
path string
head *os.File // File descriptor for the data head of the table
index *os.File // File descriptor for the indexEntry file of the table
meta *os.File // File descriptor for metadata of the table
files map[uint32]*os.File // open files
headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file
index *os.File // File descriptor for the indexEntry file of the table
// In the case that old items are deleted (from the tail), we use itemOffset
// to count how many historic items have gone missing.
itemOffset uint32 // Offset (number of discarded items)
headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read
@@ -124,61 +119,71 @@ type freezerTable struct {
}
// NewFreezerTable opens the given path as a freezer table.
func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
}
// openFreezerFileForAppend opens a freezer table file and seeks to the end
func openFreezerFileForAppend(filename string) (*os.File, error) {
// Open the file without the O_APPEND flag
// because it has differing behaviour during Truncate operations
// on different OS's
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return nil, err
}
// Seek to end for append
if _, err = file.Seek(0, io.SeekEnd); err != nil {
return nil, err
}
return file, nil
}
// openFreezerFileForReadOnly opens a freezer table file for read only access
func openFreezerFileForReadOnly(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDONLY, 0644)
}
// openFreezerFileTruncated opens a freezer table making sure it is truncated
func openFreezerFileTruncated(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
}
// truncateFreezerFile resizes a freezer table file and seeks to the end
func truncateFreezerFile(file *os.File, size int64) error {
if err := file.Truncate(size); err != nil {
return err
}
// Seek to end for append
if _, err := file.Seek(0, io.SeekEnd); err != nil {
return err
}
return nil
}
// newTable opens a freezer table, creating the data and index files if they are
// non-existent. Both files are truncated to the shortest common length to ensure
// non existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync.
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
var idxName string
if noCompression {
idxName = fmt.Sprintf("%s.ridx", name) // raw index file
// Raw idx
idxName = fmt.Sprintf("%s.ridx", name)
} else {
idxName = fmt.Sprintf("%s.cidx", name) // compressed index file
// Compressed idx
idxName = fmt.Sprintf("%s.cidx", name)
}
var (
err error
index *os.File
meta *os.File
)
if readonly {
// Will fail if table doesn't exist
index, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
if err != nil {
return nil, err
}
// TODO(rjl493456442) change it to read-only mode. Open the metadata file
// in rw mode. It's a temporary solution for now and should be changed
// whenever the tail deletion is actually used. The reason for this hack is
// that the additional meta file for each freezer table was added in order
// to support tail deletion, but for most legacy nodes this file is missing.
// This check would suddenly break lots of database-related commands. So the
// metadata file is always opened for mutation and nothing else will be
// written except the initialization.
meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
if err != nil {
return nil, err
}
} else {
index, err = openFreezerFileForAppend(filepath.Join(path, idxName))
if err != nil {
return nil, err
}
meta, err = openFreezerFileForAppend(filepath.Join(path, fmt.Sprintf("%s.meta", name)))
if err != nil {
return nil, err
}
offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
if err != nil {
return nil, err
}
// Create the table and repair any past inconsistency
tab := &freezerTable{
index: index,
meta: meta,
index: offsets,
files: make(map[uint32]*os.File),
readMeter: readMeter,
writeMeter: writeMeter,
@@ -187,7 +192,6 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
path: path,
logger: log.New("database", path, "table", name),
noCompression: noCompression,
readonly: readonly,
maxFileSize: maxFilesize,
}
if err := tab.repair(); err != nil {
@@ -205,7 +209,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
return tab, nil
}
// repair cross-checks the head and the index file and truncates them to
// repair cross checks the head and the index file and truncates them to
// be in sync with each other after a potential crash / data loss.
func (t *freezerTable) repair() error {
// Create a temporary offset buffer to init files with and read indexEntry into
@@ -243,32 +247,12 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, 0)
firstIndex.unmarshalBinary(buffer)
// Assign the tail fields with the first stored index.
// The total number of removed items is represented with a uint32,
// which is not enough in theory but enough in practice.
// TODO: use uint64 to represent the total number of removed items.
t.tailId = firstIndex.filenum
t.itemOffset = uint64(firstIndex.offset)
t.itemOffset = firstIndex.offset
// Load metadata from the file
meta, err := loadMetadata(t.meta, t.itemOffset)
if err != nil {
return err
}
t.itemHidden = meta.VirtualTail
// Read the last index, use the default value in case the freezer is empty
if offsetsSize == indexEntrySize {
lastIndex = indexEntry{filenum: t.tailId, offset: 0}
} else {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
}
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
} else {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
}
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
if err != nil {
return err
}
@@ -279,6 +263,7 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset)
for contentExp != contentSize {
// Truncate the head file to the last offset pointer
if contentExp < contentSize {
@@ -295,16 +280,9 @@ func (t *freezerTable) repair() error {
return err
}
offsetsSize -= indexEntrySize
// Read the new head index, use the default value in case
// the freezer is already empty.
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
var newLastIndex indexEntry
if offsetsSize == indexEntrySize {
newLastIndex = indexEntry{filenum: t.tailId, offset: 0}
} else {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
newLastIndex.unmarshalBinary(buffer)
}
newLastIndex.unmarshalBinary(buffer)
// We might have slipped back into an earlier head-file here
if newLastIndex.filenum != lastIndex.filenum {
// Release earlier opened file
@@ -323,30 +301,18 @@ func (t *freezerTable) repair() error {
contentExp = int64(lastIndex.offset)
}
}
// Sync() fails for read-only files on Windows.
if !t.readonly {
// Ensure all reparation changes have been written to disk
if err := t.index.Sync(); err != nil {
return err
}
if err := t.head.Sync(); err != nil {
return err
}
if err := t.meta.Sync(); err != nil {
return err
}
// Ensure all reparation changes have been written to disk
if err := t.index.Sync(); err != nil {
return err
}
if err := t.head.Sync(); err != nil {
return err
}
// Update the item and byte counters and return
t.items = t.itemOffset + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
t.headBytes = contentSize
t.headId = lastIndex.filenum
// Delete the leftover files because of head deletion
t.releaseFilesAfter(t.headId, true)
// Delete the leftover files because of tail deletion
t.releaseFilesBefore(t.tailId, true)
// Close opened files and preopen all files
if err := t.preopen(); err != nil {
return err
@@ -362,35 +328,27 @@ func (t *freezerTable) repair() error {
func (t *freezerTable) preopen() (err error) {
// The repair might have already opened (some) files
t.releaseFilesAfter(0, false)
// Open all except head in RDONLY
for i := t.tailId; i < t.headId; i++ {
if _, err = t.openFile(i, openFreezerFileForReadOnly); err != nil {
return err
}
}
if t.readonly {
t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
} else {
// Open head in read/write
t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
}
// Open head in read/write
t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
return err
}
// truncateHead discards any recent data above the provided threshold number.
func (t *freezerTable) truncateHead(items uint64) error {
// truncate discards any recent data above the provided threshold number.
func (t *freezerTable) truncate(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
// Ensure the given truncate target falls in the correct range
// If our item count is correct, don't do anything
existing := atomic.LoadUint64(&t.items)
if existing <= items {
return nil
}
if items < atomic.LoadUint64(&t.itemHidden) {
return errors.New("truncation below tail")
}
// We need to truncate, save the old size for metrics tracking
oldSize, err := t.sizeNolock()
if err != nil {
@@ -402,24 +360,17 @@ func (t *freezerTable) truncateHead(items uint64) error {
log = t.logger.Warn // Only loud warn if we delete multiple items
}
log("Truncating freezer table", "items", existing, "limit", items)
// Truncate the index file first, the tail position is also considered
// when calculating the new freezer table length.
length := items - atomic.LoadUint64(&t.itemOffset)
if err := truncateFreezerFile(t.index, int64(length+1)*indexEntrySize); err != nil {
if err := truncateFreezerFile(t.index, int64(items+1)*indexEntrySize); err != nil {
return err
}
// Calculate the new expected size of the data file and truncate it
var expected indexEntry
if length == 0 {
expected = indexEntry{filenum: t.tailId, offset: 0}
} else {
buffer := make([]byte, indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(length*indexEntrySize)); err != nil {
return err
}
expected.unmarshalBinary(buffer)
buffer := make([]byte, indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(items*indexEntrySize)); err != nil {
return err
}
var expected indexEntry
expected.unmarshalBinary(buffer)
// We might need to truncate back to older files
if expected.filenum != t.headId {
// If already open for reading, force-reopen for writing
@@ -448,110 +399,7 @@ func (t *freezerTable) truncateHead(items uint64) error {
return err
}
t.sizeGauge.Dec(int64(oldSize - newSize))
return nil
}
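A worked example of the arithmetic above, with hypothetical numbers: if itemOffset = 4 items have already been deleted from the tail and the truncation target is items = 10, then length = 10 - 4 = 6 and the index file is cut to (6 + 1) * indexEntrySize = 42 bytes; the extra entry is the zero-index tail marker.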
// truncateTail discards any recent data before the provided threshold number.
func (t *freezerTable) truncateTail(items uint64) error {
t.lock.Lock()
defer t.lock.Unlock()
// Ensure the given truncate target falls in the correct range
if atomic.LoadUint64(&t.itemHidden) >= items {
return nil
}
if atomic.LoadUint64(&t.items) < items {
return errors.New("truncation above head")
}
// Load the new tail index by the given new tail position
var (
newTailId uint32
buffer = make([]byte, indexEntrySize)
)
if atomic.LoadUint64(&t.items) == items {
newTailId = t.headId
} else {
offset := items - atomic.LoadUint64(&t.itemOffset)
if _, err := t.index.ReadAt(buffer, int64((offset+1)*indexEntrySize)); err != nil {
return err
}
var newTail indexEntry
newTail.unmarshalBinary(buffer)
newTailId = newTail.filenum
}
// Update the virtual tail marker and hide these entries in the table.
atomic.StoreUint64(&t.itemHidden, items)
if err := writeMetadata(t.meta, newMetadata(items)); err != nil {
return err
}
// The hidden items still fall within the current tail file; no data file
// can be dropped.
if t.tailId == newTailId {
return nil
}
// The hidden items fall in an invalid range; return an error.
if t.tailId > newTailId {
return fmt.Errorf("invalid index, tail-file %d, item-file %d", t.tailId, newTailId)
}
// The hidden items exceed the current tail file; drop the relevant
// data files. We need to truncate, so save the old size for metrics
// tracking.
oldSize, err := t.sizeNolock()
if err != nil {
return err
}
// Count how many items can be deleted from the file.
var (
newDeleted = items
deleted = atomic.LoadUint64(&t.itemOffset)
)
for current := items - 1; current >= deleted; current -= 1 {
if _, err := t.index.ReadAt(buffer, int64((current-deleted+1)*indexEntrySize)); err != nil {
return err
}
var pre indexEntry
pre.unmarshalBinary(buffer)
if pre.filenum != newTailId {
break
}
newDeleted = current
}
// Commit the metadata file changes first, before manipulating
// the index file.
if err := t.meta.Sync(); err != nil {
return err
}
// Truncate the deleted index entries from the index file.
err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
tailIndex := indexEntry{
filenum: newTailId,
offset: uint32(newDeleted),
}
_, err := f.Write(tailIndex.append(nil))
return err
})
if err != nil {
return err
}
// Reopen the modified index file to load the changes
if err := t.index.Close(); err != nil {
return err
}
t.index, err = openFreezerFileForAppend(t.index.Name())
if err != nil {
return err
}
// Release any files before the current tail
t.tailId = newTailId
atomic.StoreUint64(&t.itemOffset, newDeleted)
t.releaseFilesBefore(t.tailId, true)
// Retrieve the new size and update the total size counter
newSize, err := t.sizeNolock()
if err != nil {
return err
}
t.sizeGauge.Dec(int64(oldSize - newSize))
return nil
}
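The meta file consulted above stores little more than a format version and the virtual tail, i.e. the count of items hidden from readers before any data file is actually dropped. A minimal sketch of such a record, assuming RLP encoding and with illustrative names (the in-tree metadata struct may differ):
package main
import (
	"bytes"
	"fmt"
	"github.com/ethereum/go-ethereum/rlp"
)
// tableMeta approximates what the repair and truncation paths read back:
// a format version plus the virtual tail marker.
type tableMeta struct {
	Version     uint16
	VirtualTail uint64 // items hidden logically but not yet deleted on disk
}
func main() {
	var buf bytes.Buffer
	if err := rlp.Encode(&buf, &tableMeta{Version: 1, VirtualTail: 4}); err != nil {
		panic(err)
	}
	var meta tableMeta
	if err := rlp.Decode(&buf, &meta); err != nil {
		panic(err)
	}
	fmt.Println(meta.Version, meta.VirtualTail) // 1 4
}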
@@ -566,11 +414,6 @@ func (t *freezerTable) Close() error {
}
t.index = nil
if err := t.meta.Close(); err != nil {
errs = append(errs, err)
}
t.meta = nil
for _, f := range t.files {
if err := f.Close(); err != nil {
errs = append(errs, err)
@@ -625,19 +468,6 @@ func (t *freezerTable) releaseFilesAfter(num uint32, remove bool) {
}
}
// releaseFilesBefore closes all open files with a lower number, and optionally also deletes the files
func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
for fnum, f := range t.files {
if fnum < num {
delete(t.files, fnum)
f.Close()
if remove {
os.Remove(f.Name())
}
}
}
}
// getIndices returns the index entries for the given from-item, covering 'count' items.
// N.B: The actual number of returned indices for N items will always be N+1 (unless an
// error is returned).
@@ -646,7 +476,7 @@ func (t *freezerTable) releaseFilesBefore(num uint32, remove bool) {
// it will return an error.
func (t *freezerTable) getIndices(from, count uint64) ([]*indexEntry, error) {
// Apply the table-offset
from = from - t.itemOffset
from = from - uint64(t.itemOffset)
// For reading N items, we need N+1 indices.
buffer := make([]byte, (count+1)*indexEntrySize)
if _, err := t.index.ReadAt(buffer, int64(from*indexEntrySize)); err != nil {
@@ -731,21 +561,18 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
t.lock.RLock()
defer t.lock.RUnlock()
// Ensure the table and the item are accessible
// Ensure the table and the item is accessible
if t.index == nil || t.head == nil {
return nil, nil, errClosed
}
var (
items = atomic.LoadUint64(&t.items) // the total number of items (head + 1)
hidden = atomic.LoadUint64(&t.itemHidden) // the number of hidden items
)
itemCount := atomic.LoadUint64(&t.items) // max number
// Ensure the start is written, not deleted from the tail, and that the
// caller actually wants something
if items <= start || hidden > start || count == 0 {
if itemCount <= start || uint64(t.itemOffset) > start || count == 0 {
return nil, nil, errOutOfBounds
}
if start+count > items {
count = items - start
if start+count > itemCount {
count = itemCount - start
}
var (
output = make([]byte, maxBytes) // Buffer to read data into
@@ -821,10 +648,10 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return output[:outputSize], sizes, nil
}
// has returns an indicator whether the specified number data is still accessible
// in the freezer table.
// has returns an indicator whether the specified number data
// exists in the freezer table.
func (t *freezerTable) has(number uint64) bool {
return atomic.LoadUint64(&t.items) > number && atomic.LoadUint64(&t.itemHidden) <= number
return atomic.LoadUint64(&t.items) > number
}
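The first variant of has implies an accessibility window over the half-open range [itemHidden, items); a tiny self-contained check of the boundary cases:
package main
import "fmt"
// has mirrors the tail-aware check above: an item is readable iff it
// lies in the half-open window [itemHidden, items).
func has(items, itemHidden, number uint64) bool {
	return items > number && itemHidden <= number
}
func main() {
	fmt.Println(has(7, 4, 3)) // false: hidden behind the tail
	fmt.Println(has(7, 4, 4)) // true: first visible item
	fmt.Println(has(7, 4, 6)) // true: last visible item
	fmt.Println(has(7, 4, 7)) // false: beyond the head
}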
// size returns the total data size in the freezer table.
@@ -878,9 +705,6 @@ func (t *freezerTable) Sync() error {
if err := t.index.Sync(); err != nil {
return err
}
if err := t.meta.Sync(); err != nil {
return err
}
return t.head.Sync()
}
@@ -898,20 +722,13 @@ func (t *freezerTable) dumpIndexString(start, stop int64) string {
}
func (t *freezerTable) dumpIndex(w io.Writer, start, stop int64) {
meta, err := readMetadata(t.meta)
if err != nil {
fmt.Fprintf(w, "Failed to decode freezer table %v\n", err)
return
}
fmt.Fprintf(w, "Version %d deleted %d, hidden %d\n", meta.Version, atomic.LoadUint64(&t.itemOffset), atomic.LoadUint64(&t.itemHidden))
buf := make([]byte, indexEntrySize)
fmt.Fprintf(w, "| number | fileno | offset |\n")
fmt.Fprintf(w, "|--------|--------|--------|\n")
for i := uint64(start); ; i++ {
if _, err := t.index.ReadAt(buf, int64((i+1)*indexEntrySize)); err != nil {
if _, err := t.index.ReadAt(buf, int64(i*indexEntrySize)); err != nil {
break
}
var entry indexEntry

View File

@@ -18,18 +18,13 @@ package rawdb
import (
"bytes"
"encoding/binary"
"fmt"
"math/rand"
"os"
"path/filepath"
"reflect"
"sync/atomic"
"testing"
"testing/quick"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/ethereum/go-ethereum/metrics"
"github.com/stretchr/testify/require"
)
@@ -45,7 +40,7 @@ func TestFreezerBasics(t *testing.T) {
// set cutoff at 50 bytes
f, err := newTable(os.TempDir(),
fmt.Sprintf("unittest-%d", rand.Uint64()),
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
if err != nil {
t.Fatal(err)
}
@@ -90,7 +85,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
f *freezerTable
err error
)
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -104,7 +99,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
require.NoError(t, batch.commit())
f.Close()
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -121,7 +116,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
}
f.Close()
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -136,7 +131,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// Fill table
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -165,7 +160,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
// Now open it again
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -188,7 +183,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// Fill a table and close it
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -209,12 +204,12 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
}
// Remove everything but the first item, and leave data unaligned
// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
idxFile.Close()
// Now open it again
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -237,7 +232,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
// And if we open it, we should now be able to read all of them (new values)
{
f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
for y := 1; y < 255; y++ {
exp := getChunk(15, ^y)
got, err := f.Retrieve(uint64(y))
@@ -259,7 +254,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -270,7 +265,7 @@ func TestSnappyDetection(t *testing.T) {
// Open without snappy
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
if err != nil {
t.Fatal(err)
}
@@ -282,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
// Open with snappy
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -314,7 +309,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
// Fill a table and close it
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -350,7 +345,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
// 45, 45, 15
// with 3+3+1 items
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -371,7 +366,7 @@ func TestFreezerTruncate(t *testing.T) {
// Fill table
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -387,12 +382,12 @@ func TestFreezerTruncate(t *testing.T) {
// Reopen, truncate
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
defer f.Close()
f.truncateHead(10) // 150 bytes
f.truncate(10) // 150 bytes
if f.items != 10 {
t.Fatalf("expected %d items, got %d", 10, f.items)
}
@@ -412,7 +407,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// Fill table
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -445,7 +440,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
// Reopen
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -480,7 +475,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
// Fill table
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -496,7 +491,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
// Reopen and read all files
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -509,7 +504,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
}
// Now, truncate back to zero
f.truncateHead(0)
f.truncate(0)
// Write the data again
batch := f.newBatch()
@@ -528,7 +523,7 @@ func TestFreezerOffset(t *testing.T) {
// Fill table
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
@@ -570,19 +565,18 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 4 ] at index zero
tailId := uint32(2) // First file is 2
itemOffset := uint32(4) // We have removed four items
zeroIndex := indexEntry{
filenum: uint32(2), // First file is 2
offset: uint32(4), // We have removed four items
filenum: tailId,
offset: itemOffset,
}
buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
// Remove the four next indices by overwriting
copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
indexFile.WriteAt(indexBuf, 0)
// Need to truncate the moved index items
indexFile.Truncate(indexEntrySize * (1 + 2))
indexFile.Close()
@@ -590,7 +584,7 @@ func TestFreezerOffset(t *testing.T) {
// Now open again
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
@@ -629,12 +623,13 @@ func TestFreezerOffset(t *testing.T) {
// Update the index file, so that we store
// [ file = 2, offset = 1M ] at index zero
tailId := uint32(2) // First file is 2
itemOffset := uint32(1000000) // We have removed 1M items
zeroIndex := indexEntry{
offset: uint32(1000000), // We have removed 1M items
filenum: uint32(2), // First file is 2
offset: itemOffset,
filenum: tailId,
}
buf := zeroIndex.append(nil)
// Overwrite index zero
copy(indexBuf, buf)
indexFile.WriteAt(indexBuf, 0)
@@ -643,7 +638,7 @@ func TestFreezerOffset(t *testing.T) {
// Check that existing items have been moved to index 1M.
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
@@ -664,171 +659,6 @@ func TestFreezerOffset(t *testing.T) {
}
}
func TestTruncateTail(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())
// Fill table
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
// Write 7 x 20 bytes, splitting out into four files
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
require.NoError(t, batch.commit())
// nothing to do, all the items should still be there.
f.truncateTail(0)
fmt.Println(f.dumpIndexString(0, 1000))
checkRetrieve(t, f, map[uint64][]byte{
0: getChunk(20, 0xFF),
1: getChunk(20, 0xEE),
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// truncate a single element (item 0); deletion is only supported at the file level
f.truncateTail(1)
fmt.Println(f.dumpIndexString(0, 1000))
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
1: getChunk(20, 0xEE),
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// Reopen the table, the deletion information should be persisted as well
f.Close()
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
1: getChunk(20, 0xEE),
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// truncate two elements (item 0, item 1); file 0 should be deleted
f.truncateTail(2)
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// Reopen the table, the above testing should still pass
f.Close()
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
defer f.Close()
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
})
checkRetrieve(t, f, map[uint64][]byte{
2: getChunk(20, 0xdd),
3: getChunk(20, 0xcc),
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
// truncate all, the entire freezer should be deleted
f.truncateTail(7)
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds,
1: errOutOfBounds,
2: errOutOfBounds,
3: errOutOfBounds,
4: errOutOfBounds,
5: errOutOfBounds,
6: errOutOfBounds,
})
}
func TestTruncateHead(t *testing.T) {
t.Parallel()
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())
// Fill table
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
if err != nil {
t.Fatal(err)
}
// Write 7 x 20 bytes, splitting out into four files
batch := f.newBatch()
require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
require.NoError(t, batch.commit())
f.truncateTail(4) // Tail = 4
// Truncating the head to 4 while the tail is already at 4 empties the entire table
f.truncateHead(4)
checkRetrieveError(t, f, map[uint64]error{
0: errOutOfBounds, // Deleted by tail
1: errOutOfBounds, // Deleted by tail
2: errOutOfBounds, // Deleted by tail
3: errOutOfBounds, // Deleted by tail
4: errOutOfBounds, // Deleted by Head
5: errOutOfBounds, // Deleted by Head
6: errOutOfBounds, // Deleted by Head
})
// Append new items
batch = f.newBatch()
require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
require.NoError(t, batch.commit())
checkRetrieve(t, f, map[uint64][]byte{
4: getChunk(20, 0xbb),
5: getChunk(20, 0xaa),
6: getChunk(20, 0x11),
})
}
func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
t.Helper()
@@ -896,7 +726,7 @@ func TestSequentialRead(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
{ // Fill table
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -906,7 +736,7 @@ func TestSequentialRead(t *testing.T) {
f.Close()
}
{ // Open it, iterate, verify iteration
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
if err != nil {
t.Fatal(err)
}
@@ -927,7 +757,7 @@ func TestSequentialRead(t *testing.T) {
}
{ // Open it, iterate, verify byte limit. The byte limit is less than item
// size, so each lookup should only return one item
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
if err != nil {
t.Fatal(err)
}
@@ -956,7 +786,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
{ // Fill table
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
if err != nil {
t.Fatal(err)
}
@@ -978,7 +808,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
{100, 109, 10},
} {
{
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
if err != nil {
t.Fatal(err)
}
@@ -999,298 +829,3 @@ func TestSequentialReadByteLimit(t *testing.T) {
}
}
}
func TestFreezerReadonly(t *testing.T) {
tmpdir := os.TempDir()
// Case 1: Check it fails on non-existent file.
_, err := newTable(tmpdir,
fmt.Sprintf("readonlytest-%d", rand.Uint64()),
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
if err == nil {
t.Fatal("readonly table instantiation should fail for non-existent table")
}
// Case 2: Check that it fails on invalid index length.
fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
if err != nil {
t.Errorf("Failed to open index file: %v\n", err)
}
// size should not be a multiple of indexEntrySize.
idxFile.Write(make([]byte, 17))
idxFile.Close()
_, err = newTable(tmpdir, fname,
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
if err == nil {
t.Errorf("readonly table instantiation should fail for invalid index size")
}
// Case 3: Open table non-readonly table to write some data.
// Then corrupt the head file and make sure opening the table
// again in readonly triggers an error.
fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
f, err := newTable(tmpdir, fname,
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
t.Fatalf("failed to instantiate table: %v", err)
}
writeChunks(t, f, 8, 32)
// Corrupt table file
if _, err := f.head.Write([]byte{1, 1}); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
_, err = newTable(tmpdir, fname,
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
if err == nil {
t.Errorf("readonly table instantiation should fail for corrupt table file")
}
// Case 4: Write some data to a table and later re-open it as readonly.
// Should be successful.
fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
f, err = newTable(tmpdir, fname,
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
t.Fatalf("failed to instantiate table: %v\n", err)
}
writeChunks(t, f, 32, 128)
if err := f.Close(); err != nil {
t.Fatal(err)
}
f, err = newTable(tmpdir, fname,
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
if err != nil {
t.Fatal(err)
}
v, err := f.Retrieve(10)
if err != nil {
t.Fatal(err)
}
exp := getChunk(128, 10)
if !bytes.Equal(v, exp) {
t.Errorf("retrieved value is incorrect")
}
// Case 5: Now write some data via a batch.
// This should fail either during AppendRaw or Commit
batch := f.newBatch()
writeErr := batch.AppendRaw(32, make([]byte, 1))
if writeErr == nil {
writeErr = batch.commit()
}
if writeErr == nil {
t.Fatalf("Writing to readonly table should fail")
}
}
// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep
type randTestStep struct {
op int
items []uint64 // for append and retrieve
blobs [][]byte // for append
target uint64 // for truncate(head/tail)
err error // for debugging
}
const (
opReload = iota
opAppend
opRetrieve
opTruncateHead
opTruncateHeadAll
opTruncateTail
opTruncateTailAll
opCheckAll
opMax // boundary value, not an actual op
)
func getVals(first uint64, n int) [][]byte {
var ret [][]byte
for i := 0; i < n; i++ {
val := make([]byte, 8)
binary.BigEndian.PutUint64(val, first+uint64(i))
ret = append(ret, val)
}
return ret
}
func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
var (
deleted uint64 // The number of deleted items from tail
items []uint64 // The index of entries in table
// getItems retrieves the indexes for items in the table.
getItems = func(n int) []uint64 {
length := len(items)
if length == 0 {
return nil
}
var ret []uint64
index := rand.Intn(length)
for i := index; len(ret) < n && i < length; i++ {
ret = append(ret, items[i])
}
return ret
}
// addItems appends the given number of items to the table.
addItems = func(n int) []uint64 {
var first = deleted
if len(items) != 0 {
first = items[len(items)-1] + 1
}
var ret []uint64
for i := 0; i < n; i++ {
ret = append(ret, first+uint64(i))
}
items = append(items, ret...)
return ret
}
)
var steps randTest
for i := 0; i < size; i++ {
step := randTestStep{op: r.Intn(opMax)}
switch step.op {
case opReload, opCheckAll:
case opAppend:
num := r.Intn(3)
step.items = addItems(num)
if len(step.items) == 0 {
step.blobs = nil
} else {
step.blobs = getVals(step.items[0], num)
}
case opRetrieve:
step.items = getItems(r.Intn(3))
case opTruncateHead:
if len(items) == 0 {
step.target = deleted
} else {
index := r.Intn(len(items))
items = items[:index]
step.target = deleted + uint64(index)
}
case opTruncateHeadAll:
step.target = deleted
items = items[:0]
case opTruncateTail:
if len(items) == 0 {
step.target = deleted
} else {
index := r.Intn(len(items))
items = items[index:]
deleted += uint64(index)
step.target = deleted
}
case opTruncateTailAll:
step.target = deleted + uint64(len(items))
items = items[:0]
deleted = step.target
}
steps = append(steps, step)
}
return reflect.ValueOf(steps)
}
func runRandTest(rt randTest) bool {
fname := fmt.Sprintf("randtest-%d", rand.Uint64())
f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
panic("failed to initialize table")
}
var values [][]byte
for i, step := range rt {
switch step.op {
case opReload:
f.Close()
f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
if err != nil {
rt[i].err = fmt.Errorf("failed to reload table %v", err)
}
case opCheckAll:
tail := atomic.LoadUint64(&f.itemHidden)
head := atomic.LoadUint64(&f.items)
if tail == head {
continue
}
got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
if err != nil {
rt[i].err = err
} else {
if !reflect.DeepEqual(got, values) {
rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
}
}
case opAppend:
batch := f.newBatch()
for i := 0; i < len(step.items); i++ {
batch.AppendRaw(step.items[i], step.blobs[i])
}
batch.commit()
values = append(values, step.blobs...)
case opRetrieve:
var blobs [][]byte
if len(step.items) == 0 {
continue
}
tail := atomic.LoadUint64(&f.itemHidden)
for i := 0; i < len(step.items); i++ {
blobs = append(blobs, values[step.items[i]-tail])
}
got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
if err != nil {
rt[i].err = err
} else {
if !reflect.DeepEqual(got, blobs) {
rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
}
}
case opTruncateHead:
f.truncateHead(step.target)
length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
values = values[:length]
case opTruncateHeadAll:
f.truncateHead(step.target)
values = nil
case opTruncateTail:
prev := atomic.LoadUint64(&f.itemHidden)
f.truncateTail(step.target)
truncated := atomic.LoadUint64(&f.itemHidden) - prev
values = values[truncated:]
case opTruncateTailAll:
f.truncateTail(step.target)
values = nil
}
// Abort the test on error.
if rt[i].err != nil {
return false
}
}
f.Close()
return true
}
func TestRandom(t *testing.T) {
if err := quick.Check(runRandTest, nil); err != nil {
if cerr, ok := err.(*quick.CheckError); ok {
t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
}
t.Fatal(err)
}
}

View File

@@ -24,7 +24,6 @@ import (
"math/big"
"math/rand"
"os"
"path"
"sync"
"testing"
@@ -187,7 +186,7 @@ func TestFreezerConcurrentModifyRetrieve(t *testing.T) {
wg.Wait()
}
// This test runs ModifyAncients and TruncateHead concurrently with each other.
// This test runs ModifyAncients and TruncateAncients concurrently with each other.
func TestFreezerConcurrentModifyTruncate(t *testing.T) {
f, dir := newFreezerForTesting(t, freezerTestTableDef)
defer os.RemoveAll(dir)
@@ -197,7 +196,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
for i := 0; i < 1000; i++ {
// First reset and write 100 items.
if err := f.TruncateHead(0); err != nil {
if err := f.TruncateAncients(0); err != nil {
t.Fatal("truncate failed:", err)
}
_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
@@ -232,7 +231,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
wg.Done()
}()
go func() {
truncateErr = f.TruncateHead(10)
truncateErr = f.TruncateAncients(10)
wg.Done()
}()
go func() {
@@ -254,44 +253,6 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
}
}
func TestFreezerReadonlyValidate(t *testing.T) {
tables := map[string]bool{"a": true, "b": true}
dir, err := ioutil.TempDir("", "freezer")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
// Open non-readonly freezer and fill individual tables
// with different amount of data.
f, err := newFreezer(dir, "", false, 2049, tables)
if err != nil {
t.Fatal("can't open freezer", err)
}
var item = make([]byte, 1024)
aBatch := f.tables["a"].newBatch()
require.NoError(t, aBatch.AppendRaw(0, item))
require.NoError(t, aBatch.AppendRaw(1, item))
require.NoError(t, aBatch.AppendRaw(2, item))
require.NoError(t, aBatch.commit())
bBatch := f.tables["b"].newBatch()
require.NoError(t, bBatch.AppendRaw(0, item))
require.NoError(t, bBatch.commit())
if f.tables["a"].items != 3 {
t.Fatalf("unexpected number of items in table")
}
if f.tables["b"].items != 1 {
t.Fatalf("unexpected number of items in table")
}
require.NoError(t, f.Close())
// Re-opening as readonly should fail when validating
// table lengths.
f, err = newFreezer(dir, "", true, 2049, tables)
if err == nil {
t.Fatal("readonly freezer should fail with differing table lengths")
}
}
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
t.Helper()
@@ -338,92 +299,3 @@ func checkAncientCount(t *testing.T, f *freezer, kind string, n uint64) {
t.Errorf("Ancient(%q, %d) returned unexpected error %q", kind, index, err)
}
}
func TestRenameWindows(t *testing.T) {
var (
fname = "file.bin"
fname2 = "file2.bin"
data = []byte{1, 2, 3, 4}
data2 = []byte{2, 3, 4, 5}
data3 = []byte{3, 5, 6, 7}
dataLen = 4
)
// Create 2 temp dirs
dir1, err := os.MkdirTemp("", "rename-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(dir1)
dir2, err := os.MkdirTemp("", "rename-test")
if err != nil {
t.Fatal(err)
}
defer os.Remove(dir2)
// Create file in dir1 and fill with data
f, err := os.Create(path.Join(dir1, fname))
if err != nil {
t.Fatal(err)
}
f2, err := os.Create(path.Join(dir1, fname2))
if err != nil {
t.Fatal(err)
}
f3, err := os.Create(path.Join(dir2, fname2))
if err != nil {
t.Fatal(err)
}
if _, err := f.Write(data); err != nil {
t.Fatal(err)
}
if _, err := f2.Write(data2); err != nil {
t.Fatal(err)
}
if _, err := f3.Write(data3); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
if err := f2.Close(); err != nil {
t.Fatal(err)
}
if err := f3.Close(); err != nil {
t.Fatal(err)
}
if err := os.Rename(f.Name(), path.Join(dir2, fname)); err != nil {
t.Fatal(err)
}
if err := os.Rename(f2.Name(), path.Join(dir2, fname2)); err != nil {
t.Fatal(err)
}
// Check file contents
f, err = os.Open(path.Join(dir2, fname))
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
buf := make([]byte, dataLen)
if _, err := f.Read(buf); err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, data) {
t.Errorf("unexpected file contents. Got %v\n", buf)
}
f, err = os.Open(path.Join(dir2, fname2))
if err != nil {
t.Fatal(err)
}
defer f.Close()
defer os.Remove(f.Name())
if _, err := f.Read(buf); err != nil {
t.Fatal(err)
}
if !bytes.Equal(buf, data2) {
t.Errorf("unexpected file contents. Got %v\n", buf)
}
}

View File

@@ -1,120 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
// copyFrom copies data from 'srcPath' at offset 'offset' into 'destPath'.
// The 'destPath' is created if it doesn't exist, otherwise it is overwritten.
// Before the copy is executed, a callback can be registered to
// manipulate the dest file.
// It is perfectly valid to have destPath == srcPath.
func copyFrom(srcPath, destPath string, offset uint64, before func(f *os.File) error) error {
// Create a temp file in the same dir where we want it to wind up
f, err := ioutil.TempFile(filepath.Dir(destPath), "*")
if err != nil {
return err
}
fname := f.Name()
// Clean up the leftover file
defer func() {
if f != nil {
f.Close()
}
os.Remove(fname)
}()
// Apply the given function if it's not nil before we copy
// the content from the src.
if before != nil {
if err := before(f); err != nil {
return err
}
}
// Open the source file
src, err := os.Open(srcPath)
if err != nil {
return err
}
if _, err = src.Seek(int64(offset), 0); err != nil {
src.Close()
return err
}
// io.Copy uses a 32K buffer internally.
_, err = io.Copy(f, src)
if err != nil {
src.Close()
return err
}
// Rename the temporary file to the specified dest name.
// src may be the same as dest, so it needs to be closed
// before we do the final move.
src.Close()
if err := f.Close(); err != nil {
return err
}
f = nil
if err := os.Rename(fname, destPath); err != nil {
return err
}
return nil
}
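A hedged usage sketch of copyFrom, mirroring how the removed truncateTail drives it: rewrite the index file in place, skipping the old zero-index marker plus four item entries, while the before callback writes a fresh tail marker first. This assumes copyFrom, indexEntry and indexEntrySize from this diff are in scope (package rawdb); the file name and numbers are illustrative.
func dropFourTailItems() error {
	// New zero-index entry: items now start in file 2 after four deletions.
	tailIndex := indexEntry{filenum: 2, offset: 4}
	// Skip 5 entries (the old marker plus 4 items) and prepend the new marker.
	return copyFrom("table.ridx", "table.ridx", indexEntrySize*(4+1), func(f *os.File) error {
		_, err := f.Write(tailIndex.append(nil))
		return err
	})
}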
// openFreezerFileForAppend opens a freezer table file and seeks to the end
func openFreezerFileForAppend(filename string) (*os.File, error) {
// Open the file without the O_APPEND flag
// because it has differing behaviour during Truncate operations
// on different OS's
file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
return nil, err
}
// Seek to end for append
if _, err = file.Seek(0, io.SeekEnd); err != nil {
return nil, err
}
return file, nil
}
// openFreezerFileForReadOnly opens a freezer table file for read only access
func openFreezerFileForReadOnly(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDONLY, 0644)
}
// openFreezerFileTruncated opens a freezer table making sure it is truncated
func openFreezerFileTruncated(filename string) (*os.File, error) {
return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
}
// truncateFreezerFile resizes a freezer table file and seeks to the end
func truncateFreezerFile(file *os.File, size int64) error {
if err := file.Truncate(size); err != nil {
return err
}
// Seek to end for append
if _, err := file.Seek(0, io.SeekEnd); err != nil {
return err
}
return nil
}

View File

@@ -1,76 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"bytes"
"io/ioutil"
"os"
"testing"
)
func TestCopyFrom(t *testing.T) {
var (
content = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}
prefix = []byte{0x9, 0xa, 0xb, 0xc, 0xd, 0xf}
)
var cases = []struct {
src, dest string
offset uint64
writePrefix bool
}{
{"foo", "bar", 0, false},
{"foo", "bar", 1, false},
{"foo", "bar", 8, false},
{"foo", "foo", 0, false},
{"foo", "foo", 1, false},
{"foo", "foo", 8, false},
{"foo", "bar", 0, true},
{"foo", "bar", 1, true},
{"foo", "bar", 8, true},
}
for _, c := range cases {
ioutil.WriteFile(c.src, content, 0644)
if err := copyFrom(c.src, c.dest, c.offset, func(f *os.File) error {
if !c.writePrefix {
return nil
}
f.Write(prefix)
return nil
}); err != nil {
os.Remove(c.src)
t.Fatalf("Failed to copy %v", err)
}
blob, err := ioutil.ReadFile(c.dest)
if err != nil {
os.Remove(c.src)
os.Remove(c.dest)
t.Fatalf("Failed to read %v", err)
}
want := content[c.offset:]
if c.writePrefix {
want = append(prefix, want...)
}
if !bytes.Equal(blob, want) {
t.Fatal("Unexpected value")
}
os.Remove(c.src)
os.Remove(c.dest)
}
}

View File

@@ -1,47 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import "github.com/ethereum/go-ethereum/ethdb"
// KeyLengthIterator is a wrapper for a database iterator that ensures only key-value pairs
// with a specific key length will be returned.
type KeyLengthIterator struct {
requiredKeyLength int
ethdb.Iterator
}
// NewKeyLengthIterator returns a wrapped version of the iterator that only
// returns key-value pairs whose keys have the given length.
func NewKeyLengthIterator(it ethdb.Iterator, keyLen int) ethdb.Iterator {
return &KeyLengthIterator{
Iterator: it,
requiredKeyLength: keyLen,
}
}
func (it *KeyLengthIterator) Next() bool {
// Return true as soon as a key with the required key length is discovered
for it.Iterator.Next() {
if len(it.Iterator.Key()) == it.requiredKeyLength {
return true
}
}
// Return false when we exhaust the keys in the underlying iterator.
return false
}
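A minimal usage sketch for the iterator, assuming the exported rawdb constructors shown in this diff (NewMemoryDatabase, NewKeyLengthIterator): populate a memory database with mixed-length keys and filter down to the 8-byte ones.
package main
import (
	"encoding/binary"
	"fmt"
	"github.com/ethereum/go-ethereum/core/rawdb"
)
func main() {
	db := rawdb.NewMemoryDatabase()
	key8 := make([]byte, 8)
	binary.BigEndian.PutUint64(key8, 42)
	db.Put(key8, []byte{0x1})
	db.Put([]byte("a-much-longer-key"), []byte{0x2})
	// Only the 8-byte key survives the length filter.
	it := rawdb.NewKeyLengthIterator(db.NewIterator(nil, nil), 8)
	defer it.Release()
	for it.Next() {
		fmt.Println(binary.BigEndian.Uint64(it.Key())) // 42
	}
}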

View File

@@ -1,60 +0,0 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rawdb
import (
"encoding/binary"
"testing"
)
func TestKeyLengthIterator(t *testing.T) {
db := NewMemoryDatabase()
keyLen := 8
expectedKeys := make(map[string]struct{})
for i := 0; i < 100; i++ {
key := make([]byte, keyLen)
binary.BigEndian.PutUint64(key, uint64(i))
if err := db.Put(key, []byte{0x1}); err != nil {
t.Fatal(err)
}
expectedKeys[string(key)] = struct{}{}
longerKey := make([]byte, keyLen*2)
binary.BigEndian.PutUint64(longerKey, uint64(i))
if err := db.Put(longerKey, []byte{0x1}); err != nil {
t.Fatal(err)
}
}
it := NewKeyLengthIterator(db.NewIterator(nil, nil), keyLen)
for it.Next() {
key := it.Key()
_, exists := expectedKeys[string(key)]
if !exists {
t.Fatalf("Found unexpected key %d", binary.BigEndian.Uint64(key))
}
delete(expectedKeys, string(key))
if len(key) != keyLen {
t.Fatalf("Found unexpected key in key length iterator with length %d", len(key))
}
}
if len(expectedKeys) != 0 {
t.Fatalf("Expected all keys of length %d to be removed from expected keys during iteration", keyLen)
}
}

View File

@@ -63,9 +63,6 @@ var (
// snapshotSyncStatusKey tracks the snapshot sync status across restarts.
snapshotSyncStatusKey = []byte("SnapshotSyncStatus")
// skeletonSyncStatusKey tracks the skeleton sync status across restarts.
skeletonSyncStatusKey = []byte("SkeletonSyncStatus")
// txIndexTailKey tracks the oldest block whose transactions have been indexed.
txIndexTailKey = []byte("TransactionIndexTail")
@@ -95,11 +92,9 @@ var (
SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value
SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value
CodePrefix = []byte("c") // CodePrefix + code hash -> account code
skeletonHeaderPrefix = []byte("S") // skeletonHeaderPrefix + num (uint64 big endian) -> header
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
genesisPrefix = []byte("ethereum-genesis-") // genesis state prefix for the db
PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage
configPrefix = []byte("ethereum-config-") // config prefix for the db
// Chain index prefixes (use `i` + single byte to avoid mixing data types).
BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
@@ -215,11 +210,6 @@ func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
return key
}
// skeletonHeaderKey = skeletonHeaderPrefix + num (uint64 big endian)
func skeletonHeaderKey(number uint64) []byte {
return append(skeletonHeaderPrefix, encodeBlockNumber(number)...)
}
// preimageKey = PreimagePrefix + hash
func preimageKey(hash common.Hash) []byte {
return append(PreimagePrefix, hash.Bytes()...)
@@ -243,8 +233,3 @@ func IsCodeKey(key []byte) (bool, []byte) {
func configKey(hash common.Hash) []byte {
return append(configPrefix, hash.Bytes()...)
}
// genesisKey = genesisPrefix + hash
func genesisKey(hash common.Hash) []byte {
return append(genesisPrefix, hash.Bytes()...)
}

View File

@@ -74,12 +74,6 @@ func (t *table) Ancients() (uint64, error) {
return t.db.Ancients()
}
// Tail is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) Tail() (uint64, error) {
return t.db.Tail()
}
// AncientSize is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) AncientSize(kind string) (uint64, error) {
@@ -95,16 +89,10 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReader) error) (err err
return t.db.ReadAncients(fn)
}
// TruncateHead is a noop passthrough that just forwards the request to the underlying
// TruncateAncients is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateHead(items uint64) error {
return t.db.TruncateHead(items)
}
// TruncateTail is a noop passthrough that just forwards the request to the underlying
// database.
func (t *table) TruncateTail(items uint64) error {
return t.db.TruncateTail(items)
func (t *table) TruncateAncients(items uint64) error {
return t.db.TruncateAncients(items)
}
// Sync is a noop passthrough that just forwards the request to the underlying
@@ -113,12 +101,6 @@ func (t *table) Sync() error {
return t.db.Sync()
}
// MigrateTable processes the entries in a given table in sequence
// converting them to a new format if they're of an old format.
func (t *table) MigrateTable(kind string, convert convertLegacyFn) error {
return t.db.MigrateTable(kind, convert)
}
// Put inserts the given value into the database at a prefixed version of the
// provided key.
func (t *table) Put(key []byte, value []byte) error {
@@ -190,18 +172,6 @@ func (t *table) NewBatch() ethdb.Batch {
return &tableBatch{t.db.NewBatch(), t.prefix}
}
// NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
func (t *table) NewBatchWithSize(size int) ethdb.Batch {
return &tableBatch{t.db.NewBatchWithSize(size), t.prefix}
}
// NewSnapshot creates a database snapshot based on the current state.
// The created snapshot will not be affected by any subsequent mutations
// to the database.
func (t *table) NewSnapshot() (ethdb.Snapshot, error) {
return t.db.NewSnapshot()
}
// tableBatch is a wrapper around a database batch that prefixes each key access
// with a pre-configured string.
type tableBatch struct {

View File

@@ -26,6 +26,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/trie"
"github.com/gballet/go-verkle"
lru "github.com/hashicorp/golang-lru"
)
@@ -104,6 +105,9 @@ type Trie interface {
// nodes of the longest existing prefix of the key (at least the root), ending
// with the node that proves the absence of the key.
Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
// IsVerkle returns true if the trie is verkle-tree based
IsVerkle() bool
}
// NewDatabase creates a backing store for state. The returned database is safe for
@@ -118,6 +122,13 @@ func NewDatabase(db ethdb.Database) Database {
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
csc, _ := lru.New(codeSizeCacheSize)
if config != nil && config.UseVerkle {
return &VerkleDB{
db: trie.NewDatabaseWithConfig(db, config),
codeSizeCache: csc,
codeCache: fastcache.New(codeCacheSize),
}
}
return &cachingDB{
db: trie.NewDatabaseWithConfig(db, config),
codeSizeCache: csc,
@@ -202,3 +213,67 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro
func (db *cachingDB) TrieDB() *trie.Database {
return db.db
}
// VerkleDB implements state.Database for a verkle tree
type VerkleDB struct {
db *trie.Database
codeSizeCache *lru.Cache
codeCache *fastcache.Cache
}
// OpenTrie opens the main account trie.
func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) {
if root == (common.Hash{}) || root == emptyRoot {
return trie.NewVerkleTrie(verkle.New(), db.db), nil
}
payload, err := db.db.DiskDB().Get(root[:])
if err != nil {
return nil, err
}
r, err := verkle.ParseNode(payload, 0)
if err != nil {
panic(err)
}
return trie.NewVerkleTrie(r, db.db), err
}
// OpenStorageTrie opens the storage trie of an account.
func (db *VerkleDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
// alternatively, return accTrie
panic("should not be called")
}
// CopyTrie returns an independent copy of the given trie.
func (db *VerkleDB) CopyTrie(tr Trie) Trie {
t, ok := tr.(*trie.VerkleTrie)
if ok {
return t.Copy(db.db)
}
panic("invalid tree type != VerkleTrie")
}
// ContractCode retrieves a particular contract's code.
func (db *VerkleDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
return code, nil
}
code := rawdb.ReadCode(db.db.DiskDB(), codeHash)
if len(code) > 0 {
db.codeCache.Set(codeHash.Bytes(), code)
db.codeSizeCache.Add(codeHash, len(code))
return code, nil
}
return nil, errors.New("not found")
}
// ContractCodeSize retrieves a particular contract's code size.
func (db *VerkleDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
panic("need to merge #31 for this to work")
}
// TrieDB retrieves the low level trie database used for data storage.
func (db *VerkleDB) TrieDB() *trie.Database {
return db.db
}
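To make the control flow in OpenTrie easier to follow, here is a compact sketch of the same decision, with a plain map standing in for the disk database; the stub payload replaces go-verkle's node type, so this is an assumption-laden sketch rather than the real API.

package main

import (
	"errors"
	"fmt"
)

var emptyRoot [32]byte // all-zero root, as in the check above

// openTrie mirrors VerkleDB.OpenTrie: an empty root means a brand-new
// tree; any other root must resolve to a serialized node on disk.
func openTrie(root [32]byte, disk map[[32]byte][]byte) ([]byte, error) {
	if root == emptyRoot {
		return nil, nil // fresh tree, nothing to parse
	}
	payload, ok := disk[root]
	if !ok {
		return nil, errors.New("root node missing from disk")
	}
	// The real code hands payload to verkle.ParseNode(payload, 0).
	return payload, nil
}

func main() {
	_, err := openTrie([32]byte{1}, map[[32]byte][]byte{})
	fmt.Println(err) // root node missing from disk
}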

View File

@@ -76,6 +76,14 @@ func (it *NodeIterator) step() error {
// Initialize the iterator if we've just started
if it.stateIt == nil {
it.stateIt = it.state.trie.NodeIterator(nil)
// If the trie is a verkle trie, then the data and state
// are the same tree, and as a result both iterators are
// the same. This is a hack meant for both tree types to
// work.
if _, ok := it.state.trie.(*trie.VerkleTrie); ok {
it.dataIt = it.stateIt
}
}
// If we had data nodes previously, we surely have at least state nodes
if it.dataIt != nil {
@@ -100,10 +108,11 @@ func (it *NodeIterator) step() error {
it.state, it.stateIt = nil, nil
return nil
}
// If the state trie node is an internal entry, leave as is
// If the state trie node is an internal entry, leave as is.
if !it.stateIt.Leaf() {
return nil
}
// Otherwise we've reached an account node, initiate data iteration
var account types.StateAccount
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
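The iterator aliasing above is easy to miss, so here is a self-contained sketch of the trick; nodeIter and the function name are hypothetical stand-ins for the iterator types in this file.

package main

import "fmt"

// nodeIter is a hypothetical stand-in for trie.NodeIterator.
type nodeIter struct{ name string }

// initIterators mirrors the verkle special case above: with a single
// unified tree, the account walk and the storage walk are one walk.
func initIterators(stateIt *nodeIter, isVerkle bool) (state, data *nodeIter) {
	if isVerkle {
		return stateIt, stateIt // same iterator serves both roles
	}
	return stateIt, nil // storage iterator is opened per account later
}

func main() {
	st, da := initIterators(&nodeIter{name: "root"}, true)
	fmt.Println(st == da) // true: one tree, one iterator
}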

View File

@@ -89,7 +89,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6
if headBlock == nil {
return nil, errors.New("Failed to load head block")
}
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false)
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false, false)
if err != nil {
return nil, err // The relevant snapshot(s) might not exist
}
@@ -265,7 +265,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// Ensure the root is really present. The weak assumption
// is the presence of root can indicate the presence of the
// entire trie.
if !rawdb.HasTrieNode(p.db, root) {
if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
// The special case is for clique-based networks (rinkeby, goerli
// and some other private networks), where it's possible that two
// consecutive blocks will have the same root. In this case snapshot
@@ -279,7 +279,7 @@ func (p *Pruner) Prune(root common.Hash) error {
// as the pruning target.
var found bool
for i := len(layers) - 2; i >= 2; i-- {
if rawdb.HasTrieNode(p.db, layers[i].Root()) {
if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
root = layers[i].Root()
found = true
log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
@@ -362,7 +362,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
// In this case, even if the state HEAD does not exactly match the
// snapshot, it is still feasible to recover the pruning correctly.
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true)
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true, false)
if err != nil {
return err // The relevant snapshot(s) might not exist
}
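The two hunks above replace a boolean existence check with a read whose result length is tested. A sketch of the rewritten predicate, with a map standing in for the rawdb-backed store:

package main

import "fmt"

// hasTrieNode shows the rewritten check: instead of a dedicated
// HasTrieNode helper, presence is inferred from the blob that
// ReadTrieNode returns (an empty slice means absent).
func hasTrieNode(db map[string][]byte, root string) bool {
	blob := db[root]
	return len(blob) != 0
}

func main() {
	db := map[string][]byte{"rootA": {0x01}}
	fmt.Println(hasTrieNode(db, "rootA"), hasTrieNode(db, "rootB")) // true false
}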

View File

@@ -48,13 +48,13 @@ var (
// accountCheckRange is the upper limit of the number of accounts involved in
// each range check. This is a value estimated based on experience. If this
// value is too large, the failure rate of range proofs will increase; if it
// is too small, the efficiency of the state recovery will decrease.
accountCheckRange = 128
// storageCheckRange is the upper limit of the number of storage slots involved
// in each range check. This is a value estimated based on experience. If this
// value is too large, the failure rate of range proofs will increase; if it
// is too small, the efficiency of the state recovery will decrease.
storageCheckRange = 1024
// errMissingTrie is returned if the target trie is missing while the generation

View File

@@ -66,29 +66,6 @@ type journalStorage struct {
Vals [][]byte
}
func ParseGeneratorStatus(generatorBlob []byte) string {
if len(generatorBlob) == 0 {
return ""
}
var generator journalGenerator
if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
log.Warn("failed to decode snapshot generator", "err", err)
return ""
}
// Figure out whether we're after or within an account
var m string
switch marker := generator.Marker; len(marker) {
case common.HashLength:
m = fmt.Sprintf("at %#x", marker)
case 2 * common.HashLength:
m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
default:
m = fmt.Sprintf("%#x", marker)
}
return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
}
// loadAndParseJournal tries to parse the snapshot journal in latest format.
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
// Retrieve the disk layer generator. It must exist, no matter the
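The removed ParseGeneratorStatus decoded the generator's resume marker by its length; since that logic disappears here, a minimal sketch of the convention it relied on (32-byte hashes assumed, as in common.HashLength):

package main

import "fmt"

const hashLen = 32 // common.HashLength

// describeMarker mirrors the switch in the removed function: a marker of
// one hash means "after account X", two hashes means "inside account X at
// slot Y", anything else is printed raw.
func describeMarker(marker []byte) string {
	switch len(marker) {
	case hashLen:
		return fmt.Sprintf("at %#x", marker)
	case 2 * hashLen:
		return fmt.Sprintf("in %#x at %#x", marker[:hashLen], marker[hashLen:])
	default:
		return fmt.Sprintf("%#x", marker)
	}
}

func main() {
	fmt.Println(describeMarker(make([]byte, hashLen))) // "at 0x0000…"
}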

View File

@@ -24,6 +24,7 @@ import (
"sync"
"sync/atomic"
"github.com/VictoriaMetrics/fastcache"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
@@ -183,7 +184,7 @@ type Tree struct {
// This case happens when the snapshot is 'ahead' of the state trie.
// - otherwise, the entire snapshot is considered invalid and will be recreated on
// a background thread.
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool, useVerkle bool) (*Tree, error) {
// Create a new, empty snapshot tree
snap := &Tree{
diskdb: diskdb,
@@ -202,6 +203,17 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
}
if err != nil {
if rebuild {
if useVerkle {
snap.layers = map[common.Hash]snapshot{
root: &diskLayer{
diskdb: diskdb,
triedb: triedb,
root: root,
cache: fastcache.New(cache * 1024 * 1024),
},
}
return snap, nil
}
log.Warn("Failed to load snapshot, regenerating", "err", err)
snap.Rebuild(root)
return snap, nil
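When useVerkle is set and the journal fails to load, the tree above is seeded with a single bare disk layer instead of scheduling a background rebuild. Sketched below with stand-in types (not geth's real layer interface):

package main

import "fmt"

// layer is a stand-in for the snapshot layer interface.
type layer struct{ root [32]byte }

// newTree mirrors the branch above: on a failed journal load, a verkle
// chain gets one synthetic disk layer at the target root, while the
// legacy path would regenerate the snapshot in the background.
func newTree(root [32]byte, loadErr error, useVerkle bool) map[[32]byte]*layer {
	layers := map[[32]byte]*layer{}
	if loadErr != nil && useVerkle {
		layers[root] = &layer{root: root}
		return layers
	}
	// legacy path: rebuild asynchronously (omitted)
	return layers
}

func main() {
	tree := newTree([32]byte{1}, fmt.Errorf("missing journal"), true)
	fmt.Println(len(tree)) // 1
}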
@@ -546,19 +558,20 @@ func diffToDisk(bottom *diffLayer) *diskLayer {
it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
for it.Next() {
key := it.Key()
batch.Delete(key)
base.cache.Del(key[1:])
snapshotFlushStorageItemMeter.Mark(1)
if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
batch.Delete(key)
base.cache.Del(key[1:])
snapshotFlushStorageItemMeter.Mark(1)
// Ensure we don't delete too much data blindly (contract can be
// huge). It's ok to flush, the root will go missing in case of a
// crash and we'll detect and regenerate the snapshot.
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
// Ensure we don't delete too much data blindly (contract can be
// huge). It's ok to flush, the root will go missing in case of a
// crash and we'll detect and regenerate the snapshot.
if batch.ValueSize() > ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
log.Crit("Failed to write storage deletions", "err", err)
}
batch.Reset()
}
batch.Reset()
}
}
it.Release()
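The rewritten deletion loop above now guards on the iterator key's length (1 prefix byte + 32-byte account hash + 32-byte slot hash = 65 bytes) before deleting, as the TODO notes. A compact sketch of that filter, with a slice of keys standing in for the database iterator:

package main

import "fmt"

// flushStorageDeletions mirrors the guarded loop above: only well-formed
// storage snapshot keys (65 bytes) are deleted; anything else is skipped.
// The real code also batches the deletes and flushes the batch whenever
// it grows past ethdb.IdealBatchSize.
func flushStorageDeletions(keys [][]byte) (deleted int) {
	for _, key := range keys {
		if len(key) != 65 {
			continue // not a storage snapshot entry
		}
		deleted++
	}
	return deleted
}

func main() {
	keys := [][]byte{make([]byte, 65), make([]byte, 33)}
	fmt.Println(flushStorageDeletions(keys)) // 1
}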

View File

@@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
var emptyCodeHash = crypto.Keccak256(nil)
@@ -198,10 +200,25 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
}
// If no live objects are available, attempt to use snapshots
var (
enc []byte
err error
enc []byte
err error
meter *time.Duration
)
readStart := time.Now()
if metrics.EnabledExpensive {
// If the snap is 'under construction', the first lookup may fail. If that
// happens, we don't want to double-count the time elapsed. Thus this
// dance with the metering.
defer func() {
if meter != nil {
*meter += time.Since(readStart)
}
}()
}
if s.db.snap != nil {
if metrics.EnabledExpensive {
meter = &s.db.SnapshotStorageReads
}
// If the object was destructed in *this* block (and potentially resurrected),
// the storage has been cleared out, and we should *not* consult the previous
// snapshot about any storage values. The only possible alternatives are:
@@ -211,22 +228,26 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
return common.Hash{}
}
start := time.Now()
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
if metrics.EnabledExpensive {
s.db.SnapshotStorageReads += time.Since(start)
}
}
// If the snapshot is unavailable or reading from it fails, load from the database.
if s.db.snap == nil || err != nil {
start := time.Now()
enc, err = s.getTrie(db).TryGet(key.Bytes())
if metrics.EnabledExpensive {
s.db.StorageReads += time.Since(start)
if meter != nil {
// If we already spent time checking the snapshot, account for it
// and reset the readStart
*meter += time.Since(readStart)
readStart = time.Now()
}
if err != nil {
s.setError(err)
return common.Hash{}
if metrics.EnabledExpensive {
meter = &s.db.StorageReads
}
if !s.db.trie.IsVerkle() {
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
s.setError(err)
return common.Hash{}
}
} else {
panic("verkle trees use the snapshot")
}
}
var value common.Hash
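The metering rework above is subtle: a single deferred addition through a pointer charges whichever counter is current when the function returns, and the fallback path books the snapshot time before re-arming the clock. A stripped-down sketch, with hypothetical counters in place of SnapshotStorageReads and StorageReads:

package main

import (
	"errors"
	"fmt"
	"time"
)

// readWithMetering mirrors the pointer-based metering dance above.
func readWithMetering(snapReads, dbReads *time.Duration, snapFails bool) {
	var meter *time.Duration
	readStart := time.Now()
	defer func() {
		if meter != nil {
			*meter += time.Since(readStart)
		}
	}()

	meter = snapReads // fast path: charge the snapshot counter
	var err error
	if snapFails {
		err = errors.New("snapshot miss")
	}
	if err != nil {
		// Account for the failed snapshot lookup, then re-aim the
		// meter at the database counter and restart the clock.
		*meter += time.Since(readStart)
		readStart = time.Now()
		meter = dbReads
	}
}

func main() {
	var snap, db time.Duration
	readWithMetering(&snap, &db, true)
	fmt.Println(snap >= 0, db >= 0) // both paths were metered separately
}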
@@ -317,7 +338,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
// The snapshot storage map for the object
var storage map[common.Hash][]byte
// Insert all the pending updates into the trie
tr := s.getTrie(db)
var tr Trie
if s.db.trie.IsVerkle() {
tr = s.db.trie
} else {
tr = s.getTrie(db)
}
hasher := s.db.hasher
usedStorage := make([][]byte, 0, len(s.pendingStorage))
@@ -330,12 +356,25 @@ func (s *stateObject) updateTrie(db Database) Trie {
var v []byte
if (value == common.Hash{}) {
s.setError(tr.TryDelete(key[:]))
if tr.IsVerkle() {
k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
s.setError(tr.TryDelete(k))
//s.db.db.TrieDB().DiskDB().Delete(append(s.address[:], key[:]...))
} else {
s.setError(tr.TryDelete(key[:]))
}
s.db.StorageDeleted += 1
} else {
// Encoding []byte cannot fail, ok to ignore the error.
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
s.setError(tr.TryUpdate(key[:], v))
if !tr.IsVerkle() {
s.setError(tr.TryUpdate(key[:], v))
} else {
k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
// Update the trie, with v as a value
s.setError(tr.TryUpdate(k, v))
}
s.db.StorageUpdated += 1
}
// If state snapshotting is active, cache the data til commit
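The update path above branches on the trie type: a Merkle trie is keyed by the raw 32-byte slot, while the verkle tree re-derives a unified tree key from (address, slot). A sketch of that dispatch, with a dummy derivation in place of trieUtils.GetTreeKeyStorageSlot (the real mapping is the EIP's pedersen-based scheme, not a concatenation):

package main

import "fmt"

// storageKey mirrors the branch above: legacy tries use the raw slot key,
// the verkle tree derives a combined (address, slot) key. The derivation
// here is a placeholder only.
func storageKey(isVerkle bool, addr [20]byte, slot [32]byte) []byte {
	if !isVerkle {
		return slot[:]
	}
	return append(addr[:], slot[:]...) // placeholder, not the real mapping
}

func main() {
	var addr [20]byte
	var slot [32]byte
	fmt.Println(len(storageKey(false, addr, slot))) // 32
	fmt.Println(len(storageKey(true, addr, slot)))  // 52 in this sketch
}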

View File

@@ -18,6 +18,7 @@
package state
import (
"encoding/binary"
"errors"
"fmt"
"math/big"
@@ -33,6 +34,8 @@ import (
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
"github.com/holiman/uint256"
)
type revision struct {
@@ -99,6 +102,8 @@ type StateDB struct {
// Per-transaction access list
accessList *accessList
witness *types.AccessWitness
// Journal of state modifications. This is the backbone of
// Snapshot and RevertToSnapshot.
journal *journal
@@ -143,6 +148,13 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
journal: newJournal(),
accessList: newAccessList(),
hasher: crypto.NewKeccakState(),
witness: types.NewAccessWitness(),
}
if sdb.snaps == nil && tr.IsVerkle() {
sdb.snaps, err = snapshot.New(db.TrieDB().DiskDB(), db.TrieDB(), 1, root, false, true, false, true)
if err != nil {
return nil, err
}
}
if sdb.snaps != nil {
if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
@@ -154,6 +166,14 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
return sdb, nil
}
func (s *StateDB) Witness() *types.AccessWitness {
return s.witness
}
func (s *StateDB) SetWitness(aw *types.AccessWitness) {
s.witness = aw
}
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
// state trie concurrently while the state is mutated so that when we reach the
// commit phase, most of the needed data is already hot.
@@ -460,8 +480,26 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
}
// Encode the account and update the account trie
addr := obj.Address()
if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
}
if len(obj.code) > 0 && s.trie.IsVerkle() {
cs := make([]byte, 32)
binary.BigEndian.PutUint64(cs, uint64(len(obj.code)))
if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
}
if obj.dirtyCode {
if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
for i := range chunks {
s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
}
} else {
s.setError(err)
}
}
}
// If state snapshotting is active, cache the data til commit. Note, this
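updateStateObject now also writes a 32-byte code-size leaf and, for dirty code, one leaf per code chunk. A simplified sketch of the chunk shape: the real trie.ChunkifyCode also records, in each chunk's header byte, how many of its leading bytes are PUSH data, which this toy version leaves at zero.

package main

import "fmt"

// chunkify splits code into 32-byte chunks: 1 header byte + 31 code bytes.
// The header should count leading PUSH-data bytes; this sketch does not.
func chunkify(code []byte) [][32]byte {
	chunks := make([][32]byte, 0, (len(code)+30)/31)
	for i := 0; i < len(code); i += 31 {
		var chunk [32]byte
		end := i + 31
		if end > len(code) {
			end = len(code)
		}
		copy(chunk[1:], code[i:end])
		chunks = append(chunks, chunk)
	}
	return chunks
}

func main() {
	code := make([]byte, 100)
	fmt.Println(len(chunkify(code))) // 4 chunks for 100 bytes of code
}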
@@ -479,10 +517,19 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
}
// Delete the account from the trie
addr := obj.Address()
if err := s.trie.TryDelete(addr[:]); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
if !s.trie.IsVerkle() {
addr := obj.Address()
if err := s.trie.TryDelete(addr[:]); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
}
} else {
for i := 0; i < 256; i++ {
if err := s.trie.TryDelete(trieUtils.GetTreeKeyAccountLeaf(obj.Address().Bytes(), byte(i))); err != nil {
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", obj.Address(), err))
}
}
}
}
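The verkle delete path above has no single account node to remove, so it clears every per-account leaf index. A sketch of the loop shape; note that an int counter is used because a byte counter with an `i <= 255` condition wraps around and never terminates:

package main

import "fmt"

// deleteAccountLeaves mirrors the verkle delete loop above: one TryDelete
// per leaf index 0..255, aborting on the first error.
func deleteAccountLeaves(del func(idx byte) error) error {
	for i := 0; i < 256; i++ {
		if err := del(byte(i)); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	n := 0
	deleteAccountLeaves(func(byte) error { n++; return nil })
	fmt.Println(n) // 256
}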
@@ -506,14 +553,16 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
return obj
}
// If no live objects are available, attempt to use snapshots
var data *types.StateAccount
var (
data *types.StateAccount
err error
)
if s.snap != nil {
start := time.Now()
acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
if metrics.EnabledExpensive {
s.SnapshotAccountReads += time.Since(start)
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
}
if err == nil {
var acc *snapshot.Account
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
if acc == nil {
return nil
}
@@ -530,14 +579,21 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
data.Root = emptyRoot
}
}
// NOTE: Do not touch the addresses here, kick the can down the
// road. That is because I don't want to change the interface
// to getDeletedStateObject at this stage, as the PR would then
// have a huge footprint.
// The alternative is to make accesses available via the state
// db instead of the evm. This requires a significant rewrite,
// that isn't currently warranted.
}
// If snapshot unavailable or reading from it failed, load from the database
if data == nil {
start := time.Now()
enc, err := s.trie.TryGet(addr.Bytes())
if s.snap == nil || err != nil {
if metrics.EnabledExpensive {
s.AccountReads += time.Since(start)
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
}
enc, err := s.trie.TryGet(addr.Bytes())
if err != nil {
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
return nil
@@ -657,6 +713,7 @@ func (s *StateDB) Copy() *StateDB {
preimages: make(map[common.Hash][]byte, len(s.preimages)),
journal: newJournal(),
hasher: crypto.NewKeccakState(),
witness: s.witness.Copy(),
}
// Copy the dirty states, logs, and preimages
for addr := range s.journal.dirties {
@@ -844,7 +901,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// to pull useful data from disk.
for addr := range s.stateObjectsPending {
if obj := s.stateObjects[addr]; !obj.deleted {
obj.updateRoot(s.db)
if s.trie.IsVerkle() {
obj.updateTrie(s.db)
} else {
obj.updateRoot(s.db)
}
}
}
// Now we're about to start to write changes to the trie. The trie is so far
@@ -884,6 +945,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
func (s *StateDB) Prepare(thash common.Hash, ti int) {
s.thash = thash
s.txIndex = ti
s.accessList = newAccessList()
}
func (s *StateDB) clearJournalAndRefund() {
@@ -894,6 +956,20 @@ func (s *StateDB) clearJournalAndRefund() {
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}
// GetTrie returns the account trie.
func (s *StateDB) GetTrie() Trie {
return s.trie
}
func (s *StateDB) Cap(root common.Hash) error {
if s.snaps != nil {
return s.snaps.Cap(root, 0)
}
// pre-verkle path: noop if s.snaps hasn't been
// initialized.
return nil
}
// Commit writes the state to the underlying in-memory trie database.
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if s.dbErr != nil {
@@ -907,17 +983,27 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
for addr := range s.stateObjectsDirty {
if obj := s.stateObjects[addr]; !obj.deleted {
// Write any contract code associated with the state object
if obj.code != nil && obj.dirtyCode {
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
obj.dirtyCode = false
}
// Write any storage changes in the state object to its storage trie
committed, err := obj.CommitTrie(s.db)
if err != nil {
return common.Hash{}, err
}
storageCommitted += committed
// Write any contract code associated with the state object
if obj.code != nil && obj.dirtyCode {
if s.trie.IsVerkle() {
if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
for i := range chunks {
s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
}
} else {
s.setError(err)
}
} else {
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
}
obj.dirtyCode = false
}
}
}
if len(s.stateObjectsDirty) > 0 {
@@ -993,9 +1079,6 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
//
// This method should only be called if Berlin/2929+2930 is applicable at the current number.
func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
// Clear out any leftover from previous executions
s.accessList = newAccessList()
s.AddAddressToAccessList(sender)
if dst != nil {
s.AddAddressToAccessList(*dst)
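The two hunks above move the per-transaction access-list reset out of PrepareAccessList and into Prepare, so the list is cleared exactly once per transaction even when PrepareAccessList is never reached. A minimal sketch of the lifecycle, with simplified stand-in types:

package main

import "fmt"

// accessList is a simplified stand-in for the real journaled structure.
type accessList map[string]bool

type stateDB struct{ accessList accessList }

// prepare mirrors the new Prepare: every transaction starts with a
// fresh access list, instead of relying on PrepareAccessList to reset it.
func (s *stateDB) prepare() { s.accessList = accessList{} }

func (s *stateDB) prepareAccessList(sender string) {
	// No reset here anymore; just populate.
	s.accessList[sender] = true
}

func main() {
	s := &stateDB{}
	s.prepare()
	s.prepareAccessList("0xsender")
	fmt.Println(len(s.accessList)) // 1
}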

View File

@@ -704,7 +704,10 @@ func TestMissingTrieNodes(t *testing.T) {
memDb := rawdb.NewMemoryDatabase()
db := NewDatabase(memDb)
var root common.Hash
state, _ := New(common.Hash{}, db, nil)
state, err := New(common.Hash{}, db, nil)
if err != nil {
panic("nil stte")
}
addr := common.BytesToAddress([]byte("so"))
{
state.SetBalance(addr, big.NewInt(1))
@@ -736,7 +739,7 @@ func TestMissingTrieNodes(t *testing.T) {
}
// Modify the state
state.SetBalance(addr, big.NewInt(2))
root, err := state.Commit(false)
root, err = state.Commit(false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}

View File

@@ -27,7 +27,7 @@ import (
)
// NewStateSync creates a new state trie download scheduler.
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync {
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync {
// Register the storage slot callback if the external callback is specified.
var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
if onLeaf != nil {
@@ -52,6 +52,6 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(p
syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent)
return nil
}
syncer = trie.NewSync(root, database, onAccount)
syncer = trie.NewSync(root, database, onAccount, bloom)
return syncer
}
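NewStateSync regains a *trie.SyncBloom parameter that is threaded straight through to trie.NewSync. A small sketch of the pass-through pattern with stand-in types (not geth's real signatures):

package main

import "fmt"

// syncBloom and syncer are stand-ins for trie.SyncBloom and trie.Sync.
type syncBloom struct{}
type syncer struct{ bloom *syncBloom }

// newSync mirrors trie.NewSync's re-added parameter: the bloom filter is
// optional (nil disables it) and is simply threaded through.
func newSync(bloom *syncBloom) *syncer { return &syncer{bloom: bloom} }

// newStateSync mirrors NewStateSync above: it forwards the caller's bloom.
func newStateSync(bloom *syncBloom) *syncer { return newSync(bloom) }

func main() {
	fmt.Println(newStateSync(nil).bloom == nil) // true: bloom is optional
}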

Some files were not shown because too many files have changed in this diff.