Compare commits
124 Commits
verkle/onl
...
v1.10.16
Author | SHA1 | Date | |
---|---|---|---|
|
20356e57b1 | ||
|
e98114da4f | ||
|
f01e2fab07 | ||
|
6c3513c077 | ||
|
51e7968b8b | ||
|
fb3a6528cf | ||
|
5a0d487c3b | ||
|
2d20fed893 | ||
|
6ce4670bc0 | ||
|
aaca58a7a1 | ||
|
1a7e345af4 | ||
|
d99e759e76 | ||
|
afe344bcf3 | ||
|
c5436c8eb7 | ||
|
b868ca1790 | ||
|
9da25c5db7 | ||
|
a5c0cfb451 | ||
|
cac09a3823 | ||
|
0c1bd22ec0 | ||
|
64c53edf83 | ||
|
abd49a6c48 | ||
|
a9885505ca | ||
|
e282246a4b | ||
|
015fde9a2c | ||
|
29cb5deea3 | ||
|
78f13a3a57 | ||
|
0e35192797 | ||
|
f39f068161 | ||
|
f9ce40bb84 | ||
|
4230f5f08f | ||
|
78636ee568 | ||
|
bd615e0e5f | ||
|
683854255c | ||
|
06e16de894 | ||
|
2dfa4bcf6c | ||
|
eef7a33135 | ||
|
ae45c97d3d | ||
|
c029cdc90b | ||
|
5bcbb2980b | ||
|
514ae7cfa3 | ||
|
03aaea11d1 | ||
|
7dec26db2a | ||
|
51eb5f8ca8 | ||
|
4aab440ee2 | ||
|
f80ce141a1 | ||
|
b1f09596e6 | ||
|
045e90c897 | ||
|
2c58e6b62d | ||
|
52448e9585 | ||
|
c006261758 | ||
|
e6b61edd57 | ||
|
acd7b36999 | ||
|
b1e72f7ea9 | ||
|
1884f37f2c | ||
|
23471288c8 | ||
|
adc0a6adca | ||
|
0dec47b5c0 | ||
|
127ce93db4 | ||
|
9aa2e98191 | ||
|
7403a38ab7 | ||
|
af2ca5a654 | ||
|
0f893109c9 | ||
|
8be800ffa9 | ||
|
335914a63a | ||
|
3ccd6b6dbb | ||
|
c20de3c4bd | ||
|
0169d579d0 | ||
|
c0d17bca52 | ||
|
4bd2d0eccf | ||
|
66a908c5e8 | ||
|
d0bd5017ed | ||
|
98be5f9a72 | ||
|
062d910b26 | ||
|
356bbe343a | ||
|
dddf73abbd | ||
|
11a3a35097 | ||
|
3f2e96cf95 | ||
|
980b7682b4 | ||
|
b8edc04ce3 | ||
|
99be62a9b1 | ||
|
8bbf83e7a4 | ||
|
2295640ebd | ||
|
f5f5c0855a | ||
|
ada9c774e9 | ||
|
3e47e38a4e | ||
|
81ec6b1d4c | ||
|
bc6bf1e193 | ||
|
893502e561 | ||
|
0ba0b81e54 | ||
|
fc01a7ce8e | ||
|
155795be99 | ||
|
adec878c1d | ||
|
b3b8b268eb | ||
|
72c2c0ae7e | ||
|
ae8ff2661d | ||
|
db03faa10d | ||
|
7f7877a023 | ||
|
d78590560d | ||
|
cc87cbd70a | ||
|
acb0f7a67b | ||
|
a25906e4c0 | ||
|
69686fa328 | ||
|
a95675d50f | ||
|
619a3e7085 | ||
|
93f196c4b0 | ||
|
46f701ca93 | ||
|
cca482b4b1 | ||
|
58d1988349 | ||
|
b02fe5317f | ||
|
9331fe28e8 | ||
|
a0f7771962 | ||
|
5e78fc034b | ||
|
85064ed09b | ||
|
b45931cc4a | ||
|
8fbe0b9b68 | ||
|
c893488349 | ||
|
721c5723c0 | ||
|
2be129b5cf | ||
|
9393d1fb5d | ||
|
1988b47e02 | ||
|
163f1665dd | ||
|
a69d4b273d | ||
|
1fa91729f2 | ||
|
86fe359a56 |
@@ -1,45 +0,0 @@
|
|||||||
# Use the latest 2.1 version of CircleCI pipeline process engine.
|
|
||||||
# See: https://circleci.com/docs/2.0/configuration-reference
|
|
||||||
version: 2.1
|
|
||||||
|
|
||||||
# Define a job to be invoked later in a workflow.
|
|
||||||
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
working_directory: ~/repo
|
|
||||||
# Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
|
|
||||||
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
|
|
||||||
docker:
|
|
||||||
- image: circleci/golang:1.16.10
|
|
||||||
# Add steps to the job
|
|
||||||
# See: https://circleci.com/docs/2.0/configuration-reference/#steps
|
|
||||||
steps:
|
|
||||||
- checkout
|
|
||||||
- restore_cache:
|
|
||||||
keys:
|
|
||||||
- go-mod-v4-{{ checksum "go.sum" }}
|
|
||||||
- run:
|
|
||||||
name: Install Dependencies
|
|
||||||
command: go mod download
|
|
||||||
- save_cache:
|
|
||||||
key: go-mod-v4-{{ checksum "go.sum" }}
|
|
||||||
paths:
|
|
||||||
- "/go/pkg/mod"
|
|
||||||
#- run:
|
|
||||||
# name: Run linter
|
|
||||||
# command: |
|
|
||||||
# go run build/ci.go lint
|
|
||||||
- run:
|
|
||||||
name: Run tests
|
|
||||||
command: |
|
|
||||||
go run build/ci.go test -coverage
|
|
||||||
- store_test_results:
|
|
||||||
path: /tmp/test-reports
|
|
||||||
|
|
||||||
# Invoke jobs via workflows
|
|
||||||
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
|
|
||||||
workflows:
|
|
||||||
sample: # This is the name of the workflow, feel free to change it to better match your workflow.
|
|
||||||
# Inside the workflow, you define the jobs you want to run.
|
|
||||||
jobs:
|
|
||||||
- build
|
|
4
.gitmodules
vendored
4
.gitmodules
vendored
@@ -2,3 +2,7 @@
|
|||||||
path = tests/testdata
|
path = tests/testdata
|
||||||
url = https://github.com/ethereum/tests
|
url = https://github.com/ethereum/tests
|
||||||
shallow = true
|
shallow = true
|
||||||
|
[submodule "evm-benchmarks"]
|
||||||
|
path = tests/evm-benchmarks
|
||||||
|
url = https://github.com/ipsilon/evm-benchmarks
|
||||||
|
shallow = true
|
||||||
|
@@ -1,7 +1,7 @@
|
|||||||
# This file configures github.com/golangci/golangci-lint.
|
# This file configures github.com/golangci/golangci-lint.
|
||||||
|
|
||||||
run:
|
run:
|
||||||
timeout: 5m
|
timeout: 20m
|
||||||
tests: true
|
tests: true
|
||||||
# default is true. Enables skipping of directories:
|
# default is true. Enables skipping of directories:
|
||||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
||||||
|
@@ -57,7 +57,7 @@ on how you can run your own `geth` instance.
|
|||||||
By far the most common scenario is people wanting to simply interact with the Ethereum
|
By far the most common scenario is people wanting to simply interact with the Ethereum
|
||||||
network: create accounts; transfer funds; deploy and interact with contracts. For this
|
network: create accounts; transfer funds; deploy and interact with contracts. For this
|
||||||
particular use-case the user doesn't care about years-old historical data, so we can
|
particular use-case the user doesn't care about years-old historical data, so we can
|
||||||
fast-sync quickly to the current state of the network. To do so:
|
sync quickly to the current state of the network. To do so:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ geth console
|
$ geth console
|
||||||
@@ -68,7 +68,7 @@ This command will:
|
|||||||
causing it to download more data in exchange for avoiding processing the entire history
|
causing it to download more data in exchange for avoiding processing the entire history
|
||||||
of the Ethereum network, which is very CPU intensive.
|
of the Ethereum network, which is very CPU intensive.
|
||||||
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
|
* Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
|
||||||
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/)
|
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md)
|
||||||
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
|
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
|
||||||
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
|
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
|
||||||
This tool is optional and if you leave it out you can always attach to an already running
|
This tool is optional and if you leave it out you can always attach to an already running
|
||||||
@@ -159,13 +159,13 @@ docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
|
|||||||
ethereum/client-go
|
ethereum/client-go
|
||||||
```
|
```
|
||||||
|
|
||||||
This will start `geth` in fast-sync mode with a DB memory allowance of 1GB just as the
|
This will start `geth` in snap-sync mode with a DB memory allowance of 1GB just as the
|
||||||
above command does. It will also create a persistent volume in your home directory for
|
above command does. It will also create a persistent volume in your home directory for
|
||||||
saving your blockchain as well as map the default ports. There is also an `alpine` tag
|
saving your blockchain as well as map the default ports. There is also an `alpine` tag
|
||||||
available for a slim version of the image.
|
available for a slim version of the image.
|
||||||
|
|
||||||
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
|
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
|
||||||
and/or hosts. By default, `geth` binds to the local interface and RPC endpoints is not
|
and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
|
||||||
accessible from the outside.
|
accessible from the outside.
|
||||||
|
|
||||||
### Programmatically interfacing `geth` nodes
|
### Programmatically interfacing `geth` nodes
|
||||||
|
231
SECURITY.md
231
SECURITY.md
@@ -19,7 +19,7 @@ Audit reports are published in the `docs` folder: https://github.com/ethereum/go
|
|||||||
|
|
||||||
**Please do not file a public ticket** mentioning the vulnerability.
|
**Please do not file a public ticket** mentioning the vulnerability.
|
||||||
|
|
||||||
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities.
|
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
|
||||||
|
|
||||||
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
|
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
|
||||||
|
|
||||||
@@ -29,92 +29,147 @@ Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
|
|||||||
|
|
||||||
```
|
```
|
||||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||||
Version: GnuPG v1
|
Version: SKS 1.1.6
|
||||||
|
Comment: Hostname: pgp.mit.edu
|
||||||
|
|
||||||
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaY
|
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
|
||||||
neAk3Bp182GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9
|
82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
|
||||||
L8c8yiqry1ZTCmYMqCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUi
|
qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
|
||||||
m+y7buJDtoNf7YILlhDQXN8qlHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0b
|
lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
|
||||||
fUo9pexOn7LS4SojoJmsm/5dp6AoKlac48cZU5zwR9AYcq/nvkrfmf2WkObg/xRd
|
48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
|
||||||
EvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/yPFE335k+ujjZCPOu7OwjzDk7
|
PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
|
||||||
M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXChoyI8vbfp4dGvCvYqv
|
yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
|
||||||
QAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+FnQOUgg2H
|
nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
|
||||||
h8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
|
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
|
||||||
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZ
|
8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
|
||||||
EZCjMXxB8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQAB
|
b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
|
||||||
tDlFdGhlcmV1bSBGb3VuZGF0aW9uIFNlY3VyaXR5IFRlYW0gPHNlY3VyaXR5QGV0
|
FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
|
||||||
aGVyZXVtLm9yZz6JAj4EEwECACgCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheA
|
mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
|
||||||
BQJaCWH6BQkFo2BYAAoJEOiNMzT6X2oK+DEP/3H6dxkm0hvHZKoHLVuuxcu3EHYo
|
rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
|
||||||
k5sd3MMWPrZSN8qzZnY7ayEDMxnarWOizc+2jfOxfJlzX/g8lR1/fsHdWPFPhPoV
|
Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
|
||||||
Qk8ygrHn1H8U8+rpw/U03BqmqHpYCDzJ+CIis9UWROniqXw1nuqu/FtWOsdWxNKh
|
Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
|
||||||
jUo6k/0EsaXsxRPzgJv7fEUcVcQ7as/C3x9sy3muc2gvgA4/BKoGPb1/U0GuA8lV
|
CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
|
||||||
fDIDshAggmnSUAg+TuYSAAdoFQ1sKwFMPigcLJF2eyKuK3iUyixJrec/c4LSf3wA
|
09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
|
||||||
cGghbeuqI8INP0Y2zvXDQN2cByxsFAuoZG+m0cyKGaDH2MVUvOKKYqn/03qvrf15
|
QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
|
||||||
AWAsW0l0yQwOTCo3FbsNzemClm5Bj/xH0E4XuwXwChcMCMOWJrFoxyvCEI+keoQc
|
Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
|
||||||
c08/a8/MtS7vBAABXwOziSmm6CNqmzpWrh/fDrjlJlba9U3MxzvqU3IFlTdMratv
|
XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
|
||||||
6V+SgX+L25lCzW4NxxUavoB8fAlvo8lxpHKo24FP+RcLQ8XqkU3RiUsgRjQRFOqQ
|
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
|
||||||
TaJcsp8mimmiYyf24mNu6b48pi+a5c/eQR9w59emeEUZqsJU+nqv8BWIIp7o4Agh
|
D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
|
||||||
NYnKjkhPlY5e1fLVfAHIADZFynWwRPkPMJSrBiP5EtcOFxQGHGjRxU/KjXkvE0hV
|
s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
|
||||||
xYb1PB8pWMTu/beeiQI+BBMBAgAoBQJYJd7YAhsDBQkB4TOABgsJCAcDAgYVCAIJ
|
ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
|
||||||
CgsEFgIDAQIeAQIXgAAKCRDojTM0+l9qCplDD/9IZ2i+m1cnqQKtiyHbyFGx32oL
|
kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
|
||||||
fzqPylX2bOG5DPsSTorSUdJMGVfT04oVxXc4S/2DVnNvi7RAbSiLapCWSplgtBOj
|
dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
|
||||||
j1xlblOoXxT3m7s1XHGCX5tENxI9fVSSPVKJn+fQaWpPB2MhBA+1lUI6GJ+11T7K
|
O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
|
||||||
J8LrP/fiw1/nOb7rW61HW44Gtyox23sA/d1+DsFVaF8hxJlNj5coPKr8xWzQ8pQl
|
ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
|
||||||
juzdjHDukjevuw4rRmRq9vozvj9keEU9XJ5dldyEVXFmdDk7KT0p0Rla9nxYhzf/
|
fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
|
||||||
r/Bv8Bzy0HCWRb2D31BjXXGG05oVnYmNGxGFxYja4MwgrMmne3ilEVjfUJsapsqi
|
T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
|
||||||
w41BAyQgIdfREulYN7ahsF5PrjVAqBd9IGtE8ULelF2SQxEBQBngEkP0ahP6tRAL
|
V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
|
||||||
i7/CBjPKOyKijtqVny7qrGOnU2ygcA88/WDibexDhrjz0Gx8WmErU7rIWZiZ5u4Y
|
CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
|
||||||
vJYVRo0+6rBCXRPeSJfiP5h1p17Anr2l42boAYslfcrzquB8MHtrNcyn650OLtHG
|
Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
|
||||||
nbxgIdniKrpuzGN6Opw+O2id2JhD1/1p4SOemwAmthplr1MIyOHNP3q93rEj2J7h
|
5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
|
||||||
5zPS/AJuKkMDFUpslPNLQjCOwPXtdzL7/kUZGBSyez1T3TaW1uY6l9XaJJRaSn+v
|
7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
|
||||||
1zPgfp4GJ3lPs4AlAbQ0RXRoZXJldW0gRm91bmRhdGlvbiBCdWcgQm91bnR5IDxi
|
otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
|
||||||
b3VudHlAZXRoZXJldW0ub3JnPokCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
|
DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
|
||||||
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagoENg/+LnSaVeMxiGVtcjWl
|
aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
|
||||||
b7Xd73yrEy4uxiESS1AalW9mMf7oZzfI05f7QIQlaLAkNac74vZDJbPKjtb7tpMO
|
A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
|
||||||
RFhRZMCveq6CPKU6pd1SI8IUVUKwpEe6AJP3lHdVP57dquieFE2HlYKm6uHbCGWU
|
/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
|
||||||
0cjyTA+uu2KbgCHGmofsPY/xOcZLGEHTHqa5w60JJAQm+BSDKnw8wTyrxGvA3EK/
|
rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
|
||||||
ePSvOZMYa+iw6vYuZeBIMbdiXR/A2keBi3GuvqB8tDMj7P22TrH5mVDm3zNqGYD6
|
BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
|
||||||
amDPeiWp4cztY3aZyLcgYotqXPpDceZzDn+HopBPzAb/llCdE7bVswKRhphVMw4b
|
lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
|
||||||
bhL0R/TQY7Sf6TK2LKSBrjv0DWOSijikE71SJcBnJvHU7EpKrQQ0lMGclm3ynyji
|
GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
|
||||||
Nf0YTPXQt4I+fwTmOew2GFeK3UytNWbWI7oXX7Nm4bj9bhf3IJ0kmZb/Gs73+xII
|
GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
|
||||||
e7Rz52Mby436tWyQIQiF9ITYNGvNf53TwBBZMn0pKPiTyr3Ur7FHEotkEOFNh1//
|
6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
|
||||||
4zQY10XxuBdLrYGyZ4V8xHJM+oKre8Eg2R9qHXVbjvErHE+7CvgnV7YUip0criPr
|
qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
|
||||||
BlKRvuoJaSliH2JFhSjWVrkPmFGrWN0BAx10yIqMnEplfKeHf4P9Elek3oInS8WP
|
zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
|
||||||
G1zJG6s/t5+hQK0X37+TB+6rd3GJAj4EEwECACgFAlgl4TsCGwMFCQHhM4AGCwkI
|
nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
|
||||||
BwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOiNMzT6X2oKzf8P/iIKd77WHTbp4pMN
|
5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
|
||||||
8h52HyZJtDJmjA1DPZrbGl1TesW/Z9uTd12txlgqZnbG2GfN9+LSP6EOPzR6v2xC
|
WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
|
||||||
OVhR+RdWhZDJJuQCVS7lJIqQrZgmeTZG0TyQPZdLjVFBOrrhVwYX+HXbu429IzHr
|
8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
|
||||||
URf5InyR1QgqOXyElDYS6e28HFqvaoA0DWTWDDqOLPVl+U5fuceIE2XXdv3AGLeP
|
N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
|
||||||
Yf8J5MPobjPiZtBqI6S6iENY2Yn35qLX+axeC/iYSCHVtFuCCIdb/QYR1ZZV8Ps/
|
c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
|
||||||
aI9DwC7LU+YfPw7iqCIoqxSeA3o1PORkdSigEg3jtfRv5UqVo9a0oBb9jdoADsat
|
wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
|
||||||
F/gW0E7mto3XGOiaR0eB9SSdsM3x7Bz4A0HIGNaxpZo1RWqlO91leP4c13Px7ISv
|
D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
|
||||||
5OGXfLg+M8qb+qxbGd1HpitGi9s1y1aVfEj1kOtZ0tN8eu+Upg5WKwPNBDX3ar7J
|
J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
|
||||||
9NCULgVSL+E79FG+zXw62gxiQrLfKzm4wU/9L5wVkwQnm29hLJ0tokrSBZFnc/1l
|
Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
|
||||||
7OC+GM63tYicKkY4rqmoWUeYx7IwFH9mtDtvR1RxO85RbQhZizwpZpdpRkH0DqZu
|
NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
|
||||||
ZJRmRa5r7rPqmfa7d+VIFhz2Xs8pJMLVqxTsLKcLglmjw7aOrYG0SWeH7YraXWGD
|
QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
|
||||||
N3SlvSBiVwcK7QUKzLLvpadLwxfsuQINBFgl3tgBEACbgq6HTN5gEBi0lkD/MafI
|
Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
|
||||||
nmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4hYontkMaKRlCg2Rvgjvk3Zve0
|
uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
|
||||||
PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT19BdeAQRFvcfd+8w8
|
TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
|
||||||
f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj26bf+2+1
|
Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
|
||||||
DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
|
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
|
||||||
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66i
|
diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
|
||||||
PsR99MQ7FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A
|
sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
|
||||||
4tGkHl08KZ2N9o6GrfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8gr
|
E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
|
||||||
eW8xB4zuf9Mkuou+RHNmo8PebHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0
|
B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
|
||||||
VRxdPImKun+4LOXbfOxArOSkY6i35+gsgkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9
|
C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
|
||||||
IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/bM1ACUtipMiIVeUs2uFiRjpz
|
BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
|
||||||
A1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJaCWIIBQkFo2BYAAoJ
|
TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
|
||||||
EOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg3IHMGxDM
|
SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
|
||||||
b/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
|
CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
|
||||||
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0I
|
HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
|
||||||
Q1UKKXvzZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0
|
AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
|
||||||
K9lneidcqtBDvlggJTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0T
|
0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
|
||||||
NOOE8fXlvu8iuIAMBSDL9ep6sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd
|
VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
|
||||||
5MTi0MDRNTij431kn8T/D0LCgmoUmYYMBgbwFhXr67axPZlKjrqR0z3F/Elv0ZPP
|
wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
|
||||||
cVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1qScl9HiMxjt/H6aPastH63/7w
|
OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
|
||||||
cN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4/Lih6Z1TlwcFVap+
|
WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
|
||||||
cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1pM6AOQPpZ
|
EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
|
||||||
85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4
|
jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
|
||||||
=r6KK
|
Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
|
||||||
-----END PGP PUBLIC KEY BLOCK-----
|
MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
|
||||||
|
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
|
||||||
|
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
|
||||||
|
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
|
||||||
|
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
|
||||||
|
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
|
||||||
|
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
|
||||||
|
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
|
||||||
|
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
|
||||||
|
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
|
||||||
|
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
|
||||||
|
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
|
||||||
|
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
|
||||||
|
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
|
||||||
|
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
|
||||||
|
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
|
||||||
|
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
|
||||||
|
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
|
||||||
|
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
|
||||||
|
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
|
||||||
|
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
|
||||||
|
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
|
||||||
|
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
|
||||||
|
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
|
||||||
|
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
|
||||||
|
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
|
||||||
|
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
|
||||||
|
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
|
||||||
|
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
|
||||||
|
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
|
||||||
|
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
|
||||||
|
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
|
||||||
|
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
|
||||||
|
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
|
||||||
|
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
|
||||||
|
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
|
||||||
|
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
|
||||||
|
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
|
||||||
|
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
|
||||||
|
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
|
||||||
|
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
|
||||||
|
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
|
||||||
|
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
|
||||||
|
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
|
||||||
|
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
|
||||||
|
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
|
||||||
|
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
|
||||||
|
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
|
||||||
|
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
|
||||||
|
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
|
||||||
|
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
|
||||||
|
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
|
||||||
|
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
|
||||||
|
=arte
|
||||||
|
-----END PGP PUBLIC KEY BLOCK------
|
||||||
```
|
```
|
||||||
|
@@ -81,13 +81,7 @@ func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
|
|||||||
if len(arguments) != 0 {
|
if len(arguments) != 0 {
|
||||||
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
|
return nil, fmt.Errorf("abi: attempting to unmarshall an empty string while arguments are expected")
|
||||||
}
|
}
|
||||||
// Nothing to unmarshal, return default variables
|
return make([]interface{}, 0), nil
|
||||||
nonIndexedArgs := arguments.NonIndexed()
|
|
||||||
defaultVars := make([]interface{}, len(nonIndexedArgs))
|
|
||||||
for index, arg := range nonIndexedArgs {
|
|
||||||
defaultVars[index] = reflect.New(arg.Type.GetType())
|
|
||||||
}
|
|
||||||
return defaultVars, nil
|
|
||||||
}
|
}
|
||||||
return arguments.UnpackValues(data)
|
return arguments.UnpackValues(data)
|
||||||
}
|
}
|
||||||
|
@@ -230,6 +230,9 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
|
|||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
|
|
||||||
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
|
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
|
||||||
|
if receipt == nil {
|
||||||
|
return nil, ethereum.NotFound
|
||||||
|
}
|
||||||
return receipt, nil
|
return receipt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -639,7 +642,6 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SendTransaction updates the pending block to include the given transaction.
|
// SendTransaction updates the pending block to include the given transaction.
|
||||||
// It panics if the transaction is invalid.
|
|
||||||
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
|
func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error {
|
||||||
b.mu.Lock()
|
b.mu.Lock()
|
||||||
defer b.mu.Unlock()
|
defer b.mu.Unlock()
|
||||||
@@ -647,17 +649,17 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
|
|||||||
// Get the last block
|
// Get the last block
|
||||||
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
|
block, err := b.blockByHash(ctx, b.pendingBlock.ParentHash())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic("could not fetch parent")
|
return fmt.Errorf("could not fetch parent")
|
||||||
}
|
}
|
||||||
// Check transaction validity
|
// Check transaction validity
|
||||||
signer := types.MakeSigner(b.blockchain.Config(), block.Number())
|
signer := types.MakeSigner(b.blockchain.Config(), block.Number())
|
||||||
sender, err := types.Sender(signer, tx)
|
sender, err := types.Sender(signer, tx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(fmt.Errorf("invalid transaction: %v", err))
|
return fmt.Errorf("invalid transaction: %v", err)
|
||||||
}
|
}
|
||||||
nonce := b.pendingState.GetNonce(sender)
|
nonce := b.pendingState.GetNonce(sender)
|
||||||
if tx.Nonce() != nonce {
|
if tx.Nonce() != nonce {
|
||||||
panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce))
|
return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
|
||||||
}
|
}
|
||||||
// Include tx in chain
|
// Include tx in chain
|
||||||
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
|
blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
|
||||||
|
@@ -496,7 +496,7 @@ func TestEstimateGas(t *testing.T) {
|
|||||||
GasPrice: big.NewInt(0),
|
GasPrice: big.NewInt(0),
|
||||||
Value: nil,
|
Value: nil,
|
||||||
Data: common.Hex2Bytes("b9b046f9"),
|
Data: common.Hex2Bytes("b9b046f9"),
|
||||||
}, 0, errors.New("invalid opcode: opcode 0xfe not defined"), nil},
|
}, 0, errors.New("invalid opcode: INVALID"), nil},
|
||||||
|
|
||||||
{"Valid", ethereum.CallMsg{
|
{"Valid", ethereum.CallMsg{
|
||||||
From: addr,
|
From: addr,
|
||||||
|
@@ -88,6 +88,13 @@ func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]
|
|||||||
transactIdentifiers = make(map[string]bool)
|
transactIdentifiers = make(map[string]bool)
|
||||||
eventIdentifiers = make(map[string]bool)
|
eventIdentifiers = make(map[string]bool)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
for _, input := range evmABI.Constructor.Inputs {
|
||||||
|
if hasStruct(input.Type) {
|
||||||
|
bindStructType[lang](input.Type, structs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for _, original := range evmABI.Methods {
|
for _, original := range evmABI.Methods {
|
||||||
// Normalize the method for capital cases and non-anonymous inputs/outputs
|
// Normalize the method for capital cases and non-anonymous inputs/outputs
|
||||||
normalized := original
|
normalized := original
|
||||||
|
@@ -1911,6 +1911,50 @@ var bindTests = []struct {
|
|||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: `ConstructorWithStructParam`,
|
||||||
|
contract: `
|
||||||
|
pragma solidity >=0.8.0 <0.9.0;
|
||||||
|
|
||||||
|
contract ConstructorWithStructParam {
|
||||||
|
struct StructType {
|
||||||
|
uint256 field;
|
||||||
|
}
|
||||||
|
|
||||||
|
constructor(StructType memory st) {}
|
||||||
|
}
|
||||||
|
`,
|
||||||
|
bytecode: []string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`},
|
||||||
|
abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`},
|
||||||
|
imports: `
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||||
|
"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
|
||||||
|
"github.com/ethereum/go-ethereum/core"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
|
`,
|
||||||
|
tester: `
|
||||||
|
var (
|
||||||
|
key, _ = crypto.GenerateKey()
|
||||||
|
user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
|
||||||
|
sim = backends.NewSimulatedBackend(core.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil)
|
||||||
|
)
|
||||||
|
defer sim.Close()
|
||||||
|
|
||||||
|
_, tx, _, err := DeployConstructorWithStructParam(user, sim, ConstructorWithStructParamStructType{Field: big.NewInt(42)})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("DeployConstructorWithStructParam() got err %v; want nil err", err)
|
||||||
|
}
|
||||||
|
sim.Commit()
|
||||||
|
|
||||||
|
if _, err = bind.WaitDeployed(nil, sim, tx); err != nil {
|
||||||
|
t.Logf("Deployment tx: %+v", tx)
|
||||||
|
t.Errorf("bind.WaitDeployed(nil, %T, <deployment tx>) got err %v; want nil err", sim, err)
|
||||||
|
}
|
||||||
|
`,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Tests that packages generated by the binder can be successfully compiled and
|
// Tests that packages generated by the binder can be successfully compiled and
|
||||||
@@ -1934,22 +1978,23 @@ func TestGolangBindings(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// Generate the test suite for all the contracts
|
// Generate the test suite for all the contracts
|
||||||
for i, tt := range bindTests {
|
for i, tt := range bindTests {
|
||||||
var types []string
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
if tt.types != nil {
|
var types []string
|
||||||
types = tt.types
|
if tt.types != nil {
|
||||||
} else {
|
types = tt.types
|
||||||
types = []string{tt.name}
|
} else {
|
||||||
}
|
types = []string{tt.name}
|
||||||
// Generate the binding and create a Go source file in the workspace
|
}
|
||||||
bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases)
|
// Generate the binding and create a Go source file in the workspace
|
||||||
if err != nil {
|
bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases)
|
||||||
t.Fatalf("test %d: failed to generate binding: %v", i, err)
|
if err != nil {
|
||||||
}
|
t.Fatalf("test %d: failed to generate binding: %v", i, err)
|
||||||
if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
|
}
|
||||||
t.Fatalf("test %d: failed to write binding: %v", i, err)
|
if err = ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil {
|
||||||
}
|
t.Fatalf("test %d: failed to write binding: %v", i, err)
|
||||||
// Generate the test file with the injected test code
|
}
|
||||||
code := fmt.Sprintf(`
|
// Generate the test file with the injected test code
|
||||||
|
code := fmt.Sprintf(`
|
||||||
package bindtest
|
package bindtest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -1961,9 +2006,10 @@ func TestGolangBindings(t *testing.T) {
|
|||||||
%s
|
%s
|
||||||
}
|
}
|
||||||
`, tt.imports, tt.name, tt.tester)
|
`, tt.imports, tt.name, tt.tester)
|
||||||
if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
|
if err := ioutil.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil {
|
||||||
t.Fatalf("test %d: failed to write tests: %v", i, err)
|
t.Fatalf("test %d: failed to write tests: %v", i, err)
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
// Convert the package to go modules and use the current source for go-ethereum
|
// Convert the package to go modules and use the current source for go-ethereum
|
||||||
moder := exec.Command(gocmd, "mod", "init", "bindtest")
|
moder := exec.Command(gocmd, "mod", "init", "bindtest")
|
||||||
|
@@ -21,6 +21,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum"
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
@@ -35,14 +36,16 @@ func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*ty
|
|||||||
logger := log.New("hash", tx.Hash())
|
logger := log.New("hash", tx.Hash())
|
||||||
for {
|
for {
|
||||||
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
|
||||||
if receipt != nil {
|
if err == nil {
|
||||||
return receipt, nil
|
return receipt, nil
|
||||||
}
|
}
|
||||||
if err != nil {
|
|
||||||
logger.Trace("Receipt retrieval failed", "err", err)
|
if errors.Is(err, ethereum.NotFound) {
|
||||||
} else {
|
|
||||||
logger.Trace("Transaction not yet mined")
|
logger.Trace("Transaction not yet mined")
|
||||||
|
} else {
|
||||||
|
logger.Trace("Receipt retrieval failed", "err", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the next round.
|
// Wait for the next round.
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
@@ -290,7 +290,7 @@ func tuplePointsTo(index int, output []byte) (start int, err error) {
|
|||||||
offset := big.NewInt(0).SetBytes(output[index : index+32])
|
offset := big.NewInt(0).SetBytes(output[index : index+32])
|
||||||
outputLen := big.NewInt(int64(len(output)))
|
outputLen := big.NewInt(int64(len(output)))
|
||||||
|
|
||||||
if offset.Cmp(big.NewInt(int64(len(output)))) > 0 {
|
if offset.Cmp(outputLen) > 0 {
|
||||||
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
|
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
|
||||||
}
|
}
|
||||||
if offset.BitLen() > 63 {
|
if offset.BitLen() > 63 {
|
||||||
|
@@ -46,7 +46,7 @@ const (
|
|||||||
// accounts (derived from the same seed).
|
// accounts (derived from the same seed).
|
||||||
type Wallet interface {
|
type Wallet interface {
|
||||||
// URL retrieves the canonical path under which this wallet is reachable. It is
|
// URL retrieves the canonical path under which this wallet is reachable. It is
|
||||||
// user by upper layers to define a sorting order over all wallets from multiple
|
// used by upper layers to define a sorting order over all wallets from multiple
|
||||||
// backends.
|
// backends.
|
||||||
URL() URL
|
URL() URL
|
||||||
|
|
||||||
@@ -89,7 +89,7 @@ type Wallet interface {
|
|||||||
// accounts.
|
// accounts.
|
||||||
//
|
//
|
||||||
// Note, self derivation will increment the last component of the specified path
|
// Note, self derivation will increment the last component of the specified path
|
||||||
// opposed to decending into a child path to allow discovering accounts starting
|
// opposed to descending into a child path to allow discovering accounts starting
|
||||||
// from non zero components.
|
// from non zero components.
|
||||||
//
|
//
|
||||||
// Some hardware wallets switched derivation paths through their evolution, so
|
// Some hardware wallets switched derivation paths through their evolution, so
|
||||||
@@ -105,7 +105,7 @@ type Wallet interface {
|
|||||||
// or optionally with the aid of any location metadata from the embedded URL field.
|
// or optionally with the aid of any location metadata from the embedded URL field.
|
||||||
//
|
//
|
||||||
// If the wallet requires additional authentication to sign the request (e.g.
|
// If the wallet requires additional authentication to sign the request (e.g.
|
||||||
// a password to decrypt the account, or a PIN code o verify the transaction),
|
// a password to decrypt the account, or a PIN code to verify the transaction),
|
||||||
// an AuthNeededError instance will be returned, containing infos for the user
|
// an AuthNeededError instance will be returned, containing infos for the user
|
||||||
// about which fields or actions are needed. The user may retry by providing
|
// about which fields or actions are needed. The user may retry by providing
|
||||||
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
|
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
|
||||||
@@ -124,13 +124,13 @@ type Wallet interface {
|
|||||||
// or optionally with the aid of any location metadata from the embedded URL field.
|
// or optionally with the aid of any location metadata from the embedded URL field.
|
||||||
//
|
//
|
||||||
// If the wallet requires additional authentication to sign the request (e.g.
|
// If the wallet requires additional authentication to sign the request (e.g.
|
||||||
// a password to decrypt the account, or a PIN code o verify the transaction),
|
// a password to decrypt the account, or a PIN code to verify the transaction),
|
||||||
// an AuthNeededError instance will be returned, containing infos for the user
|
// an AuthNeededError instance will be returned, containing infos for the user
|
||||||
// about which fields or actions are needed. The user may retry by providing
|
// about which fields or actions are needed. The user may retry by providing
|
||||||
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
|
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
|
||||||
// the account in a keystore).
|
// the account in a keystore).
|
||||||
//
|
//
|
||||||
// This method should return the signature in 'canonical' format, with v 0 or 1
|
// This method should return the signature in 'canonical' format, with v 0 or 1.
|
||||||
SignText(account Account, text []byte) ([]byte, error)
|
SignText(account Account, text []byte) ([]byte, error)
|
||||||
|
|
||||||
// SignTextWithPassphrase is identical to Signtext, but also takes a password
|
// SignTextWithPassphrase is identical to Signtext, but also takes a password
|
||||||
@@ -176,7 +176,7 @@ type Backend interface {
|
|||||||
// TextHash is a helper function that calculates a hash for the given message that can be
|
// TextHash is a helper function that calculates a hash for the given message that can be
|
||||||
// safely used to calculate a signature from.
|
// safely used to calculate a signature from.
|
||||||
//
|
//
|
||||||
// The hash is calulcated as
|
// The hash is calculated as
|
||||||
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
||||||
//
|
//
|
||||||
// This gives context to the signed message and prevents signing of transactions.
|
// This gives context to the signed message and prevents signing of transactions.
|
||||||
@@ -188,7 +188,7 @@ func TextHash(data []byte) []byte {
|
|||||||
// TextAndHash is a helper function that calculates a hash for the given message that can be
|
// TextAndHash is a helper function that calculates a hash for the given message that can be
|
||||||
// safely used to calculate a signature from.
|
// safely used to calculate a signature from.
|
||||||
//
|
//
|
||||||
// The hash is calulcated as
|
// The hash is calculated as
|
||||||
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
||||||
//
|
//
|
||||||
// This gives context to the signed message and prevents signing of transactions.
|
// This gives context to the signed message and prevents signing of transactions.
|
||||||
|
@@ -42,7 +42,7 @@ var ErrInvalidPassphrase = errors.New("invalid password")
|
|||||||
var ErrWalletAlreadyOpen = errors.New("wallet already open")
|
var ErrWalletAlreadyOpen = errors.New("wallet already open")
|
||||||
|
|
||||||
// ErrWalletClosed is returned if a wallet is attempted to be opened the
|
// ErrWalletClosed is returned if a wallet is attempted to be opened the
|
||||||
// secodn time.
|
// second time.
|
||||||
var ErrWalletClosed = errors.New("wallet closed")
|
var ErrWalletClosed = errors.New("wallet closed")
|
||||||
|
|
||||||
// AuthNeededError is returned by backends for signing requests where the user
|
// AuthNeededError is returned by backends for signing requests where the user
|
||||||
|
@@ -638,7 +638,7 @@ func (w *Wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
|
|||||||
// accounts.
|
// accounts.
|
||||||
//
|
//
|
||||||
// Note, self derivation will increment the last component of the specified path
|
// Note, self derivation will increment the last component of the specified path
|
||||||
// opposed to decending into a child path to allow discovering accounts starting
|
// opposed to descending into a child path to allow discovering accounts starting
|
||||||
// from non zero components.
|
// from non zero components.
|
||||||
//
|
//
|
||||||
// Some hardware wallets switched derivation paths through their evolution, so
|
// Some hardware wallets switched derivation paths through their evolution, so
|
||||||
|
@@ -496,7 +496,7 @@ func (w *wallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Accoun
|
|||||||
// accounts.
|
// accounts.
|
||||||
//
|
//
|
||||||
// Note, self derivation will increment the last component of the specified path
|
// Note, self derivation will increment the last component of the specified path
|
||||||
// opposed to decending into a child path to allow discovering accounts starting
|
// opposed to descending into a child path to allow discovering accounts starting
|
||||||
// from non zero components.
|
// from non zero components.
|
||||||
//
|
//
|
||||||
// Some hardware wallets switched derivation paths through their evolution, so
|
// Some hardware wallets switched derivation paths through their evolution, so
|
||||||
|
@@ -1,19 +1,19 @@
|
|||||||
# This file contains sha256 checksums of optional build dependencies.
|
# This file contains sha256 checksums of optional build dependencies.
|
||||||
|
|
||||||
2255eb3e4e824dd7d5fcdc2e7f84534371c186312e546fb1086a34c17752f431 go1.17.2.src.tar.gz
|
3defb9a09bed042403195e872dcbc8c6fae1485963332279668ec52e80a95a2d go1.17.5.src.tar.gz
|
||||||
7914497a302a132a465d33f5ee044ce05568bacdb390ab805cb75a3435a23f94 go1.17.2.darwin-amd64.tar.gz
|
2db6a5d25815b56072465a2cacc8ed426c18f1d5fc26c1fc8c4f5a7188658264 go1.17.5.darwin-amd64.tar.gz
|
||||||
ce8771bd3edfb5b28104084b56bbb532eeb47fbb7769c3e664c6223712c30904 go1.17.2.darwin-arm64.tar.gz
|
111f71166de0cb8089bb3e8f9f5b02d76e1bf1309256824d4062a47b0e5f98e0 go1.17.5.darwin-arm64.tar.gz
|
||||||
8cea5b8d1f8e8cbb58069bfed58954c71c5b1aca2f3c857765dae83bf724d0d7 go1.17.2.freebsd-386.tar.gz
|
443c1cd9768df02085014f1eb034ebc7dbe032ffc8a9bb9f2e6617d037eee23c go1.17.5.freebsd-386.tar.gz
|
||||||
c96e57218fb03e74d683ad63b1684d44c89d5e5b994f36102b33dce21b58499a go1.17.2.freebsd-amd64.tar.gz
|
17180bdc4126acffd0ebf86d66ef5cbc3488b6734e93374fb00eb09494e006d3 go1.17.5.freebsd-amd64.tar.gz
|
||||||
8617f2e40d51076983502894181ae639d1d8101bfbc4d7463a2b442f239f5596 go1.17.2.linux-386.tar.gz
|
4f4914303bc18f24fd137a97e595735308f5ce81323c7224c12466fd763fc59f go1.17.5.linux-386.tar.gz
|
||||||
f242a9db6a0ad1846de7b6d94d507915d14062660616a61ef7c808a76e4f1676 go1.17.2.linux-amd64.tar.gz
|
bd78114b0d441b029c8fe0341f4910370925a4d270a6a590668840675b0c653e go1.17.5.linux-amd64.tar.gz
|
||||||
a5a43c9cdabdb9f371d56951b14290eba8ce2f9b0db48fb5fc657943984fd4fc go1.17.2.linux-arm64.tar.gz
|
6f95ce3da40d9ce1355e48f31f4eb6508382415ca4d7413b1e7a3314e6430e7e go1.17.5.linux-arm64.tar.gz
|
||||||
04d16105008230a9763005be05606f7eb1c683a3dbf0fbfed4034b23889cb7f2 go1.17.2.linux-armv6l.tar.gz
|
aa1fb6c53b4fe72f159333362a10aca37ae938bde8adc9c6eaf2a8e87d1e47de go1.17.5.linux-armv6l.tar.gz
|
||||||
12e2dc7e0ffeebe77083f267ef6705fec1621cdf2ed6489b3af04a13597ed68d go1.17.2.linux-ppc64le.tar.gz
|
3d4be616e568f0a02cb7f7769bcaafda4b0969ed0f9bb4277619930b96847e70 go1.17.5.linux-ppc64le.tar.gz
|
||||||
c4b2349a8d11350ca038b8c57f3cc58dc0b31284bcbed4f7fca39aeed28b4a51 go1.17.2.linux-s390x.tar.gz
|
8087d4fe991e82804e6485c26568c2e0ee0bfde00ceb9015dc86cb6bf84ef40b go1.17.5.linux-s390x.tar.gz
|
||||||
8a85257a351996fdf045fe95ed5fdd6917dd48636d562dd11dedf193005a53e0 go1.17.2.windows-386.zip
|
6d7b9948ee14a906b14f5cbebdfab63cd6828b0b618160847ecd3cc3470a26fe go1.17.5.windows-386.zip
|
||||||
fa6da0b829a66f5fab7e4e312fd6aa1b2d8f045c7ecee83b3d00f6fe5306759a go1.17.2.windows-amd64.zip
|
671faf99cd5d81cd7e40936c0a94363c64d654faa0148d2af4bbc262555620b9 go1.17.5.windows-amd64.zip
|
||||||
00575c85dc7a129ba892685a456b27a3f3670f71c8bfde1c5ad151f771d55df7 go1.17.2.windows-arm64.zip
|
45e88676b68e9cf364be469b5a27965397f4e339aa622c2f52c10433c56e5030 go1.17.5.windows-arm64.zip
|
||||||
|
|
||||||
d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
|
d4bd25b9814eeaa2134197dd2c7671bb791eae786d42010d9d788af20dee4bfa golangci-lint-1.42.0-darwin-amd64.tar.gz
|
||||||
e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
|
e56859c04a2ad5390c6a497b1acb1cc9329ecb1010260c6faae9b5a4c35b35ea golangci-lint-1.42.0-darwin-arm64.tar.gz
|
||||||
|
@@ -147,7 +147,7 @@ var (
|
|||||||
// This is the version of go that will be downloaded by
|
// This is the version of go that will be downloaded by
|
||||||
//
|
//
|
||||||
// go run ci.go install -dlgo
|
// go run ci.go install -dlgo
|
||||||
dlgoVersion = "1.17.2"
|
dlgoVersion = "1.17.5"
|
||||||
)
|
)
|
||||||
|
|
||||||
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin"))
|
||||||
@@ -334,7 +334,11 @@ func downloadLinter(cachedir string) string {
|
|||||||
const version = "1.42.0"
|
const version = "1.42.0"
|
||||||
|
|
||||||
csdb := build.MustLoadChecksums("build/checksums.txt")
|
csdb := build.MustLoadChecksums("build/checksums.txt")
|
||||||
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
|
arch := runtime.GOARCH
|
||||||
|
if arch == "arm" {
|
||||||
|
arch += "v" + os.Getenv("GOARM")
|
||||||
|
}
|
||||||
|
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
|
||||||
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
|
url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
|
||||||
archivePath := filepath.Join(cachedir, base+".tar.gz")
|
archivePath := filepath.Join(cachedir, base+".tar.gz")
|
||||||
if err := csdb.DownloadFile(url, archivePath); err != nil {
|
if err := csdb.DownloadFile(url, archivePath); err != nil {
|
||||||
|
@@ -898,7 +898,7 @@ func testExternalUI(api *core.SignerAPI) {
|
|||||||
addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899")
|
addr, _ := common.NewMixedcaseAddressFromString("0x0011223344556677889900112233445566778899")
|
||||||
data := `{"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"test","type":"uint8"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":"1","verifyingContract":"0xCCCcccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","test":"3","wallet":"0xcD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB","test":"2"},"contents":"Hello, Bob!"}}`
|
data := `{"types":{"EIP712Domain":[{"name":"name","type":"string"},{"name":"version","type":"string"},{"name":"chainId","type":"uint256"},{"name":"verifyingContract","type":"address"}],"Person":[{"name":"name","type":"string"},{"name":"test","type":"uint8"},{"name":"wallet","type":"address"}],"Mail":[{"name":"from","type":"Person"},{"name":"to","type":"Person"},{"name":"contents","type":"string"}]},"primaryType":"Mail","domain":{"name":"Ether Mail","version":"1","chainId":"1","verifyingContract":"0xCCCcccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"},"message":{"from":{"name":"Cow","test":"3","wallet":"0xcD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"},"to":{"name":"Bob","wallet":"0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB","test":"2"},"contents":"Hello, Bob!"}}`
|
||||||
//_, err := api.SignData(ctx, accounts.MimetypeTypedData, *addr, hexutil.Encode([]byte(data)))
|
//_, err := api.SignData(ctx, accounts.MimetypeTypedData, *addr, hexutil.Encode([]byte(data)))
|
||||||
var typedData core.TypedData
|
var typedData apitypes.TypedData
|
||||||
json.Unmarshal([]byte(data), &typedData)
|
json.Unmarshal([]byte(data), &typedData)
|
||||||
_, err := api.SignTypedData(ctx, *addr, typedData)
|
_, err := api.SignTypedData(ctx, *addr, typedData)
|
||||||
expectApprove("sign 712 typed data", err)
|
expectApprove("sign 712 typed data", err)
|
||||||
@@ -1025,7 +1025,7 @@ func GenDoc(ctx *cli.Context) {
|
|||||||
"of the work in canonicalizing and making sense of the data, and it's up to the UI to present" +
|
"of the work in canonicalizing and making sense of the data, and it's up to the UI to present" +
|
||||||
"the user with the contents of the `message`"
|
"the user with the contents of the `message`"
|
||||||
sighash, msg := accounts.TextAndHash([]byte("hello world"))
|
sighash, msg := accounts.TextAndHash([]byte("hello world"))
|
||||||
messages := []*core.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}}
|
messages := []*apitypes.NameValueType{{Name: "message", Value: msg, Typ: accounts.MimetypeTextPlain}}
|
||||||
|
|
||||||
add("SignDataRequest", desc, &core.SignDataRequest{
|
add("SignDataRequest", desc, &core.SignDataRequest{
|
||||||
Address: common.NewMixedcaseAddress(a),
|
Address: common.NewMixedcaseAddress(a),
|
||||||
|
@@ -26,6 +26,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core"
|
"github.com/ethereum/go-ethereum/core"
|
||||||
"github.com/ethereum/go-ethereum/core/forkid"
|
"github.com/ethereum/go-ethereum/core/forkid"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
@@ -67,6 +68,13 @@ func (c *Chain) TotalDifficultyAt(height int) *big.Int {
|
|||||||
return sum
|
return sum
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Chain) RootAt(height int) common.Hash {
|
||||||
|
if height < c.Len() {
|
||||||
|
return c.blocks[height].Root()
|
||||||
|
}
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
|
||||||
// ForkID gets the fork id of the chain.
|
// ForkID gets the fork id of the chain.
|
||||||
func (c *Chain) ForkID() forkid.ID {
|
func (c *Chain) ForkID() forkid.ID {
|
||||||
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
|
return forkid.NewID(c.chainConfig, c.blocks[0].Hash(), uint64(c.Len()))
|
||||||
|
@@ -96,6 +96,19 @@ func (s *Suite) dial66() (*Conn, error) {
|
|||||||
return conn, nil
|
return conn, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// dial66 attempts to dial the given node and perform a handshake,
|
||||||
|
// returning the created Conn with additional snap/1 capabilities if
|
||||||
|
// successful.
|
||||||
|
func (s *Suite) dialSnap() (*Conn, error) {
|
||||||
|
conn, err := s.dial66()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
conn.caps = append(conn.caps, p2p.Cap{Name: "snap", Version: 1})
|
||||||
|
conn.ourHighestSnapProtoVersion = 1
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|
||||||
// peer performs both the protocol handshake and the status message
|
// peer performs both the protocol handshake and the status message
|
||||||
// exchange with the node in order to peer with it.
|
// exchange with the node in order to peer with it.
|
||||||
func (c *Conn) peer(chain *Chain, status *Status) error {
|
func (c *Conn) peer(chain *Chain, status *Status) error {
|
||||||
@@ -131,7 +144,11 @@ func (c *Conn) handshake() error {
|
|||||||
}
|
}
|
||||||
c.negotiateEthProtocol(msg.Caps)
|
c.negotiateEthProtocol(msg.Caps)
|
||||||
if c.negotiatedProtoVersion == 0 {
|
if c.negotiatedProtoVersion == 0 {
|
||||||
return fmt.Errorf("could not negotiate protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
|
return fmt.Errorf("could not negotiate eth protocol (remote caps: %v, local eth version: %v)", msg.Caps, c.ourHighestProtoVersion)
|
||||||
|
}
|
||||||
|
// If we require snap, verify that it was negotiated
|
||||||
|
if c.ourHighestSnapProtoVersion != c.negotiatedSnapProtoVersion {
|
||||||
|
return fmt.Errorf("could not negotiate snap protocol (remote caps: %v, local snap version: %v)", msg.Caps, c.ourHighestSnapProtoVersion)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
default:
|
default:
|
||||||
@@ -143,15 +160,21 @@ func (c *Conn) handshake() error {
|
|||||||
// advertised capability from peer.
|
// advertised capability from peer.
|
||||||
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
|
func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) {
|
||||||
var highestEthVersion uint
|
var highestEthVersion uint
|
||||||
|
var highestSnapVersion uint
|
||||||
for _, capability := range caps {
|
for _, capability := range caps {
|
||||||
if capability.Name != "eth" {
|
switch capability.Name {
|
||||||
continue
|
case "eth":
|
||||||
}
|
if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
|
||||||
if capability.Version > highestEthVersion && capability.Version <= c.ourHighestProtoVersion {
|
highestEthVersion = capability.Version
|
||||||
highestEthVersion = capability.Version
|
}
|
||||||
|
case "snap":
|
||||||
|
if capability.Version > highestSnapVersion && capability.Version <= c.ourHighestSnapProtoVersion {
|
||||||
|
highestSnapVersion = capability.Version
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
c.negotiatedProtoVersion = highestEthVersion
|
c.negotiatedProtoVersion = highestEthVersion
|
||||||
|
c.negotiatedSnapProtoVersion = highestSnapVersion
|
||||||
}
|
}
|
||||||
|
|
||||||
// statusExchange performs a `Status` message exchange with the given node.
|
// statusExchange performs a `Status` message exchange with the given node.
|
||||||
@@ -325,6 +348,15 @@ func (c *Conn) headersRequest(request *GetBlockHeaders, chain *Chain, isEth66 bo
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Conn) snapRequest(msg Message, id uint64, chain *Chain) (Message, error) {
|
||||||
|
defer c.SetReadDeadline(time.Time{})
|
||||||
|
c.SetReadDeadline(time.Now().Add(5 * time.Second))
|
||||||
|
if err := c.Write(msg); err != nil {
|
||||||
|
return nil, fmt.Errorf("could not write to connection: %v", err)
|
||||||
|
}
|
||||||
|
return c.ReadSnap(id)
|
||||||
|
}
|
||||||
|
|
||||||
// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
|
// getBlockHeaders66 executes the given `GetBlockHeaders` request over the eth66 protocol.
|
||||||
func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
|
func getBlockHeaders66(chain *Chain, conn *Conn, request *GetBlockHeaders, id uint64) (BlockHeaders, error) {
|
||||||
// write request
|
// write request
|
||||||
|
675
cmd/devp2p/internal/ethtest/snap.go
Normal file
675
cmd/devp2p/internal/ethtest/snap.go
Normal file
@@ -0,0 +1,675 @@
|
|||||||
|
// Copyright 2014 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package ethtest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
|
"github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||||
|
"github.com/ethereum/go-ethereum/internal/utesting"
|
||||||
|
"github.com/ethereum/go-ethereum/light"
|
||||||
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
"golang.org/x/crypto/sha3"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Suite) TestSnapStatus(t *utesting.T) {
|
||||||
|
conn, err := s.dialSnap()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
if err := conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type accRangeTest struct {
|
||||||
|
nBytes uint64
|
||||||
|
root common.Hash
|
||||||
|
origin common.Hash
|
||||||
|
limit common.Hash
|
||||||
|
|
||||||
|
expAccounts int
|
||||||
|
expFirst common.Hash
|
||||||
|
expLast common.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSnapGetAccountRange various forms of GetAccountRange requests.
|
||||||
|
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
|
||||||
|
var (
|
||||||
|
root = s.chain.RootAt(999)
|
||||||
|
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||||
|
zero = common.Hash{}
|
||||||
|
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
|
||||||
|
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||||
|
firstKeyPlus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b")
|
||||||
|
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
|
||||||
|
storageRoot = common.HexToHash("0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790")
|
||||||
|
)
|
||||||
|
for i, tc := range []accRangeTest{
|
||||||
|
// Tests decreasing the number of bytes
|
||||||
|
{4000, root, zero, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
|
||||||
|
{3000, root, zero, ffHash, 57, firstKey, common.HexToHash("0x9b63fa753ece5cb90657d02ecb15df4dc1508d8c1d187af1bf7f1a05e747d3c7")},
|
||||||
|
{2000, root, zero, ffHash, 38, firstKey, common.HexToHash("0x5e6140ecae4354a9e8f47559a8c6209c1e0e69cb077b067b528556c11698b91f")},
|
||||||
|
{1, root, zero, ffHash, 1, firstKey, firstKey},
|
||||||
|
|
||||||
|
// Tests variations of the range
|
||||||
|
//
|
||||||
|
// [00b to firstkey]: should return [firstkey, secondkey], where secondkey is out of bounds
|
||||||
|
{4000, root, common.HexToHash("0x00bf000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2b"), 2, firstKey, secondKey},
|
||||||
|
// [00b0 to 0bf0]: where both are before firstkey. Should return firstKey (even though it's out of bounds)
|
||||||
|
{4000, root, common.HexToHash("0x00b0000000000000000000000000000000000000000000000000000000000000"), common.HexToHash("0x00bf100000000000000000000000000000000000000000000000000000000000"), 1, firstKey, firstKey},
|
||||||
|
{4000, root, zero, zero, 1, firstKey, firstKey},
|
||||||
|
{4000, root, firstKey, ffHash, 76, firstKey, common.HexToHash("0xd2669dcf3858e7f1eecb8b5fedbf22fbea3e9433848a75035f79d68422c2dcda")},
|
||||||
|
{4000, root, firstKeyPlus1, ffHash, 76, secondKey, common.HexToHash("0xd28f55d3b994f16389f36944ad685b48e0fc3f8fbe86c3ca92ebecadf16a783f")},
|
||||||
|
|
||||||
|
// Test different root hashes
|
||||||
|
//
|
||||||
|
// A stateroot that does not exist
|
||||||
|
{4000, common.Hash{0x13, 37}, zero, ffHash, 0, zero, zero},
|
||||||
|
// The genesis stateroot (we expect it to not be served)
|
||||||
|
{4000, s.chain.RootAt(0), zero, ffHash, 0, zero, zero},
|
||||||
|
// A 127 block old stateroot, expected to be served
|
||||||
|
{4000, s.chain.RootAt(999 - 127), zero, ffHash, 77, firstKey, common.HexToHash("0xe4c6fdef5dd4e789a2612390806ee840b8ec0fe52548f8b4efe41abb20c37aac")},
|
||||||
|
// A root which is not actually an account root, but a storage orot
|
||||||
|
{4000, storageRoot, zero, ffHash, 0, zero, zero},
|
||||||
|
|
||||||
|
// And some non-sensical requests
|
||||||
|
//
|
||||||
|
// range from [0xFF to 0x00], wrong order. Expect not to be serviced
|
||||||
|
{4000, root, ffHash, zero, 0, zero, zero},
|
||||||
|
// range from [firstkey, firstkey-1], wrong order. Expect to get first key.
|
||||||
|
{4000, root, firstKey, firstKeyMinus1, 1, firstKey, firstKey},
|
||||||
|
// range from [firstkey, 0], wrong order. Expect to get first key.
|
||||||
|
{4000, root, firstKey, zero, 1, firstKey, firstKey},
|
||||||
|
// Max bytes: 0. Expect to deliver one account.
|
||||||
|
{0, root, zero, ffHash, 1, firstKey, firstKey},
|
||||||
|
} {
|
||||||
|
if err := s.snapGetAccountRange(t, &tc); err != nil {
|
||||||
|
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\nfailed: %v", i, tc.root, tc.origin, tc.limit, tc.nBytes, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type stRangesTest struct {
|
||||||
|
root common.Hash
|
||||||
|
accounts []common.Hash
|
||||||
|
origin []byte
|
||||||
|
limit []byte
|
||||||
|
nBytes uint64
|
||||||
|
|
||||||
|
expSlots int
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSnapGetStorageRange various forms of GetStorageRanges requests.
|
||||||
|
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
|
||||||
|
var (
|
||||||
|
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||||
|
zero = common.Hash{}
|
||||||
|
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||||
|
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
|
||||||
|
)
|
||||||
|
for i, tc := range []stRangesTest{
|
||||||
|
{
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
accounts: []common.Hash{secondKey, firstKey},
|
||||||
|
origin: zero[:],
|
||||||
|
limit: ffHash[:],
|
||||||
|
nBytes: 500,
|
||||||
|
expSlots: 0,
|
||||||
|
},
|
||||||
|
|
||||||
|
/*
|
||||||
|
Some tests against this account:
|
||||||
|
{
|
||||||
|
"balance": "0",
|
||||||
|
"nonce": 1,
|
||||||
|
"root": "0xbe3d75a1729be157e79c3b77f00206db4d54e3ea14375a015451c88ec067c790",
|
||||||
|
"codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
|
||||||
|
"storage": {
|
||||||
|
"0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace": "02",
|
||||||
|
"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6": "01",
|
||||||
|
"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b": "03"
|
||||||
|
},
|
||||||
|
"key": "0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844"
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
{ // [:] -> [slot1, slot2, slot3]
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||||
|
origin: zero[:],
|
||||||
|
limit: ffHash[:],
|
||||||
|
nBytes: 500,
|
||||||
|
expSlots: 3,
|
||||||
|
},
|
||||||
|
{ // [slot1:] -> [slot1, slot2, slot3]
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||||
|
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
|
||||||
|
limit: ffHash[:],
|
||||||
|
nBytes: 500,
|
||||||
|
expSlots: 3,
|
||||||
|
},
|
||||||
|
{ // [slot1+ :] -> [slot2, slot3]
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||||
|
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5acf"),
|
||||||
|
limit: ffHash[:],
|
||||||
|
nBytes: 500,
|
||||||
|
expSlots: 2,
|
||||||
|
},
|
||||||
|
{ // [slot1:slot2] -> [slot1, slot2]
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||||
|
origin: common.FromHex("0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace"),
|
||||||
|
limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6"),
|
||||||
|
nBytes: 500,
|
||||||
|
expSlots: 2,
|
||||||
|
},
|
||||||
|
{ // [slot1+:slot2+] -> [slot2, slot3]
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
accounts: []common.Hash{common.HexToHash("0xf493f79c43bd747129a226ad42529885a4b108aba6046b2d12071695a6627844")},
|
||||||
|
origin: common.FromHex("0x4fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
|
||||||
|
limit: common.FromHex("0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf7"),
|
||||||
|
nBytes: 500,
|
||||||
|
expSlots: 2,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if err := s.snapGetStorageRanges(t, &tc); err != nil {
|
||||||
|
t.Errorf("test %d \n root: %x\n range: %#x - %#x\n bytes: %d\n #accounts: %d\nfailed: %v",
|
||||||
|
i, tc.root, tc.origin, tc.limit, tc.nBytes, len(tc.accounts), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type byteCodesTest struct {
|
||||||
|
nBytes uint64
|
||||||
|
hashes []common.Hash
|
||||||
|
|
||||||
|
expHashes int
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// emptyRoot is the known root hash of an empty trie.
|
||||||
|
emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||||
|
// emptyCode is the known hash of the empty EVM bytecode.
|
||||||
|
emptyCode = common.HexToHash("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSnapGetByteCodes various forms of GetByteCodes requests.
|
||||||
|
func (s *Suite) TestSnapGetByteCodes(t *utesting.T) {
|
||||||
|
// The halfchain import should yield these bytecodes
|
||||||
|
var hcBytecodes []common.Hash
|
||||||
|
for _, s := range []string{
|
||||||
|
"0x200c90460d8b0063210d5f5b9918e053c8f2c024485e0f1b48be8b1fc71b1317",
|
||||||
|
"0x20ba67ed4ac6aff626e0d1d4db623e2fada9593daeefc4a6eb4b70e6cff986f3",
|
||||||
|
"0x24b5b4902cb3d897c1cee9f16be8e897d8fa277c04c6dc8214f18295fca5de44",
|
||||||
|
"0x320b9d0a2be39b8a1c858f9f8cb96b1df0983071681de07ded3a7c0d05db5fd6",
|
||||||
|
"0x48cb0d5275936a24632babc7408339f9f7b051274809de565b8b0db76e97e03c",
|
||||||
|
"0x67c7a6f5cdaa43b4baa0e15b2be63346d1b9ce9f2c3d7e5804e0cacd44ee3b04",
|
||||||
|
"0x6d8418059bdc8c3fabf445e6bfc662af3b6a4ae45999b953996e42c7ead2ab49",
|
||||||
|
"0x7043422e5795d03f17ee0463a37235258e609fdd542247754895d72695e3e142",
|
||||||
|
"0x727f9e6f0c4bac1ff8d72c2972122d9c8d37ccb37e04edde2339e8da193546f1",
|
||||||
|
"0x86ccd5e23c78568a8334e0cebaf3e9f48c998307b0bfb1c378cee83b4bfb29cb",
|
||||||
|
"0x8fc89b00d6deafd4c4279531e743365626dbfa28845ec697919d305c2674302d",
|
||||||
|
"0x92cfc353bcb9746bb6f9996b6b9df779c88af2e9e0eeac44879ca19887c9b732",
|
||||||
|
"0x941b4872104f0995a4898fcf0f615ea6bf46bfbdfcf63ea8f2fd45b3f3286b77",
|
||||||
|
"0xa02fe8f41159bb39d2b704c633c3d6389cf4bfcb61a2539a9155f60786cf815f",
|
||||||
|
"0xa4b94e0afdffcb0af599677709dac067d3145489ea7aede57672bee43e3b7373",
|
||||||
|
"0xaf4e64edd3234c1205b725e42963becd1085f013590bd7ed93f8d711c5eb65fb",
|
||||||
|
"0xb69a18fa855b742031420081999086f6fb56c3930ae8840944e8b8ae9931c51e",
|
||||||
|
"0xc246c217bc73ce6666c93a93a94faa5250564f50a3fdc27ea74c231c07fe2ca6",
|
||||||
|
"0xcd6e4ab2c3034df2a8a1dfaaeb1c4baecd162a93d22de35e854ee2945cbe0c35",
|
||||||
|
"0xe24b692d09d6fc2f3d1a6028c400a27c37d7cbb11511907c013946d6ce263d3b",
|
||||||
|
"0xe440c5f0e8603fd1ed25976eee261ccee8038cf79d6a4c0eb31b2bf883be737f",
|
||||||
|
"0xe6eacbc509203d21ac814b350e72934fde686b7f673c19be8cf956b0c70078ce",
|
||||||
|
"0xe8530de4371467b5be7ea0e69e675ab36832c426d6c1ce9513817c0f0ae1486b",
|
||||||
|
"0xe85d487abbbc83bf3423cf9731360cf4f5a37220e18e5add54e72ee20861196a",
|
||||||
|
"0xf195ea389a5eea28db0be93660014275b158963dec44af1dfa7d4743019a9a49",
|
||||||
|
} {
|
||||||
|
hcBytecodes = append(hcBytecodes, common.HexToHash(s))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tc := range []byteCodesTest{
|
||||||
|
// A few stateroots
|
||||||
|
{
|
||||||
|
nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(999)},
|
||||||
|
expHashes: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
nBytes: 10000, hashes: []common.Hash{s.chain.RootAt(0), s.chain.RootAt(0)},
|
||||||
|
expHashes: 0,
|
||||||
|
},
|
||||||
|
// Empties
|
||||||
|
{
|
||||||
|
nBytes: 10000, hashes: []common.Hash{emptyRoot},
|
||||||
|
expHashes: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
nBytes: 10000, hashes: []common.Hash{emptyCode},
|
||||||
|
expHashes: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
nBytes: 10000, hashes: []common.Hash{emptyCode, emptyCode, emptyCode},
|
||||||
|
expHashes: 3,
|
||||||
|
},
|
||||||
|
// The existing bytecodes
|
||||||
|
{
|
||||||
|
nBytes: 10000, hashes: hcBytecodes,
|
||||||
|
expHashes: len(hcBytecodes),
|
||||||
|
},
|
||||||
|
// The existing, with limited byte arg
|
||||||
|
{
|
||||||
|
nBytes: 1, hashes: hcBytecodes,
|
||||||
|
expHashes: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
nBytes: 0, hashes: hcBytecodes,
|
||||||
|
expHashes: 1,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
nBytes: 1000, hashes: []common.Hash{hcBytecodes[0], hcBytecodes[0], hcBytecodes[0], hcBytecodes[0]},
|
||||||
|
expHashes: 4,
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if err := s.snapGetByteCodes(t, &tc); err != nil {
|
||||||
|
t.Errorf("test %d \n bytes: %d\n #hashes: %d\nfailed: %v", i, tc.nBytes, len(tc.hashes), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type trieNodesTest struct {
|
||||||
|
root common.Hash
|
||||||
|
paths []snap.TrieNodePathSet
|
||||||
|
nBytes uint64
|
||||||
|
|
||||||
|
expHashes []common.Hash
|
||||||
|
expReject bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeNibbles packs a sequence of hex nibbles into bytes, two nibbles
// per output byte with the high nibble first. The nibble slice must have
// even length and the output must be at least half as long.
func decodeNibbles(nibbles []byte, bytes []byte) {
    for ni := 0; ni < len(nibbles); ni += 2 {
        bytes[ni/2] = nibbles[ni]<<4 | nibbles[ni+1]
    }
}
|
||||||
|
|
||||||
|
// hasTerm reports whether a hex-nibble key ends in the terminator
// flag (the value 16), which marks a leaf key.
func hasTerm(s []byte) bool {
    if len(s) == 0 {
        return false
    }
    return s[len(s)-1] == 16
}
|
||||||
|
|
||||||
|
// keybytesToHex expands a byte key into hex-nibble form: each input byte
// becomes two nibbles (high first), followed by the terminator value 16.
func keybytesToHex(str []byte) []byte {
    nibbles := make([]byte, len(str)*2+1)
    for i, b := range str {
        nibbles[i*2] = b >> 4
        nibbles[i*2+1] = b & 0x0f
    }
    nibbles[len(nibbles)-1] = 16
    return nibbles
}
|
||||||
|
|
||||||
|
// hexToCompact converts a hex-nibble key into the compact trie path
// encoding. The first byte carries the terminator flag (bit 5) and the
// odd-length flag (bit 4, together with the leading nibble for odd
// keys); the remaining bytes pack two nibbles each.
func hexToCompact(hex []byte) []byte {
    var terminator byte
    if len(hex) > 0 && hex[len(hex)-1] == 16 { // terminator nibble present
        terminator = 1
        hex = hex[:len(hex)-1]
    }
    buf := make([]byte, len(hex)/2+1)
    buf[0] = terminator << 5 // the flag byte
    if len(hex)%2 == 1 {
        buf[0] |= 1<<4 | hex[0] // odd flag plus the first nibble
        hex = hex[1:]
    }
    for ni := 0; ni < len(hex); ni += 2 {
        buf[ni/2+1] = hex[ni]<<4 | hex[ni+1]
    }
    return buf
}
|
||||||
|
|
||||||
|
// TestSnapTrieNodes various forms of GetTrieNodes requests.
|
||||||
|
func (s *Suite) TestSnapTrieNodes(t *utesting.T) {
|
||||||
|
|
||||||
|
key := common.FromHex("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
|
||||||
|
// helper function to iterate the key, and generate the compact-encoded
|
||||||
|
// trie paths along the way.
|
||||||
|
pathTo := func(length int) snap.TrieNodePathSet {
|
||||||
|
hex := keybytesToHex(key)[:length]
|
||||||
|
hex[len(hex)-1] = 0 // remove term flag
|
||||||
|
hKey := hexToCompact(hex)
|
||||||
|
return snap.TrieNodePathSet{hKey}
|
||||||
|
}
|
||||||
|
var accPaths []snap.TrieNodePathSet
|
||||||
|
for i := 1; i <= 65; i++ {
|
||||||
|
accPaths = append(accPaths, pathTo(i))
|
||||||
|
}
|
||||||
|
empty := emptyCode
|
||||||
|
for i, tc := range []trieNodesTest{
|
||||||
|
{
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
paths: nil,
|
||||||
|
nBytes: 500,
|
||||||
|
expHashes: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
paths: []snap.TrieNodePathSet{
|
||||||
|
snap.TrieNodePathSet{}, // zero-length pathset should 'abort' and kick us off
|
||||||
|
snap.TrieNodePathSet{[]byte{0}},
|
||||||
|
},
|
||||||
|
nBytes: 5000,
|
||||||
|
expHashes: []common.Hash{},
|
||||||
|
expReject: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
paths: []snap.TrieNodePathSet{
|
||||||
|
snap.TrieNodePathSet{[]byte{0}},
|
||||||
|
snap.TrieNodePathSet{[]byte{1}, []byte{0}},
|
||||||
|
},
|
||||||
|
nBytes: 5000,
|
||||||
|
//0x6b3724a41b8c38b46d4d02fba2bb2074c47a507eb16a9a4b978f91d32e406faf
|
||||||
|
expHashes: []common.Hash{s.chain.RootAt(999)},
|
||||||
|
},
|
||||||
|
{ // nonsensically long path
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
paths: []snap.TrieNodePathSet{
|
||||||
|
snap.TrieNodePathSet{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8,
|
||||||
|
0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8}},
|
||||||
|
},
|
||||||
|
nBytes: 5000,
|
||||||
|
expHashes: []common.Hash{common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470")},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
root: s.chain.RootAt(0),
|
||||||
|
paths: []snap.TrieNodePathSet{
|
||||||
|
snap.TrieNodePathSet{[]byte{0}},
|
||||||
|
snap.TrieNodePathSet{[]byte{1}, []byte{0}},
|
||||||
|
},
|
||||||
|
nBytes: 5000,
|
||||||
|
expHashes: []common.Hash{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// The leaf is only a couple of levels down, so the continued trie traversal causes lookup failures.
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
paths: accPaths,
|
||||||
|
nBytes: 5000,
|
||||||
|
expHashes: []common.Hash{
|
||||||
|
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
|
||||||
|
common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
|
||||||
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
|
empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty, empty,
|
||||||
|
empty, empty, empty},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// Basically the same as above, with different ordering
|
||||||
|
root: s.chain.RootAt(999),
|
||||||
|
paths: []snap.TrieNodePathSet{
|
||||||
|
accPaths[10], accPaths[1], accPaths[0],
|
||||||
|
},
|
||||||
|
nBytes: 5000,
|
||||||
|
expHashes: []common.Hash{
|
||||||
|
empty,
|
||||||
|
common.HexToHash("0x4fb1e4e2391e4b4da471d59641319b8fa25d76c973d4bec594d7b00a69ae5135"),
|
||||||
|
common.HexToHash("0xbcefee69b37cca1f5bf3a48aebe08b35f2ea1864fa958bb0723d909a0e0d28d8"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
} {
|
||||||
|
if err := s.snapGetTrieNodes(t, &tc); err != nil {
|
||||||
|
t.Errorf("test %d \n #hashes %x\n root: %#x\n bytes: %d\nfailed: %v", i, len(tc.expHashes), tc.root, tc.nBytes, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) snapGetAccountRange(t *utesting.T, tc *accRangeTest) error {
|
||||||
|
conn, err := s.dialSnap()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
if err = conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
// write request
|
||||||
|
req := &GetAccountRange{
|
||||||
|
ID: uint64(rand.Int63()),
|
||||||
|
Root: tc.root,
|
||||||
|
Origin: tc.origin,
|
||||||
|
Limit: tc.limit,
|
||||||
|
Bytes: tc.nBytes,
|
||||||
|
}
|
||||||
|
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("account range request failed: %v", err)
|
||||||
|
}
|
||||||
|
var res *snap.AccountRangePacket
|
||||||
|
if r, ok := resp.(*AccountRange); !ok {
|
||||||
|
return fmt.Errorf("account range response wrong: %T %v", resp, resp)
|
||||||
|
} else {
|
||||||
|
res = (*snap.AccountRangePacket)(r)
|
||||||
|
}
|
||||||
|
if exp, got := tc.expAccounts, len(res.Accounts); exp != got {
|
||||||
|
return fmt.Errorf("expected %d accounts, got %d", exp, got)
|
||||||
|
}
|
||||||
|
// Check that the encoding order is correct
|
||||||
|
for i := 1; i < len(res.Accounts); i++ {
|
||||||
|
if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {
|
||||||
|
return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
hashes []common.Hash
|
||||||
|
accounts [][]byte
|
||||||
|
proof = res.Proof
|
||||||
|
)
|
||||||
|
hashes, accounts, err = res.Unpack()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if len(hashes) > 0 {
|
||||||
|
if exp, got := tc.expFirst, res.Accounts[0].Hash; exp != got {
|
||||||
|
return fmt.Errorf("expected first account 0x%x, got 0x%x", exp, got)
|
||||||
|
}
|
||||||
|
if exp, got := tc.expLast, res.Accounts[len(res.Accounts)-1].Hash; exp != got {
|
||||||
|
return fmt.Errorf("expected last account 0x%x, got 0x%x", exp, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Reconstruct a partial trie from the response and verify it
|
||||||
|
keys := make([][]byte, len(hashes))
|
||||||
|
for i, key := range hashes {
|
||||||
|
keys[i] = common.CopyBytes(key[:])
|
||||||
|
}
|
||||||
|
nodes := make(light.NodeList, len(proof))
|
||||||
|
for i, node := range proof {
|
||||||
|
nodes[i] = node
|
||||||
|
}
|
||||||
|
proofdb := nodes.NodeSet()
|
||||||
|
|
||||||
|
var end []byte
|
||||||
|
if len(keys) > 0 {
|
||||||
|
end = keys[len(keys)-1]
|
||||||
|
}
|
||||||
|
_, err = trie.VerifyRangeProof(tc.root, tc.origin[:], end, keys, accounts, proofdb)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) snapGetStorageRanges(t *utesting.T, tc *stRangesTest) error {
|
||||||
|
conn, err := s.dialSnap()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
if err = conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
// write request
|
||||||
|
req := &GetStorageRanges{
|
||||||
|
ID: uint64(rand.Int63()),
|
||||||
|
Root: tc.root,
|
||||||
|
Accounts: tc.accounts,
|
||||||
|
Origin: tc.origin,
|
||||||
|
Limit: tc.limit,
|
||||||
|
Bytes: tc.nBytes,
|
||||||
|
}
|
||||||
|
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("account range request failed: %v", err)
|
||||||
|
}
|
||||||
|
var res *snap.StorageRangesPacket
|
||||||
|
if r, ok := resp.(*StorageRanges); !ok {
|
||||||
|
return fmt.Errorf("account range response wrong: %T %v", resp, resp)
|
||||||
|
} else {
|
||||||
|
res = (*snap.StorageRangesPacket)(r)
|
||||||
|
}
|
||||||
|
gotSlots := 0
|
||||||
|
// Ensure the ranges are monotonically increasing
|
||||||
|
for i, slots := range res.Slots {
|
||||||
|
gotSlots += len(slots)
|
||||||
|
for j := 1; j < len(slots); j++ {
|
||||||
|
if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {
|
||||||
|
return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if exp, got := tc.expSlots, gotSlots; exp != got {
|
||||||
|
return fmt.Errorf("expected %d slots, got %d", exp, got)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) snapGetByteCodes(t *utesting.T, tc *byteCodesTest) error {
|
||||||
|
conn, err := s.dialSnap()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
if err = conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
// write request
|
||||||
|
req := &GetByteCodes{
|
||||||
|
ID: uint64(rand.Int63()),
|
||||||
|
Hashes: tc.hashes,
|
||||||
|
Bytes: tc.nBytes,
|
||||||
|
}
|
||||||
|
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("getBytecodes request failed: %v", err)
|
||||||
|
}
|
||||||
|
var res *snap.ByteCodesPacket
|
||||||
|
if r, ok := resp.(*ByteCodes); !ok {
|
||||||
|
return fmt.Errorf("bytecodes response wrong: %T %v", resp, resp)
|
||||||
|
} else {
|
||||||
|
res = (*snap.ByteCodesPacket)(r)
|
||||||
|
}
|
||||||
|
if exp, got := tc.expHashes, len(res.Codes); exp != got {
|
||||||
|
for i, c := range res.Codes {
|
||||||
|
fmt.Printf("%d. %#x\n", i, c)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("expected %d bytecodes, got %d", exp, got)
|
||||||
|
}
|
||||||
|
// Cross reference the requested bytecodes with the response to find gaps
|
||||||
|
// that the serving node is missing
|
||||||
|
var (
|
||||||
|
bytecodes = res.Codes
|
||||||
|
hasher = sha3.NewLegacyKeccak256().(crypto.KeccakState)
|
||||||
|
hash = make([]byte, 32)
|
||||||
|
codes = make([][]byte, len(req.Hashes))
|
||||||
|
)
|
||||||
|
|
||||||
|
for i, j := 0, 0; i < len(bytecodes); i++ {
|
||||||
|
// Find the next hash that we've been served, leaving misses with nils
|
||||||
|
hasher.Reset()
|
||||||
|
hasher.Write(bytecodes[i])
|
||||||
|
hasher.Read(hash)
|
||||||
|
|
||||||
|
for j < len(req.Hashes) && !bytes.Equal(hash, req.Hashes[j][:]) {
|
||||||
|
j++
|
||||||
|
}
|
||||||
|
if j < len(req.Hashes) {
|
||||||
|
codes[j] = bytecodes[i]
|
||||||
|
j++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// We've either ran out of hashes, or got unrequested data
|
||||||
|
return errors.New("unexpected bytecode")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Suite) snapGetTrieNodes(t *utesting.T, tc *trieNodesTest) error {
|
||||||
|
conn, err := s.dialSnap()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("dial failed: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
if err = conn.peer(s.chain, nil); err != nil {
|
||||||
|
t.Fatalf("peering failed: %v", err)
|
||||||
|
}
|
||||||
|
// write request
|
||||||
|
req := &GetTrieNodes{
|
||||||
|
ID: uint64(rand.Int63()),
|
||||||
|
Root: tc.root,
|
||||||
|
Paths: tc.paths,
|
||||||
|
Bytes: tc.nBytes,
|
||||||
|
}
|
||||||
|
resp, err := conn.snapRequest(req, req.ID, s.chain)
|
||||||
|
if err != nil {
|
||||||
|
if tc.expReject {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("trienodes request failed: %v", err)
|
||||||
|
}
|
||||||
|
var res *snap.TrieNodesPacket
|
||||||
|
if r, ok := resp.(*TrieNodes); !ok {
|
||||||
|
return fmt.Errorf("trienodes response wrong: %T %v", resp, resp)
|
||||||
|
} else {
|
||||||
|
res = (*snap.TrieNodesPacket)(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the correctness
|
||||||
|
|
||||||
|
// Cross reference the requested trienodes with the response to find gaps
|
||||||
|
// that the serving node is missing
|
||||||
|
hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)
|
||||||
|
hash := make([]byte, 32)
|
||||||
|
trienodes := res.Nodes
|
||||||
|
if got, want := len(trienodes), len(tc.expHashes); got != want {
|
||||||
|
return fmt.Errorf("wrong trienode count, got %d, want %d\n", got, want)
|
||||||
|
}
|
||||||
|
for i, trienode := range trienodes {
|
||||||
|
hasher.Reset()
|
||||||
|
hasher.Write(trienode)
|
||||||
|
hasher.Read(hash)
|
||||||
|
if got, want := hash, tc.expHashes[i]; !bytes.Equal(got, want[:]) {
|
||||||
|
fmt.Printf("hash %d wrong, got %#x, want %#x\n", i, got, want)
|
||||||
|
err = fmt.Errorf("hash %d wrong, got %#x, want %#x", i, got, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
36
cmd/devp2p/internal/ethtest/snapTypes.go
Normal file
36
cmd/devp2p/internal/ethtest/snapTypes.go
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
package ethtest
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/eth/protocols/snap"
|
||||||
|
|
||||||
|
// GetAccountRange represents an account range query.
|
||||||
|
type GetAccountRange snap.GetAccountRangePacket
|
||||||
|
|
||||||
|
func (g GetAccountRange) Code() int { return 33 }
|
||||||
|
|
||||||
|
type AccountRange snap.AccountRangePacket
|
||||||
|
|
||||||
|
func (g AccountRange) Code() int { return 34 }
|
||||||
|
|
||||||
|
type GetStorageRanges snap.GetStorageRangesPacket
|
||||||
|
|
||||||
|
func (g GetStorageRanges) Code() int { return 35 }
|
||||||
|
|
||||||
|
type StorageRanges snap.StorageRangesPacket
|
||||||
|
|
||||||
|
func (g StorageRanges) Code() int { return 36 }
|
||||||
|
|
||||||
|
type GetByteCodes snap.GetByteCodesPacket
|
||||||
|
|
||||||
|
func (g GetByteCodes) Code() int { return 37 }
|
||||||
|
|
||||||
|
type ByteCodes snap.ByteCodesPacket
|
||||||
|
|
||||||
|
func (g ByteCodes) Code() int { return 38 }
|
||||||
|
|
||||||
|
type GetTrieNodes snap.GetTrieNodesPacket
|
||||||
|
|
||||||
|
func (g GetTrieNodes) Code() int { return 39 }
|
||||||
|
|
||||||
|
type TrieNodes snap.TrieNodesPacket
|
||||||
|
|
||||||
|
func (g TrieNodes) Code() int { return 40 }
|
@@ -125,6 +125,16 @@ func (s *Suite) Eth66Tests() []utesting.Test {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *Suite) SnapTests() []utesting.Test {
|
||||||
|
return []utesting.Test{
|
||||||
|
{Name: "TestSnapStatus", Fn: s.TestSnapStatus},
|
||||||
|
{Name: "TestSnapAccountRange", Fn: s.TestSnapGetAccountRange},
|
||||||
|
{Name: "TestSnapGetByteCodes", Fn: s.TestSnapGetByteCodes},
|
||||||
|
{Name: "TestSnapGetTrieNodes", Fn: s.TestSnapTrieNodes},
|
||||||
|
{Name: "TestSnapGetStorageRanges", Fn: s.TestSnapGetStorageRanges},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
eth66 = true // indicates whether suite should negotiate eth66 connection
|
eth66 = true // indicates whether suite should negotiate eth66 connection
|
||||||
eth65 = false // indicates whether suite should negotiate eth65 connection or below.
|
eth65 = false // indicates whether suite should negotiate eth65 connection or below.
|
||||||
|
@@ -55,6 +55,27 @@ func TestEthSuite(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSnapSuite(t *testing.T) {
|
||||||
|
geth, err := runGeth()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not run geth: %v", err)
|
||||||
|
}
|
||||||
|
defer geth.Close()
|
||||||
|
|
||||||
|
suite, err := NewSuite(geth.Server().Self(), fullchainFile, genesisFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("could not create new test suite: %v", err)
|
||||||
|
}
|
||||||
|
for _, test := range suite.SnapTests() {
|
||||||
|
t.Run(test.Name, func(t *testing.T) {
|
||||||
|
result := utesting.RunTAP([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout)
|
||||||
|
if result[0].Failed {
|
||||||
|
t.Fatal()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// runGeth creates and starts a geth node
|
// runGeth creates and starts a geth node
|
||||||
func runGeth() (*node.Node, error) {
|
func runGeth() (*node.Node, error) {
|
||||||
stack, err := node.New(&node.Config{
|
stack, err := node.New(&node.Config{
|
||||||
|
@@ -19,6 +19,7 @@ package ethtest
|
|||||||
import (
|
import (
|
||||||
"crypto/ecdsa"
|
"crypto/ecdsa"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
"github.com/ethereum/go-ethereum/eth/protocols/eth"
|
||||||
"github.com/ethereum/go-ethereum/p2p"
|
"github.com/ethereum/go-ethereum/p2p"
|
||||||
@@ -126,10 +127,12 @@ func (pt PooledTransactions) Code() int { return 26 }
|
|||||||
// Conn represents an individual connection with a peer
|
// Conn represents an individual connection with a peer
|
||||||
type Conn struct {
|
type Conn struct {
|
||||||
*rlpx.Conn
|
*rlpx.Conn
|
||||||
ourKey *ecdsa.PrivateKey
|
ourKey *ecdsa.PrivateKey
|
||||||
negotiatedProtoVersion uint
|
negotiatedProtoVersion uint
|
||||||
ourHighestProtoVersion uint
|
negotiatedSnapProtoVersion uint
|
||||||
caps []p2p.Cap
|
ourHighestProtoVersion uint
|
||||||
|
ourHighestSnapProtoVersion uint
|
||||||
|
caps []p2p.Cap
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read reads an eth packet from the connection.
|
// Read reads an eth packet from the connection.
|
||||||
@@ -259,12 +262,7 @@ func (c *Conn) Read66() (uint64, Message) {
|
|||||||
|
|
||||||
// Write writes a eth packet to the connection.
|
// Write writes a eth packet to the connection.
|
||||||
func (c *Conn) Write(msg Message) error {
|
func (c *Conn) Write(msg Message) error {
|
||||||
// check if message is eth protocol message
|
payload, err := rlp.EncodeToBytes(msg)
|
||||||
var (
|
|
||||||
payload []byte
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
payload, err = rlp.EncodeToBytes(msg)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -281,3 +279,43 @@ func (c *Conn) Write66(req eth.Packet, code int) error {
|
|||||||
_, err = c.Conn.Write(uint64(code), payload)
|
_, err = c.Conn.Write(uint64(code), payload)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadSnap reads a snap/1 response with the given id from the connection.
|
||||||
|
func (c *Conn) ReadSnap(id uint64) (Message, error) {
|
||||||
|
respId := id + 1
|
||||||
|
start := time.Now()
|
||||||
|
for respId != id && time.Since(start) < timeout {
|
||||||
|
code, rawData, _, err := c.Conn.Read()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("could not read from connection: %v", err)
|
||||||
|
}
|
||||||
|
var snpMsg interface{}
|
||||||
|
switch int(code) {
|
||||||
|
case (GetAccountRange{}).Code():
|
||||||
|
snpMsg = new(GetAccountRange)
|
||||||
|
case (AccountRange{}).Code():
|
||||||
|
snpMsg = new(AccountRange)
|
||||||
|
case (GetStorageRanges{}).Code():
|
||||||
|
snpMsg = new(GetStorageRanges)
|
||||||
|
case (StorageRanges{}).Code():
|
||||||
|
snpMsg = new(StorageRanges)
|
||||||
|
case (GetByteCodes{}).Code():
|
||||||
|
snpMsg = new(GetByteCodes)
|
||||||
|
case (ByteCodes{}).Code():
|
||||||
|
snpMsg = new(ByteCodes)
|
||||||
|
case (GetTrieNodes{}).Code():
|
||||||
|
snpMsg = new(GetTrieNodes)
|
||||||
|
case (TrieNodes{}).Code():
|
||||||
|
snpMsg = new(TrieNodes)
|
||||||
|
default:
|
||||||
|
//return nil, fmt.Errorf("invalid message code: %d", code)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := rlp.DecodeBytes(rawData, snpMsg); err != nil {
|
||||||
|
return nil, fmt.Errorf("could not rlp decode message: %v", err)
|
||||||
|
}
|
||||||
|
return snpMsg.(Message), nil
|
||||||
|
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("request timed out")
|
||||||
|
}
|
||||||
|
@@ -36,6 +36,7 @@ var (
|
|||||||
Subcommands: []cli.Command{
|
Subcommands: []cli.Command{
|
||||||
rlpxPingCommand,
|
rlpxPingCommand,
|
||||||
rlpxEthTestCommand,
|
rlpxEthTestCommand,
|
||||||
|
rlpxSnapTestCommand,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
rlpxPingCommand = cli.Command{
|
rlpxPingCommand = cli.Command{
|
||||||
@@ -53,6 +54,16 @@ var (
|
|||||||
testTAPFlag,
|
testTAPFlag,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
rlpxSnapTestCommand = cli.Command{
|
||||||
|
Name: "snap-test",
|
||||||
|
Usage: "Runs tests against a node",
|
||||||
|
ArgsUsage: "<node> <chain.rlp> <genesis.json>",
|
||||||
|
Action: rlpxSnapTest,
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
testPatternFlag,
|
||||||
|
testTAPFlag,
|
||||||
|
},
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func rlpxPing(ctx *cli.Context) error {
|
func rlpxPing(ctx *cli.Context) error {
|
||||||
@@ -106,3 +117,15 @@ func rlpxEthTest(ctx *cli.Context) error {
|
|||||||
}
|
}
|
||||||
return runTests(ctx, suite.AllEthTests())
|
return runTests(ctx, suite.AllEthTests())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// rlpxSnapTest runs the snap protocol test suite.
|
||||||
|
func rlpxSnapTest(ctx *cli.Context) error {
|
||||||
|
if ctx.NArg() < 3 {
|
||||||
|
exit("missing path to chain.rlp as command-line argument")
|
||||||
|
}
|
||||||
|
suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2])
|
||||||
|
if err != nil {
|
||||||
|
exit(err)
|
||||||
|
}
|
||||||
|
return runTests(ctx, suite.SnapTests())
|
||||||
|
}
|
||||||
|
@@ -49,7 +49,7 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
|
|||||||
// signHash is a helper function that calculates a hash for the given message
|
// signHash is a helper function that calculates a hash for the given message
|
||||||
// that can be safely used to calculate a signature from.
|
// that can be safely used to calculate a signature from.
|
||||||
//
|
//
|
||||||
// The hash is calulcated as
|
// The hash is calculated as
|
||||||
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
|
||||||
//
|
//
|
||||||
// This gives context to the signed message and prevents signing of transactions.
|
// This gives context to the signed message and prevents signing of transactions.
|
||||||
|
@@ -67,6 +67,7 @@ type ommer struct {
|
|||||||
type stEnv struct {
|
type stEnv struct {
|
||||||
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
|
Coinbase common.Address `json:"currentCoinbase" gencodec:"required"`
|
||||||
Difficulty *big.Int `json:"currentDifficulty"`
|
Difficulty *big.Int `json:"currentDifficulty"`
|
||||||
|
Random *big.Int `json:"currentRandom"`
|
||||||
ParentDifficulty *big.Int `json:"parentDifficulty"`
|
ParentDifficulty *big.Int `json:"parentDifficulty"`
|
||||||
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
|
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"`
|
||||||
Number uint64 `json:"currentNumber" gencodec:"required"`
|
Number uint64 `json:"currentNumber" gencodec:"required"`
|
||||||
@@ -81,6 +82,7 @@ type stEnv struct {
|
|||||||
type stEnvMarshaling struct {
|
type stEnvMarshaling struct {
|
||||||
Coinbase common.UnprefixedAddress
|
Coinbase common.UnprefixedAddress
|
||||||
Difficulty *math.HexOrDecimal256
|
Difficulty *math.HexOrDecimal256
|
||||||
|
Random *math.HexOrDecimal256
|
||||||
ParentDifficulty *math.HexOrDecimal256
|
ParentDifficulty *math.HexOrDecimal256
|
||||||
GasLimit math.HexOrDecimal64
|
GasLimit math.HexOrDecimal64
|
||||||
Number math.HexOrDecimal64
|
Number math.HexOrDecimal64
|
||||||
@@ -139,6 +141,11 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
|
|||||||
if pre.Env.BaseFee != nil {
|
if pre.Env.BaseFee != nil {
|
||||||
vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
|
vmContext.BaseFee = new(big.Int).Set(pre.Env.BaseFee)
|
||||||
}
|
}
|
||||||
|
// If random is defined, add it to the vmContext.
|
||||||
|
if pre.Env.Random != nil {
|
||||||
|
rnd := common.BigToHash(pre.Env.Random)
|
||||||
|
vmContext.Random = &rnd
|
||||||
|
}
|
||||||
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
|
// If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's
|
||||||
// done in StateProcessor.Process(block, ...), right before transactions are applied.
|
// done in StateProcessor.Process(block, ...), right before transactions are applied.
|
||||||
if chainConfig.DAOForkSupport &&
|
if chainConfig.DAOForkSupport &&
|
||||||
|
@@ -18,6 +18,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
|||||||
type stEnv struct {
|
type stEnv struct {
|
||||||
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
||||||
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
|
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
|
||||||
|
Random *math.HexOrDecimal256 `json:"currentRandom"`
|
||||||
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
|
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
|
||||||
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
||||||
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
||||||
@@ -31,6 +32,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) {
|
|||||||
var enc stEnv
|
var enc stEnv
|
||||||
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
|
enc.Coinbase = common.UnprefixedAddress(s.Coinbase)
|
||||||
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
|
enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty)
|
||||||
|
enc.Random = (*math.HexOrDecimal256)(s.Random)
|
||||||
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
|
enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty)
|
||||||
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
|
enc.GasLimit = math.HexOrDecimal64(s.GasLimit)
|
||||||
enc.Number = math.HexOrDecimal64(s.Number)
|
enc.Number = math.HexOrDecimal64(s.Number)
|
||||||
@@ -48,6 +50,7 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
|||||||
type stEnv struct {
|
type stEnv struct {
|
||||||
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"`
|
||||||
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
|
Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"`
|
||||||
|
Random *math.HexOrDecimal256 `json:"currentRandom"`
|
||||||
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
|
ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"`
|
||||||
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"`
|
||||||
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"`
|
||||||
@@ -69,6 +72,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error {
|
|||||||
if dec.Difficulty != nil {
|
if dec.Difficulty != nil {
|
||||||
s.Difficulty = (*big.Int)(dec.Difficulty)
|
s.Difficulty = (*big.Int)(dec.Difficulty)
|
||||||
}
|
}
|
||||||
|
if dec.Random != nil {
|
||||||
|
s.Random = (*big.Int)(dec.Random)
|
||||||
|
}
|
||||||
if dec.ParentDifficulty != nil {
|
if dec.ParentDifficulty != nil {
|
||||||
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
|
s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty)
|
||||||
}
|
}
|
||||||
|
@@ -252,6 +252,10 @@ func Transition(ctx *cli.Context) error {
|
|||||||
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
|
return NewError(ErrorConfig, errors.New("EIP-1559 config but missing 'currentBaseFee' in env section"))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Sanity check, to not `panic` in state_transition
|
||||||
|
if prestate.Env.Random != nil && !chainConfig.IsLondon(big.NewInt(int64(prestate.Env.Number))) {
|
||||||
|
return NewError(ErrorConfig, errors.New("can only apply RANDOM on top of London chainrules"))
|
||||||
|
}
|
||||||
if env := prestate.Env; env.Difficulty == nil {
|
if env := prestate.Env; env.Difficulty == nil {
|
||||||
// If difficulty was not provided by caller, we need to calculate it.
|
// If difficulty was not provided by caller, we need to calculate it.
|
||||||
switch {
|
switch {
|
||||||
|
@@ -120,7 +120,7 @@ func importAccountWithExpect(t *testing.T, key string, expected string) {
|
|||||||
if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
|
if err := ioutil.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
geth := runGeth(t, "account", "import", keyfile, "-password", passwordFile)
|
geth := runGeth(t, "--lightkdf", "account", "import", keyfile, "-password", passwordFile)
|
||||||
defer geth.ExpectExit()
|
defer geth.ExpectExit()
|
||||||
geth.Expect(expected)
|
geth.Expect(expected)
|
||||||
}
|
}
|
||||||
|
@@ -159,9 +159,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
|
|||||||
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
|
cfg.Eth.OverrideArrowGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideArrowGlacierFlag.Name))
|
||||||
}
|
}
|
||||||
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
|
if ctx.GlobalIsSet(utils.OverrideTerminalTotalDifficulty.Name) {
|
||||||
cfg.Eth.Genesis.Config.TerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
|
cfg.Eth.OverrideTerminalTotalDifficulty = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideTerminalTotalDifficulty.Name))
|
||||||
}
|
}
|
||||||
backend, _ := utils.RegisterEthService(stack, &cfg.Eth, ctx.GlobalBool(utils.CatalystFlag.Name))
|
backend, _ := utils.RegisterEthService(stack, &cfg.Eth)
|
||||||
|
|
||||||
// Configure GraphQL if requested
|
// Configure GraphQL if requested
|
||||||
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
|
if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) {
|
||||||
|
@@ -77,13 +77,13 @@ func localConsole(ctx *cli.Context) error {
|
|||||||
// Create and start the node based on the CLI flags
|
// Create and start the node based on the CLI flags
|
||||||
prepare(ctx)
|
prepare(ctx)
|
||||||
stack, backend := makeFullNode(ctx)
|
stack, backend := makeFullNode(ctx)
|
||||||
startNode(ctx, stack, backend)
|
startNode(ctx, stack, backend, true)
|
||||||
defer stack.Close()
|
defer stack.Close()
|
||||||
|
|
||||||
// Attach to the newly started node and start the JavaScript console
|
// Attach to the newly started node and create the JavaScript console.
|
||||||
client, err := stack.Attach()
|
client, err := stack.Attach()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
return fmt.Errorf("Failed to attach to the inproc geth: %v", err)
|
||||||
}
|
}
|
||||||
config := console.Config{
|
config := console.Config{
|
||||||
DataDir: utils.MakeDataDir(ctx),
|
DataDir: utils.MakeDataDir(ctx),
|
||||||
@@ -91,29 +91,34 @@ func localConsole(ctx *cli.Context) error {
|
|||||||
Client: client,
|
Client: client,
|
||||||
Preload: utils.MakeConsolePreloads(ctx),
|
Preload: utils.MakeConsolePreloads(ctx),
|
||||||
}
|
}
|
||||||
|
|
||||||
console, err := console.New(config)
|
console, err := console.New(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
return fmt.Errorf("Failed to start the JavaScript console: %v", err)
|
||||||
}
|
}
|
||||||
defer console.Stop(false)
|
defer console.Stop(false)
|
||||||
|
|
||||||
// If only a short execution was requested, evaluate and return
|
// If only a short execution was requested, evaluate and return.
|
||||||
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
if script := ctx.GlobalString(utils.ExecFlag.Name); script != "" {
|
||||||
console.Evaluate(script)
|
console.Evaluate(script)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Otherwise print the welcome screen and enter interactive mode
|
|
||||||
|
// Track node shutdown and stop the console when it goes down.
|
||||||
|
// This happens when SIGTERM is sent to the process.
|
||||||
|
go func() {
|
||||||
|
stack.Wait()
|
||||||
|
console.StopInteractive()
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Print the welcome screen and enter interactive mode.
|
||||||
console.Welcome()
|
console.Welcome()
|
||||||
console.Interactive()
|
console.Interactive()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// remoteConsole will connect to a remote geth instance, attaching a JavaScript
|
// remoteConsole will connect to a remote geth instance, attaching a JavaScript
|
||||||
// console to it.
|
// console to it.
|
||||||
func remoteConsole(ctx *cli.Context) error {
|
func remoteConsole(ctx *cli.Context) error {
|
||||||
// Attach to a remotely running geth instance and start the JavaScript console
|
|
||||||
endpoint := ctx.Args().First()
|
endpoint := ctx.Args().First()
|
||||||
if endpoint == "" {
|
if endpoint == "" {
|
||||||
path := node.DefaultDataDir()
|
path := node.DefaultDataDir()
|
||||||
@@ -150,7 +155,6 @@ func remoteConsole(ctx *cli.Context) error {
|
|||||||
Client: client,
|
Client: client,
|
||||||
Preload: utils.MakeConsolePreloads(ctx),
|
Preload: utils.MakeConsolePreloads(ctx),
|
||||||
}
|
}
|
||||||
|
|
||||||
console, err := console.New(config)
|
console, err := console.New(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
||||||
@@ -165,7 +169,6 @@ func remoteConsole(ctx *cli.Context) error {
|
|||||||
// Otherwise print the welcome screen and enter interactive mode
|
// Otherwise print the welcome screen and enter interactive mode
|
||||||
console.Welcome()
|
console.Welcome()
|
||||||
console.Interactive()
|
console.Interactive()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -189,13 +192,13 @@ func dialRPC(endpoint string) (*rpc.Client, error) {
|
|||||||
func ephemeralConsole(ctx *cli.Context) error {
|
func ephemeralConsole(ctx *cli.Context) error {
|
||||||
// Create and start the node based on the CLI flags
|
// Create and start the node based on the CLI flags
|
||||||
stack, backend := makeFullNode(ctx)
|
stack, backend := makeFullNode(ctx)
|
||||||
startNode(ctx, stack, backend)
|
startNode(ctx, stack, backend, false)
|
||||||
defer stack.Close()
|
defer stack.Close()
|
||||||
|
|
||||||
// Attach to the newly started node and start the JavaScript console
|
// Attach to the newly started node and start the JavaScript console
|
||||||
client, err := stack.Attach()
|
client, err := stack.Attach()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to attach to the inproc geth: %v", err)
|
return fmt.Errorf("Failed to attach to the inproc geth: %v", err)
|
||||||
}
|
}
|
||||||
config := console.Config{
|
config := console.Config{
|
||||||
DataDir: utils.MakeDataDir(ctx),
|
DataDir: utils.MakeDataDir(ctx),
|
||||||
@@ -206,22 +209,24 @@ func ephemeralConsole(ctx *cli.Context) error {
|
|||||||
|
|
||||||
console, err := console.New(config)
|
console, err := console.New(config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
utils.Fatalf("Failed to start the JavaScript console: %v", err)
|
return fmt.Errorf("Failed to start the JavaScript console: %v", err)
|
||||||
}
|
}
|
||||||
defer console.Stop(false)
|
defer console.Stop(false)
|
||||||
|
|
||||||
// Evaluate each of the specified JavaScript files
|
// Interrupt the JS interpreter when node is stopped.
|
||||||
for _, file := range ctx.Args() {
|
|
||||||
if err = console.Execute(file); err != nil {
|
|
||||||
utils.Fatalf("Failed to execute %s: %v", file, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
stack.Wait()
|
stack.Wait()
|
||||||
console.Stop(false)
|
console.Stop(false)
|
||||||
}()
|
}()
|
||||||
console.Stop(true)
|
|
||||||
|
|
||||||
|
// Evaluate each of the specified JavaScript files.
|
||||||
|
for _, file := range ctx.Args() {
|
||||||
|
if err = console.Execute(file); err != nil {
|
||||||
|
return fmt.Errorf("Failed to execute %s: %v", file, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The main script is now done, but keep running timers/callbacks.
|
||||||
|
console.Stop(true)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -34,9 +34,11 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
"github.com/ethereum/go-ethereum/console/prompt"
|
"github.com/ethereum/go-ethereum/console/prompt"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state/snapshot"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
"github.com/olekukonko/tablewriter"
|
||||||
"gopkg.in/urfave/cli.v1"
|
"gopkg.in/urfave/cli.v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -69,6 +71,7 @@ Remove blockchain and state databases`,
|
|||||||
dbDumpFreezerIndex,
|
dbDumpFreezerIndex,
|
||||||
dbImportCmd,
|
dbImportCmd,
|
||||||
dbExportCmd,
|
dbExportCmd,
|
||||||
|
dbMetadataCmd,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
dbInspectCmd = cli.Command{
|
dbInspectCmd = cli.Command{
|
||||||
@@ -233,6 +236,21 @@ WARNING: This is a low-level operation which may cause database corruption!`,
|
|||||||
},
|
},
|
||||||
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
|
Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
|
||||||
}
|
}
|
||||||
|
dbMetadataCmd = cli.Command{
|
||||||
|
Action: utils.MigrateFlags(showMetaData),
|
||||||
|
Name: "metadata",
|
||||||
|
Usage: "Shows metadata about the chain status.",
|
||||||
|
Flags: []cli.Flag{
|
||||||
|
utils.DataDirFlag,
|
||||||
|
utils.SyncModeFlag,
|
||||||
|
utils.MainnetFlag,
|
||||||
|
utils.RopstenFlag,
|
||||||
|
utils.SepoliaFlag,
|
||||||
|
utils.RinkebyFlag,
|
||||||
|
utils.GoerliFlag,
|
||||||
|
},
|
||||||
|
Description: "Shows metadata about the chain status.",
|
||||||
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func removeDB(ctx *cli.Context) error {
|
func removeDB(ctx *cli.Context) error {
|
||||||
@@ -539,7 +557,7 @@ func freezerInspect(ctx *cli.Context) error {
|
|||||||
defer stack.Close()
|
defer stack.Close()
|
||||||
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
|
path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
|
||||||
log.Info("Opening freezer", "location", path, "name", kind)
|
log.Info("Opening freezer", "location", path, "name", kind)
|
||||||
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy); err != nil {
|
if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
|
||||||
return err
|
return err
|
||||||
} else {
|
} else {
|
||||||
f.DumpIndex(start, end)
|
f.DumpIndex(start, end)
|
||||||
@@ -685,3 +703,50 @@ func exportChaindata(ctx *cli.Context) error {
|
|||||||
db := utils.MakeChainDatabase(ctx, stack, true)
|
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
|
return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func showMetaData(ctx *cli.Context) error {
|
||||||
|
stack, _ := makeConfigNode(ctx)
|
||||||
|
defer stack.Close()
|
||||||
|
db := utils.MakeChainDatabase(ctx, stack, true)
|
||||||
|
ancients, err := db.Ancients()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
|
||||||
|
}
|
||||||
|
pp := func(val *uint64) string {
|
||||||
|
if val == nil {
|
||||||
|
return "<nil>"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d (0x%x)", *val, *val)
|
||||||
|
}
|
||||||
|
data := [][]string{
|
||||||
|
{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
|
||||||
|
{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
|
||||||
|
{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
|
||||||
|
{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
|
||||||
|
if b := rawdb.ReadHeadBlock(db); b != nil {
|
||||||
|
data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
|
||||||
|
data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
|
||||||
|
data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
|
||||||
|
}
|
||||||
|
if h := rawdb.ReadHeadHeader(db); h != nil {
|
||||||
|
data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
|
||||||
|
data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
|
||||||
|
data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
|
||||||
|
}
|
||||||
|
data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
|
||||||
|
{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
|
||||||
|
{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
|
||||||
|
{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
|
||||||
|
{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
|
||||||
|
{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
|
||||||
|
{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
|
||||||
|
{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
|
||||||
|
{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
|
||||||
|
{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
|
||||||
|
}...)
|
||||||
|
table := tablewriter.NewWriter(os.Stdout)
|
||||||
|
table.SetHeader([]string{"Field", "Value"})
|
||||||
|
table.AppendBulk(data)
|
||||||
|
table.Render()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
@@ -157,7 +157,6 @@ var (
|
|||||||
utils.GpoIgnoreGasPriceFlag,
|
utils.GpoIgnoreGasPriceFlag,
|
||||||
utils.MinerNotifyFullFlag,
|
utils.MinerNotifyFullFlag,
|
||||||
configFileFlag,
|
configFileFlag,
|
||||||
utils.CatalystFlag,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
rpcFlags = []cli.Flag{
|
rpcFlags = []cli.Flag{
|
||||||
@@ -208,7 +207,7 @@ func init() {
|
|||||||
// Initialize the CLI app and start Geth
|
// Initialize the CLI app and start Geth
|
||||||
app.Action = geth
|
app.Action = geth
|
||||||
app.HideVersion = true // we have a command to print the version
|
app.HideVersion = true // we have a command to print the version
|
||||||
app.Copyright = "Copyright 2013-2021 The go-ethereum Authors"
|
app.Copyright = "Copyright 2013-2022 The go-ethereum Authors"
|
||||||
app.Commands = []cli.Command{
|
app.Commands = []cli.Command{
|
||||||
// See chaincmd.go:
|
// See chaincmd.go:
|
||||||
initCommand,
|
initCommand,
|
||||||
@@ -274,6 +273,9 @@ func prepare(ctx *cli.Context) {
|
|||||||
case ctx.GlobalIsSet(utils.RopstenFlag.Name):
|
case ctx.GlobalIsSet(utils.RopstenFlag.Name):
|
||||||
log.Info("Starting Geth on Ropsten testnet...")
|
log.Info("Starting Geth on Ropsten testnet...")
|
||||||
|
|
||||||
|
case ctx.GlobalIsSet(utils.SepoliaFlag.Name):
|
||||||
|
log.Info("Starting Geth on Sepolia testnet...")
|
||||||
|
|
||||||
case ctx.GlobalIsSet(utils.RinkebyFlag.Name):
|
case ctx.GlobalIsSet(utils.RinkebyFlag.Name):
|
||||||
log.Info("Starting Geth on Rinkeby testnet...")
|
log.Info("Starting Geth on Rinkeby testnet...")
|
||||||
|
|
||||||
@@ -289,7 +291,11 @@ func prepare(ctx *cli.Context) {
|
|||||||
// If we're a full node on mainnet without --cache specified, bump default cache allowance
|
// If we're a full node on mainnet without --cache specified, bump default cache allowance
|
||||||
if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) {
|
if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) {
|
||||||
// Make sure we're not on any supported preconfigured testnet either
|
// Make sure we're not on any supported preconfigured testnet either
|
||||||
if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) {
|
if !ctx.GlobalIsSet(utils.RopstenFlag.Name) &&
|
||||||
|
!ctx.GlobalIsSet(utils.SepoliaFlag.Name) &&
|
||||||
|
!ctx.GlobalIsSet(utils.RinkebyFlag.Name) &&
|
||||||
|
!ctx.GlobalIsSet(utils.GoerliFlag.Name) &&
|
||||||
|
!ctx.GlobalIsSet(utils.DeveloperFlag.Name) {
|
||||||
// Nope, we're really on mainnet. Bump that cache up!
|
// Nope, we're really on mainnet. Bump that cache up!
|
||||||
log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096)
|
log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096)
|
||||||
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096))
|
ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096))
|
||||||
@@ -320,7 +326,7 @@ func geth(ctx *cli.Context) error {
|
|||||||
stack, backend := makeFullNode(ctx)
|
stack, backend := makeFullNode(ctx)
|
||||||
defer stack.Close()
|
defer stack.Close()
|
||||||
|
|
||||||
startNode(ctx, stack, backend)
|
startNode(ctx, stack, backend, false)
|
||||||
stack.Wait()
|
stack.Wait()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -328,11 +334,11 @@ func geth(ctx *cli.Context) error {
|
|||||||
// startNode boots up the system node and all registered protocols, after which
|
// startNode boots up the system node and all registered protocols, after which
|
||||||
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
|
// it unlocks any requested accounts, and starts the RPC/IPC interfaces and the
|
||||||
// miner.
|
// miner.
|
||||||
func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) {
|
func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend, isConsole bool) {
|
||||||
debug.Memsize.Add("node", stack)
|
debug.Memsize.Add("node", stack)
|
||||||
|
|
||||||
// Start up the node itself
|
// Start up the node itself
|
||||||
utils.StartNode(ctx, stack)
|
utils.StartNode(ctx, stack, isConsole)
|
||||||
|
|
||||||
// Unlock any account specifically requested
|
// Unlock any account specifically requested
|
||||||
unlockAccounts(ctx, stack)
|
unlockAccounts(ctx, stack)
|
||||||
|
@@ -220,7 +220,7 @@ func verifyState(ctx *cli.Context) error {
|
|||||||
log.Error("Failed to load head block")
|
log.Error("Failed to load head block")
|
||||||
return errors.New("no head block")
|
return errors.New("no head block")
|
||||||
}
|
}
|
||||||
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false, false)
|
snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, headBlock.Root(), false, false, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("Failed to open snapshot tree", "err", err)
|
log.Error("Failed to open snapshot tree", "err", err)
|
||||||
return err
|
return err
|
||||||
@@ -418,8 +418,7 @@ func traverseRawState(ctx *cli.Context) error {
|
|||||||
// Check the present for non-empty hash node(embedded node doesn't
|
// Check the present for non-empty hash node(embedded node doesn't
|
||||||
// have their own hash).
|
// have their own hash).
|
||||||
if node != (common.Hash{}) {
|
if node != (common.Hash{}) {
|
||||||
blob := rawdb.ReadTrieNode(chaindb, node)
|
if !rawdb.HasTrieNode(chaindb, node) {
|
||||||
if len(blob) == 0 {
|
|
||||||
log.Error("Missing trie node(storage)", "hash", node)
|
log.Error("Missing trie node(storage)", "hash", node)
|
||||||
return errors.New("missing storage")
|
return errors.New("missing storage")
|
||||||
}
|
}
|
||||||
@@ -472,7 +471,7 @@ func dumpState(ctx *cli.Context) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false, false)
|
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@@ -229,7 +229,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{
|
|||||||
utils.SnapshotFlag,
|
utils.SnapshotFlag,
|
||||||
utils.BloomFilterSizeFlag,
|
utils.BloomFilterSizeFlag,
|
||||||
cli.HelpFlag,
|
cli.HelpFlag,
|
||||||
utils.CatalystFlag,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
@@ -25,6 +25,8 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/jedisct1/go-minisign"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestVerification(t *testing.T) {
|
func TestVerification(t *testing.T) {
|
||||||
@@ -128,3 +130,39 @@ func TestMatching(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGethPubKeysParseable(t *testing.T) {
|
||||||
|
for _, pubkey := range gethPubKeys {
|
||||||
|
_, err := minisign.NewPublicKey(pubkey)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Should be parseable")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestKeyID(t *testing.T) {
|
||||||
|
type args struct {
|
||||||
|
id [8]byte
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
args args
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"@holiman key", args{id: extractKeyId(gethPubKeys[0])}, "FB1D084D39BAEC24"},
|
||||||
|
{"second key", args{id: extractKeyId(gethPubKeys[1])}, "138B1CA303E51687"},
|
||||||
|
{"third key", args{id: extractKeyId(gethPubKeys[2])}, "FD9813B2D2098484"},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := keyID(tt.args.id); got != tt.want {
|
||||||
|
t.Errorf("keyID() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func extractKeyId(pubkey string) [8]byte {
|
||||||
|
p, _ := minisign.NewPublicKey(pubkey)
|
||||||
|
return p.KeyId
|
||||||
|
}
|
||||||
|
@@ -68,7 +68,7 @@ func Fatalf(format string, args ...interface{}) {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
func StartNode(ctx *cli.Context, stack *node.Node) {
|
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
|
||||||
if err := stack.Start(); err != nil {
|
if err := stack.Start(); err != nil {
|
||||||
Fatalf("Error starting protocol stack: %v", err)
|
Fatalf("Error starting protocol stack: %v", err)
|
||||||
}
|
}
|
||||||
@@ -87,17 +87,33 @@ func StartNode(ctx *cli.Context, stack *node.Node) {
|
|||||||
go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
|
go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
|
||||||
}
|
}
|
||||||
|
|
||||||
<-sigc
|
shutdown := func() {
|
||||||
log.Info("Got interrupt, shutting down...")
|
log.Info("Got interrupt, shutting down...")
|
||||||
go stack.Close()
|
go stack.Close()
|
||||||
for i := 10; i > 0; i-- {
|
for i := 10; i > 0; i-- {
|
||||||
<-sigc
|
<-sigc
|
||||||
if i > 1 {
|
if i > 1 {
|
||||||
log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
|
log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
debug.Exit() // ensure trace and CPU profile data is flushed.
|
||||||
|
debug.LoudPanic("boom")
|
||||||
|
}
|
||||||
|
|
||||||
|
if isConsole {
|
||||||
|
// In JS console mode, SIGINT is ignored because it's handled by the console.
|
||||||
|
// However, SIGTERM still shuts down the node.
|
||||||
|
for {
|
||||||
|
sig := <-sigc
|
||||||
|
if sig == syscall.SIGTERM {
|
||||||
|
shutdown()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
<-sigc
|
||||||
|
shutdown()
|
||||||
}
|
}
|
||||||
debug.Exit() // ensure trace and CPU profile data is flushed.
|
|
||||||
debug.LoudPanic("boom")
|
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -45,7 +45,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/eth"
|
"github.com/ethereum/go-ethereum/eth"
|
||||||
"github.com/ethereum/go-ethereum/eth/catalyst"
|
ethcatalyst "github.com/ethereum/go-ethereum/eth/catalyst"
|
||||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||||
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
"github.com/ethereum/go-ethereum/eth/ethconfig"
|
||||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||||
@@ -56,6 +56,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/internal/ethapi"
|
"github.com/ethereum/go-ethereum/internal/ethapi"
|
||||||
"github.com/ethereum/go-ethereum/internal/flags"
|
"github.com/ethereum/go-ethereum/internal/flags"
|
||||||
"github.com/ethereum/go-ethereum/les"
|
"github.com/ethereum/go-ethereum/les"
|
||||||
|
lescatalyst "github.com/ethereum/go-ethereum/les/catalyst"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/metrics/exp"
|
"github.com/ethereum/go-ethereum/metrics/exp"
|
||||||
@@ -789,11 +790,6 @@ var (
|
|||||||
Usage: "InfluxDB organization name (v2 only)",
|
Usage: "InfluxDB organization name (v2 only)",
|
||||||
Value: metrics.DefaultConfig.InfluxDBOrganization,
|
Value: metrics.DefaultConfig.InfluxDBOrganization,
|
||||||
}
|
}
|
||||||
|
|
||||||
CatalystFlag = cli.BoolFlag{
|
|
||||||
Name: "catalyst",
|
|
||||||
Usage: "Catalyst mode (eth2 integration testing)",
|
|
||||||
}
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// MakeDataDir retrieves the currently requested data directory, terminating
|
// MakeDataDir retrieves the currently requested data directory, terminating
|
||||||
@@ -1673,9 +1669,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) {
|
|||||||
// Create a new developer genesis block or reuse existing one
|
// Create a new developer genesis block or reuse existing one
|
||||||
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
|
cfg.Genesis = core.DeveloperGenesisBlock(uint64(ctx.GlobalInt(DeveloperPeriodFlag.Name)), ctx.GlobalUint64(DeveloperGasLimitFlag.Name), developer.Address)
|
||||||
if ctx.GlobalIsSet(DataDirFlag.Name) {
|
if ctx.GlobalIsSet(DataDirFlag.Name) {
|
||||||
|
// If datadir doesn't exist we need to open db in write-mode
|
||||||
|
// so leveldb can create files.
|
||||||
|
readonly := true
|
||||||
|
if !common.FileExist(stack.ResolvePath("chaindata")) {
|
||||||
|
readonly = false
|
||||||
|
}
|
||||||
// Check if we have an already initialized chain and fall back to
|
// Check if we have an already initialized chain and fall back to
|
||||||
// that if so. Otherwise we need to generate a new genesis spec.
|
// that if so. Otherwise we need to generate a new genesis spec.
|
||||||
chaindb := MakeChainDatabase(ctx, stack, false) // TODO (MariusVanDerWijden) make this read only
|
chaindb := MakeChainDatabase(ctx, stack, readonly)
|
||||||
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
if rawdb.ReadCanonicalHash(chaindb, 0) != (common.Hash{}) {
|
||||||
cfg.Genesis = nil // fallback to db content
|
cfg.Genesis = nil // fallback to db content
|
||||||
}
|
}
|
||||||
@@ -1710,15 +1712,15 @@ func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) {
|
|||||||
// RegisterEthService adds an Ethereum client to the stack.
|
// RegisterEthService adds an Ethereum client to the stack.
|
||||||
// The second return value is the full node instance, which may be nil if the
|
// The second return value is the full node instance, which may be nil if the
|
||||||
// node is running as a light client.
|
// node is running as a light client.
|
||||||
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool) (ethapi.Backend, *eth.Ethereum) {
|
func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) (ethapi.Backend, *eth.Ethereum) {
|
||||||
if cfg.SyncMode == downloader.LightSync {
|
if cfg.SyncMode == downloader.LightSync {
|
||||||
backend, err := les.New(stack, cfg)
|
backend, err := les.New(stack, cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Fatalf("Failed to register the Ethereum service: %v", err)
|
Fatalf("Failed to register the Ethereum service: %v", err)
|
||||||
}
|
}
|
||||||
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
|
stack.RegisterAPIs(tracers.APIs(backend.ApiBackend))
|
||||||
if isCatalyst {
|
if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
|
||||||
if err := catalyst.RegisterLight(stack, backend); err != nil {
|
if err := lescatalyst.Register(stack, backend); err != nil {
|
||||||
Fatalf("Failed to register the catalyst service: %v", err)
|
Fatalf("Failed to register the catalyst service: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1734,8 +1736,8 @@ func RegisterEthService(stack *node.Node, cfg *ethconfig.Config, isCatalyst bool
|
|||||||
Fatalf("Failed to create the LES server: %v", err)
|
Fatalf("Failed to create the LES server: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if isCatalyst {
|
if backend.BlockChain().Config().TerminalTotalDifficulty != nil {
|
||||||
if err := catalyst.Register(stack, backend); err != nil {
|
if err := ethcatalyst.Register(stack, backend); err != nil {
|
||||||
Fatalf("Failed to register the catalyst service: %v", err)
|
Fatalf("Failed to register the catalyst service: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -26,6 +26,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/consensus/misc"
|
"github.com/ethereum/go-ethereum/consensus/misc"
|
||||||
"github.com/ethereum/go-ethereum/core/state"
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rpc"
|
"github.com/ethereum/go-ethereum/rpc"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
@@ -42,7 +43,6 @@ var (
|
|||||||
// error types into the consensus package.
|
// error types into the consensus package.
|
||||||
var (
|
var (
|
||||||
errTooManyUncles = errors.New("too many uncles")
|
errTooManyUncles = errors.New("too many uncles")
|
||||||
errInvalidMixDigest = errors.New("invalid mix digest")
|
|
||||||
errInvalidNonce = errors.New("invalid nonce")
|
errInvalidNonce = errors.New("invalid nonce")
|
||||||
errInvalidUncleHash = errors.New("invalid uncle hash")
|
errInvalidUncleHash = errors.New("invalid uncle hash")
|
||||||
)
|
)
|
||||||
@@ -181,10 +181,7 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
|
|||||||
if len(header.Extra) > 32 {
|
if len(header.Extra) > 32 {
|
||||||
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
|
return fmt.Errorf("extra-data longer than 32 bytes (%d)", len(header.Extra))
|
||||||
}
|
}
|
||||||
// Verify the seal parts. Ensure the mixhash, nonce and uncle hash are the expected value.
|
// Verify the seal parts. Ensure the nonce and uncle hash are the expected value.
|
||||||
if header.MixDigest != (common.Hash{}) {
|
|
||||||
return errInvalidMixDigest
|
|
||||||
}
|
|
||||||
if header.Nonce != beaconNonce {
|
if header.Nonce != beaconNonce {
|
||||||
return errInvalidNonce
|
return errInvalidNonce
|
||||||
}
|
}
|
||||||
@@ -196,9 +193,8 @@ func (beacon *Beacon) verifyHeader(chain consensus.ChainHeaderReader, header, pa
|
|||||||
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, beaconDifficulty)
|
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, beaconDifficulty)
|
||||||
}
|
}
|
||||||
// Verify that the gas limit is <= 2^63-1
|
// Verify that the gas limit is <= 2^63-1
|
||||||
cap := uint64(0x7fffffffffffffff)
|
if header.GasLimit > params.MaxGasLimit {
|
||||||
if header.GasLimit > cap {
|
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
|
||||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
|
|
||||||
}
|
}
|
||||||
// Verify that the gasUsed is <= gasLimit
|
// Verify that the gasUsed is <= gasLimit
|
||||||
if header.GasUsed > header.GasLimit {
|
if header.GasUsed > header.GasLimit {
|
||||||
|
@@ -295,9 +295,8 @@ func (c *Clique) verifyHeader(chain consensus.ChainHeaderReader, header *types.H
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Verify that the gas limit is <= 2^63-1
|
// Verify that the gas limit is <= 2^63-1
|
||||||
cap := uint64(0x7fffffffffffffff)
|
if header.GasLimit > params.MaxGasLimit {
|
||||||
if header.GasLimit > cap {
|
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
|
||||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
|
|
||||||
}
|
}
|
||||||
// If all checks passed, validate any special fields for hard forks
|
// If all checks passed, validate any special fields for hard forks
|
||||||
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
|
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
|
||||||
|
@@ -34,7 +34,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -282,9 +281,8 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa
|
|||||||
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
|
||||||
}
|
}
|
||||||
// Verify that the gas limit is <= 2^63-1
|
// Verify that the gas limit is <= 2^63-1
|
||||||
cap := uint64(0x7fffffffffffffff)
|
if header.GasLimit > params.MaxGasLimit {
|
||||||
if header.GasLimit > cap {
|
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
|
||||||
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, cap)
|
|
||||||
}
|
}
|
||||||
// Verify that the gasUsed is <= gasLimit
|
// Verify that the gasUsed is <= gasLimit
|
||||||
if header.GasUsed > header.GasLimit {
|
if header.GasUsed > header.GasLimit {
|
||||||
@@ -661,14 +659,10 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header
|
|||||||
r.Sub(r, header.Number)
|
r.Sub(r, header.Number)
|
||||||
r.Mul(r, blockReward)
|
r.Mul(r, blockReward)
|
||||||
r.Div(r, big8)
|
r.Div(r, big8)
|
||||||
uncleCoinbase := utils.GetTreeKeyBalance(uncle.Coinbase.Bytes())
|
|
||||||
state.Witness().TouchAddress(uncleCoinbase, state.GetBalance(uncle.Coinbase).Bytes())
|
|
||||||
state.AddBalance(uncle.Coinbase, r)
|
state.AddBalance(uncle.Coinbase, r)
|
||||||
|
|
||||||
r.Div(blockReward, big32)
|
r.Div(blockReward, big32)
|
||||||
reward.Add(reward, r)
|
reward.Add(reward, r)
|
||||||
}
|
}
|
||||||
coinbase := utils.GetTreeKeyBalance(header.Coinbase.Bytes())
|
|
||||||
state.Witness().TouchAddress(coinbase, state.GetBalance(header.Coinbase).Bytes())
|
|
||||||
state.AddBalance(header.Coinbase, reward)
|
state.AddBalance(header.Coinbase, reward)
|
||||||
}
|
}
|
||||||
|
@@ -17,6 +17,7 @@
|
|||||||
package console
|
package console
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@@ -26,6 +27,7 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
|
|
||||||
"github.com/dop251/goja"
|
"github.com/dop251/goja"
|
||||||
@@ -74,6 +76,13 @@ type Console struct {
|
|||||||
histPath string // Absolute path to the console scrollback history
|
histPath string // Absolute path to the console scrollback history
|
||||||
history []string // Scroll history maintained by the console
|
history []string // Scroll history maintained by the console
|
||||||
printer io.Writer // Output writer to serialize any display strings to
|
printer io.Writer // Output writer to serialize any display strings to
|
||||||
|
|
||||||
|
interactiveStopped chan struct{}
|
||||||
|
stopInteractiveCh chan struct{}
|
||||||
|
signalReceived chan struct{}
|
||||||
|
stopped chan struct{}
|
||||||
|
wg sync.WaitGroup
|
||||||
|
stopOnce sync.Once
|
||||||
}
|
}
|
||||||
|
|
||||||
// New initializes a JavaScript interpreted runtime environment and sets defaults
|
// New initializes a JavaScript interpreted runtime environment and sets defaults
|
||||||
@@ -92,12 +101,16 @@ func New(config Config) (*Console, error) {
|
|||||||
|
|
||||||
// Initialize the console and return
|
// Initialize the console and return
|
||||||
console := &Console{
|
console := &Console{
|
||||||
client: config.Client,
|
client: config.Client,
|
||||||
jsre: jsre.New(config.DocRoot, config.Printer),
|
jsre: jsre.New(config.DocRoot, config.Printer),
|
||||||
prompt: config.Prompt,
|
prompt: config.Prompt,
|
||||||
prompter: config.Prompter,
|
prompter: config.Prompter,
|
||||||
printer: config.Printer,
|
printer: config.Printer,
|
||||||
histPath: filepath.Join(config.DataDir, HistoryFile),
|
histPath: filepath.Join(config.DataDir, HistoryFile),
|
||||||
|
interactiveStopped: make(chan struct{}),
|
||||||
|
stopInteractiveCh: make(chan struct{}),
|
||||||
|
signalReceived: make(chan struct{}, 1),
|
||||||
|
stopped: make(chan struct{}),
|
||||||
}
|
}
|
||||||
if err := os.MkdirAll(config.DataDir, 0700); err != nil {
|
if err := os.MkdirAll(config.DataDir, 0700); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -105,6 +118,10 @@ func New(config Config) (*Console, error) {
|
|||||||
if err := console.init(config.Preload); err != nil {
|
if err := console.init(config.Preload); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
console.wg.Add(1)
|
||||||
|
go console.interruptHandler()
|
||||||
|
|
||||||
return console, nil
|
return console, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -337,9 +354,63 @@ func (c *Console) Evaluate(statement string) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
c.jsre.Evaluate(statement, c.printer)
|
c.jsre.Evaluate(statement, c.printer)
|
||||||
|
|
||||||
|
// Avoid exiting Interactive when jsre was interrupted by SIGINT.
|
||||||
|
c.clearSignalReceived()
|
||||||
}
|
}
|
||||||
|
|
||||||
// Interactive starts an interactive user session, where input is propted from
|
// interruptHandler runs in its own goroutine and waits for signals.
|
||||||
|
// When a signal is received, it interrupts the JS interpreter.
|
||||||
|
func (c *Console) interruptHandler() {
|
||||||
|
defer c.wg.Done()
|
||||||
|
|
||||||
|
// During Interactive, liner inhibits the signal while it is prompting for
|
||||||
|
// input. However, the signal will be received while evaluating JS.
|
||||||
|
//
|
||||||
|
// On unsupported terminals, SIGINT can also happen while prompting.
|
||||||
|
// Unfortunately, it is not possible to abort the prompt in this case and
|
||||||
|
// the c.readLines goroutine leaks.
|
||||||
|
sig := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(sig, syscall.SIGINT)
|
||||||
|
defer signal.Stop(sig)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-sig:
|
||||||
|
c.setSignalReceived()
|
||||||
|
c.jsre.Interrupt(errors.New("interrupted"))
|
||||||
|
case <-c.stopInteractiveCh:
|
||||||
|
close(c.interactiveStopped)
|
||||||
|
c.jsre.Interrupt(errors.New("interrupted"))
|
||||||
|
case <-c.stopped:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Console) setSignalReceived() {
|
||||||
|
select {
|
||||||
|
case c.signalReceived <- struct{}{}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Console) clearSignalReceived() {
|
||||||
|
select {
|
||||||
|
case <-c.signalReceived:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StopInteractive causes Interactive to return as soon as possible.
|
||||||
|
func (c *Console) StopInteractive() {
|
||||||
|
select {
|
||||||
|
case c.stopInteractiveCh <- struct{}{}:
|
||||||
|
case <-c.stopped:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Interactive starts an interactive user session, where in.put is propted from
|
||||||
// the configured user prompter.
|
// the configured user prompter.
|
||||||
func (c *Console) Interactive() {
|
func (c *Console) Interactive() {
|
||||||
var (
|
var (
|
||||||
@@ -349,15 +420,11 @@ func (c *Console) Interactive() {
|
|||||||
inputLine = make(chan string, 1) // receives user input
|
inputLine = make(chan string, 1) // receives user input
|
||||||
inputErr = make(chan error, 1) // receives liner errors
|
inputErr = make(chan error, 1) // receives liner errors
|
||||||
requestLine = make(chan string) // requests a line of input
|
requestLine = make(chan string) // requests a line of input
|
||||||
interrupt = make(chan os.Signal, 1)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Monitor Ctrl-C. While liner does turn on the relevant terminal mode bits to avoid
|
defer func() {
|
||||||
// the signal, a signal can still be received for unsupported terminals. Unfortunately
|
c.writeHistory()
|
||||||
// there is no way to cancel the line reader when this happens. The readLines
|
}()
|
||||||
// goroutine will be leaked in this case.
|
|
||||||
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
|
|
||||||
defer signal.Stop(interrupt)
|
|
||||||
|
|
||||||
// The line reader runs in a separate goroutine.
|
// The line reader runs in a separate goroutine.
|
||||||
go c.readLines(inputLine, inputErr, requestLine)
|
go c.readLines(inputLine, inputErr, requestLine)
|
||||||
@@ -368,7 +435,14 @@ func (c *Console) Interactive() {
|
|||||||
requestLine <- prompt
|
requestLine <- prompt
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-interrupt:
|
case <-c.interactiveStopped:
|
||||||
|
fmt.Fprintln(c.printer, "node is down, exiting console")
|
||||||
|
return
|
||||||
|
|
||||||
|
case <-c.signalReceived:
|
||||||
|
// SIGINT received while prompting for input -> unsupported terminal.
|
||||||
|
// I'm not sure if the best choice would be to leave the console running here.
|
||||||
|
// Bash keeps running in this case. node.js does not.
|
||||||
fmt.Fprintln(c.printer, "caught interrupt, exiting")
|
fmt.Fprintln(c.printer, "caught interrupt, exiting")
|
||||||
return
|
return
|
||||||
|
|
||||||
@@ -476,12 +550,19 @@ func (c *Console) Execute(path string) error {
|
|||||||
|
|
||||||
// Stop cleans up the console and terminates the runtime environment.
|
// Stop cleans up the console and terminates the runtime environment.
|
||||||
func (c *Console) Stop(graceful bool) error {
|
func (c *Console) Stop(graceful bool) error {
|
||||||
if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
|
c.stopOnce.Do(func() {
|
||||||
return err
|
// Stop the interrupt handler.
|
||||||
}
|
close(c.stopped)
|
||||||
if err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously
|
c.wg.Wait()
|
||||||
return err
|
})
|
||||||
}
|
|
||||||
c.jsre.Stop(graceful)
|
c.jsre.Stop(graceful)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *Console) writeHistory() error {
|
||||||
|
if err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, "\n")), 0600); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Chmod(c.histPath, 0600) // Force 0600, even if it was different previously
|
||||||
|
}
|
||||||
|
@@ -68,10 +68,10 @@ func (it tokenType) String() string {
|
|||||||
|
|
||||||
var stringtokenTypes = []string{
|
var stringtokenTypes = []string{
|
||||||
eof: "EOF",
|
eof: "EOF",
|
||||||
|
lineStart: "new line",
|
||||||
|
lineEnd: "end of line",
|
||||||
invalidStatement: "invalid statement",
|
invalidStatement: "invalid statement",
|
||||||
element: "element",
|
element: "element",
|
||||||
lineEnd: "end of line",
|
|
||||||
lineStart: "new line",
|
|
||||||
label: "label",
|
label: "label",
|
||||||
labelDef: "label definition",
|
labelDef: "label definition",
|
||||||
number: "number",
|
number: "number",
|
||||||
|
29
core/beacon/errors.go
Normal file
29
core/beacon/errors.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||||
|
|
||||||
|
package beacon
|
||||||
|
|
||||||
|
import "github.com/ethereum/go-ethereum/rpc"
|
||||||
|
|
||||||
|
var (
|
||||||
|
VALID = GenericStringResponse{"VALID"}
|
||||||
|
SUCCESS = GenericStringResponse{"SUCCESS"}
|
||||||
|
INVALID = ForkChoiceResponse{Status: "INVALID", PayloadID: nil}
|
||||||
|
SYNCING = ForkChoiceResponse{Status: "SYNCING", PayloadID: nil}
|
||||||
|
GenericServerError = rpc.CustomError{Code: -32000, ValidationError: "Server error"}
|
||||||
|
UnknownPayload = rpc.CustomError{Code: -32001, ValidationError: "Unknown payload"}
|
||||||
|
InvalidTB = rpc.CustomError{Code: -32002, ValidationError: "Invalid terminal block"}
|
||||||
|
)
|
53
core/beacon/gen_blockparams.go
Normal file
53
core/beacon/gen_blockparams.go
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
||||||
|
|
||||||
|
package beacon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = (*payloadAttributesMarshaling)(nil)
|
||||||
|
|
||||||
|
// MarshalJSON marshals as JSON.
|
||||||
|
func (p PayloadAttributesV1) MarshalJSON() ([]byte, error) {
|
||||||
|
type PayloadAttributesV1 struct {
|
||||||
|
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
||||||
|
Random common.Hash `json:"random" gencodec:"required"`
|
||||||
|
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
||||||
|
}
|
||||||
|
var enc PayloadAttributesV1
|
||||||
|
enc.Timestamp = hexutil.Uint64(p.Timestamp)
|
||||||
|
enc.Random = p.Random
|
||||||
|
enc.SuggestedFeeRecipient = p.SuggestedFeeRecipient
|
||||||
|
return json.Marshal(&enc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON unmarshals from JSON.
|
||||||
|
func (p *PayloadAttributesV1) UnmarshalJSON(input []byte) error {
|
||||||
|
type PayloadAttributesV1 struct {
|
||||||
|
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
||||||
|
Random *common.Hash `json:"random" gencodec:"required"`
|
||||||
|
SuggestedFeeRecipient *common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
||||||
|
}
|
||||||
|
var dec PayloadAttributesV1
|
||||||
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if dec.Timestamp == nil {
|
||||||
|
return errors.New("missing required field 'timestamp' for PayloadAttributesV1")
|
||||||
|
}
|
||||||
|
p.Timestamp = uint64(*dec.Timestamp)
|
||||||
|
if dec.Random == nil {
|
||||||
|
return errors.New("missing required field 'random' for PayloadAttributesV1")
|
||||||
|
}
|
||||||
|
p.Random = *dec.Random
|
||||||
|
if dec.SuggestedFeeRecipient == nil {
|
||||||
|
return errors.New("missing required field 'suggestedFeeRecipient' for PayloadAttributesV1")
|
||||||
|
}
|
||||||
|
p.SuggestedFeeRecipient = *dec.SuggestedFeeRecipient
|
||||||
|
return nil
|
||||||
|
}
|
@@ -1,6 +1,6 @@
|
|||||||
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
// Code generated by github.com/fjl/gencodec. DO NOT EDIT.
|
||||||
|
|
||||||
package catalyst
|
package beacon
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -14,13 +14,12 @@ import (
|
|||||||
var _ = (*executableDataMarshaling)(nil)
|
var _ = (*executableDataMarshaling)(nil)
|
||||||
|
|
||||||
// MarshalJSON marshals as JSON.
|
// MarshalJSON marshals as JSON.
|
||||||
func (e ExecutableData) MarshalJSON() ([]byte, error) {
|
func (e ExecutableDataV1) MarshalJSON() ([]byte, error) {
|
||||||
type ExecutableData struct {
|
type ExecutableDataV1 struct {
|
||||||
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
|
|
||||||
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
|
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
|
||||||
Coinbase common.Address `json:"coinbase" gencodec:"required"`
|
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
|
||||||
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
|
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
|
||||||
ReceiptRoot common.Hash `json:"receiptRoot" gencodec:"required"`
|
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
|
||||||
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
|
LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"`
|
||||||
Random common.Hash `json:"random" gencodec:"required"`
|
Random common.Hash `json:"random" gencodec:"required"`
|
||||||
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
|
Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
|
||||||
@@ -29,14 +28,14 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
|
|||||||
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
||||||
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
|
ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"`
|
||||||
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
|
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
|
||||||
|
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
|
||||||
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
|
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
|
||||||
}
|
}
|
||||||
var enc ExecutableData
|
var enc ExecutableDataV1
|
||||||
enc.BlockHash = e.BlockHash
|
|
||||||
enc.ParentHash = e.ParentHash
|
enc.ParentHash = e.ParentHash
|
||||||
enc.Coinbase = e.Coinbase
|
enc.FeeRecipient = e.FeeRecipient
|
||||||
enc.StateRoot = e.StateRoot
|
enc.StateRoot = e.StateRoot
|
||||||
enc.ReceiptRoot = e.ReceiptRoot
|
enc.ReceiptsRoot = e.ReceiptsRoot
|
||||||
enc.LogsBloom = e.LogsBloom
|
enc.LogsBloom = e.LogsBloom
|
||||||
enc.Random = e.Random
|
enc.Random = e.Random
|
||||||
enc.Number = hexutil.Uint64(e.Number)
|
enc.Number = hexutil.Uint64(e.Number)
|
||||||
@@ -45,6 +44,7 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
|
|||||||
enc.Timestamp = hexutil.Uint64(e.Timestamp)
|
enc.Timestamp = hexutil.Uint64(e.Timestamp)
|
||||||
enc.ExtraData = e.ExtraData
|
enc.ExtraData = e.ExtraData
|
||||||
enc.BaseFeePerGas = (*hexutil.Big)(e.BaseFeePerGas)
|
enc.BaseFeePerGas = (*hexutil.Big)(e.BaseFeePerGas)
|
||||||
|
enc.BlockHash = e.BlockHash
|
||||||
if e.Transactions != nil {
|
if e.Transactions != nil {
|
||||||
enc.Transactions = make([]hexutil.Bytes, len(e.Transactions))
|
enc.Transactions = make([]hexutil.Bytes, len(e.Transactions))
|
||||||
for k, v := range e.Transactions {
|
for k, v := range e.Transactions {
|
||||||
@@ -55,13 +55,12 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalJSON unmarshals from JSON.
|
// UnmarshalJSON unmarshals from JSON.
|
||||||
func (e *ExecutableData) UnmarshalJSON(input []byte) error {
|
func (e *ExecutableDataV1) UnmarshalJSON(input []byte) error {
|
||||||
type ExecutableData struct {
|
type ExecutableDataV1 struct {
|
||||||
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
|
|
||||||
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
|
ParentHash *common.Hash `json:"parentHash" gencodec:"required"`
|
||||||
Coinbase *common.Address `json:"coinbase" gencodec:"required"`
|
FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"`
|
||||||
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
|
StateRoot *common.Hash `json:"stateRoot" gencodec:"required"`
|
||||||
ReceiptRoot *common.Hash `json:"receiptRoot" gencodec:"required"`
|
ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"`
|
||||||
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
|
LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"`
|
||||||
Random *common.Hash `json:"random" gencodec:"required"`
|
Random *common.Hash `json:"random" gencodec:"required"`
|
||||||
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
|
Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"`
|
||||||
@@ -70,66 +69,67 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error {
|
|||||||
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"`
|
||||||
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
|
ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"`
|
||||||
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
|
BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"`
|
||||||
|
BlockHash *common.Hash `json:"blockHash" gencodec:"required"`
|
||||||
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
|
Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"`
|
||||||
}
|
}
|
||||||
var dec ExecutableData
|
var dec ExecutableDataV1
|
||||||
if err := json.Unmarshal(input, &dec); err != nil {
|
if err := json.Unmarshal(input, &dec); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if dec.BlockHash == nil {
|
|
||||||
return errors.New("missing required field 'blockHash' for ExecutableData")
|
|
||||||
}
|
|
||||||
e.BlockHash = *dec.BlockHash
|
|
||||||
if dec.ParentHash == nil {
|
if dec.ParentHash == nil {
|
||||||
return errors.New("missing required field 'parentHash' for ExecutableData")
|
return errors.New("missing required field 'parentHash' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.ParentHash = *dec.ParentHash
|
e.ParentHash = *dec.ParentHash
|
||||||
if dec.Coinbase == nil {
|
if dec.FeeRecipient == nil {
|
||||||
return errors.New("missing required field 'coinbase' for ExecutableData")
|
return errors.New("missing required field 'feeRecipient' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.Coinbase = *dec.Coinbase
|
e.FeeRecipient = *dec.FeeRecipient
|
||||||
if dec.StateRoot == nil {
|
if dec.StateRoot == nil {
|
||||||
return errors.New("missing required field 'stateRoot' for ExecutableData")
|
return errors.New("missing required field 'stateRoot' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.StateRoot = *dec.StateRoot
|
e.StateRoot = *dec.StateRoot
|
||||||
if dec.ReceiptRoot == nil {
|
if dec.ReceiptsRoot == nil {
|
||||||
return errors.New("missing required field 'receiptRoot' for ExecutableData")
|
return errors.New("missing required field 'receiptsRoot' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.ReceiptRoot = *dec.ReceiptRoot
|
e.ReceiptsRoot = *dec.ReceiptsRoot
|
||||||
if dec.LogsBloom == nil {
|
if dec.LogsBloom == nil {
|
||||||
return errors.New("missing required field 'logsBloom' for ExecutableData")
|
return errors.New("missing required field 'logsBloom' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.LogsBloom = *dec.LogsBloom
|
e.LogsBloom = *dec.LogsBloom
|
||||||
if dec.Random == nil {
|
if dec.Random == nil {
|
||||||
return errors.New("missing required field 'random' for ExecutableData")
|
return errors.New("missing required field 'random' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.Random = *dec.Random
|
e.Random = *dec.Random
|
||||||
if dec.Number == nil {
|
if dec.Number == nil {
|
||||||
return errors.New("missing required field 'blockNumber' for ExecutableData")
|
return errors.New("missing required field 'blockNumber' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.Number = uint64(*dec.Number)
|
e.Number = uint64(*dec.Number)
|
||||||
if dec.GasLimit == nil {
|
if dec.GasLimit == nil {
|
||||||
return errors.New("missing required field 'gasLimit' for ExecutableData")
|
return errors.New("missing required field 'gasLimit' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.GasLimit = uint64(*dec.GasLimit)
|
e.GasLimit = uint64(*dec.GasLimit)
|
||||||
if dec.GasUsed == nil {
|
if dec.GasUsed == nil {
|
||||||
return errors.New("missing required field 'gasUsed' for ExecutableData")
|
return errors.New("missing required field 'gasUsed' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.GasUsed = uint64(*dec.GasUsed)
|
e.GasUsed = uint64(*dec.GasUsed)
|
||||||
if dec.Timestamp == nil {
|
if dec.Timestamp == nil {
|
||||||
return errors.New("missing required field 'timestamp' for ExecutableData")
|
return errors.New("missing required field 'timestamp' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.Timestamp = uint64(*dec.Timestamp)
|
e.Timestamp = uint64(*dec.Timestamp)
|
||||||
if dec.ExtraData == nil {
|
if dec.ExtraData == nil {
|
||||||
return errors.New("missing required field 'extraData' for ExecutableData")
|
return errors.New("missing required field 'extraData' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.ExtraData = *dec.ExtraData
|
e.ExtraData = *dec.ExtraData
|
||||||
if dec.BaseFeePerGas == nil {
|
if dec.BaseFeePerGas == nil {
|
||||||
return errors.New("missing required field 'baseFeePerGas' for ExecutableData")
|
return errors.New("missing required field 'baseFeePerGas' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas)
|
e.BaseFeePerGas = (*big.Int)(dec.BaseFeePerGas)
|
||||||
|
if dec.BlockHash == nil {
|
||||||
|
return errors.New("missing required field 'blockHash' for ExecutableDataV1")
|
||||||
|
}
|
||||||
|
e.BlockHash = *dec.BlockHash
|
||||||
if dec.Transactions == nil {
|
if dec.Transactions == nil {
|
||||||
return errors.New("missing required field 'transactions' for ExecutableData")
|
return errors.New("missing required field 'transactions' for ExecutableDataV1")
|
||||||
}
|
}
|
||||||
e.Transactions = make([][]byte, len(dec.Transactions))
|
e.Transactions = make([][]byte, len(dec.Transactions))
|
||||||
for k, v := range dec.Transactions {
|
for k, v := range dec.Transactions {
|
204
core/beacon/types.go
Normal file
204
core/beacon/types.go
Normal file
@@ -0,0 +1,204 @@
|
|||||||
|
// Copyright 2022 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package beacon
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||||
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:generate go run github.com/fjl/gencodec -type PayloadAttributesV1 -field-override payloadAttributesMarshaling -out gen_blockparams.go
|
||||||
|
|
||||||
|
// PayloadAttributesV1 structure described at https://github.com/ethereum/execution-apis/pull/74
|
||||||
|
type PayloadAttributesV1 struct {
|
||||||
|
Timestamp uint64 `json:"timestamp" gencodec:"required"`
|
||||||
|
Random common.Hash `json:"random" gencodec:"required"`
|
||||||
|
SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSON type overrides for PayloadAttributesV1.
|
||||||
|
type payloadAttributesMarshaling struct {
|
||||||
|
Timestamp hexutil.Uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate go run github.com/fjl/gencodec -type ExecutableDataV1 -field-override executableDataMarshaling -out gen_ed.go
|
||||||
|
|
||||||
|
// ExecutableDataV1 structure described at https://github.com/ethereum/execution-apis/src/engine/specification.md
|
||||||
|
type ExecutableDataV1 struct {
|
||||||
|
ParentHash common.Hash `json:"parentHash" gencodec:"required"`
|
||||||
|
FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"`
|
||||||
|
StateRoot common.Hash `json:"stateRoot" gencodec:"required"`
|
||||||
|
ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"`
|
||||||
|
LogsBloom []byte `json:"logsBloom" gencodec:"required"`
|
||||||
|
Random common.Hash `json:"random" gencodec:"required"`
|
||||||
|
Number uint64 `json:"blockNumber" gencodec:"required"`
|
||||||
|
GasLimit uint64 `json:"gasLimit" gencodec:"required"`
|
||||||
|
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
|
||||||
|
Timestamp uint64 `json:"timestamp" gencodec:"required"`
|
||||||
|
ExtraData []byte `json:"extraData" gencodec:"required"`
|
||||||
|
BaseFeePerGas *big.Int `json:"baseFeePerGas" gencodec:"required"`
|
||||||
|
BlockHash common.Hash `json:"blockHash" gencodec:"required"`
|
||||||
|
Transactions [][]byte `json:"transactions" gencodec:"required"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// JSON type overrides for executableData.
|
||||||
|
type executableDataMarshaling struct {
|
||||||
|
Number hexutil.Uint64
|
||||||
|
GasLimit hexutil.Uint64
|
||||||
|
GasUsed hexutil.Uint64
|
||||||
|
Timestamp hexutil.Uint64
|
||||||
|
BaseFeePerGas *hexutil.Big
|
||||||
|
ExtraData hexutil.Bytes
|
||||||
|
LogsBloom hexutil.Bytes
|
||||||
|
Transactions []hexutil.Bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
type NewBlockResponse struct {
|
||||||
|
Valid bool `json:"valid"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GenericResponse struct {
|
||||||
|
Success bool `json:"success"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type GenericStringResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ExecutePayloadResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
LatestValidHash common.Hash `json:"latestValidHash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ConsensusValidatedParams struct {
|
||||||
|
BlockHash common.Hash `json:"blockHash"`
|
||||||
|
Status string `json:"status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PayloadID is an identifier of the payload build process
|
||||||
|
type PayloadID [8]byte
|
||||||
|
|
||||||
|
func (b PayloadID) String() string {
|
||||||
|
return hexutil.Encode(b[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b PayloadID) MarshalText() ([]byte, error) {
|
||||||
|
return hexutil.Bytes(b[:]).MarshalText()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *PayloadID) UnmarshalText(input []byte) error {
|
||||||
|
err := hexutil.UnmarshalFixedText("PayloadID", input, b[:])
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid payload id %q: %w", input, err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type ForkChoiceResponse struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
PayloadID *PayloadID `json:"payloadId"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ForkchoiceStateV1 struct {
|
||||||
|
HeadBlockHash common.Hash `json:"headBlockHash"`
|
||||||
|
SafeBlockHash common.Hash `json:"safeBlockHash"`
|
||||||
|
FinalizedBlockHash common.Hash `json:"finalizedBlockHash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeTransactions(txs []*types.Transaction) [][]byte {
|
||||||
|
var enc = make([][]byte, len(txs))
|
||||||
|
for i, tx := range txs {
|
||||||
|
enc[i], _ = tx.MarshalBinary()
|
||||||
|
}
|
||||||
|
return enc
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeTransactions(enc [][]byte) ([]*types.Transaction, error) {
|
||||||
|
var txs = make([]*types.Transaction, len(enc))
|
||||||
|
for i, encTx := range enc {
|
||||||
|
var tx types.Transaction
|
||||||
|
if err := tx.UnmarshalBinary(encTx); err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid transaction %d: %v", i, err)
|
||||||
|
}
|
||||||
|
txs[i] = &tx
|
||||||
|
}
|
||||||
|
return txs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecutableDataToBlock constructs a block from executable data.
|
||||||
|
// It verifies that the following fields:
|
||||||
|
// len(extraData) <= 32
|
||||||
|
// uncleHash = emptyUncleHash
|
||||||
|
// difficulty = 0
|
||||||
|
// and that the blockhash of the constructed block matches the parameters.
|
||||||
|
func ExecutableDataToBlock(params ExecutableDataV1) (*types.Block, error) {
|
||||||
|
txs, err := decodeTransactions(params.Transactions)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(params.ExtraData) > 32 {
|
||||||
|
return nil, fmt.Errorf("invalid extradata length: %v", len(params.ExtraData))
|
||||||
|
}
|
||||||
|
header := &types.Header{
|
||||||
|
ParentHash: params.ParentHash,
|
||||||
|
UncleHash: types.EmptyUncleHash,
|
||||||
|
Coinbase: params.FeeRecipient,
|
||||||
|
Root: params.StateRoot,
|
||||||
|
TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)),
|
||||||
|
ReceiptHash: params.ReceiptsRoot,
|
||||||
|
Bloom: types.BytesToBloom(params.LogsBloom),
|
||||||
|
Difficulty: common.Big0,
|
||||||
|
Number: new(big.Int).SetUint64(params.Number),
|
||||||
|
GasLimit: params.GasLimit,
|
||||||
|
GasUsed: params.GasUsed,
|
||||||
|
Time: params.Timestamp,
|
||||||
|
BaseFee: params.BaseFeePerGas,
|
||||||
|
Extra: params.ExtraData,
|
||||||
|
MixDigest: params.Random,
|
||||||
|
}
|
||||||
|
block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */)
|
||||||
|
if block.Hash() != params.BlockHash {
|
||||||
|
return nil, fmt.Errorf("blockhash mismatch, want %x, got %x", params.BlockHash, block.Hash())
|
||||||
|
}
|
||||||
|
return block, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BlockToExecutableData constructs the executableDataV1 structure by filling the
|
||||||
|
// fields from the given block. It assumes the given block is post-merge block.
|
||||||
|
func BlockToExecutableData(block *types.Block) *ExecutableDataV1 {
|
||||||
|
return &ExecutableDataV1{
|
||||||
|
BlockHash: block.Hash(),
|
||||||
|
ParentHash: block.ParentHash(),
|
||||||
|
FeeRecipient: block.Coinbase(),
|
||||||
|
StateRoot: block.Root(),
|
||||||
|
Number: block.NumberU64(),
|
||||||
|
GasLimit: block.GasLimit(),
|
||||||
|
GasUsed: block.GasUsed(),
|
||||||
|
BaseFeePerGas: block.BaseFee(),
|
||||||
|
Timestamp: block.Time(),
|
||||||
|
ReceiptsRoot: block.ReceiptHash(),
|
||||||
|
LogsBloom: block.Bloom().Bytes(),
|
||||||
|
Transactions: encodeTransactions(block.Transactions()),
|
||||||
|
Random: block.MixDigest(),
|
||||||
|
ExtraData: block.Extra(),
|
||||||
|
}
|
||||||
|
}
|
@@ -226,10 +226,15 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
|
|||||||
futureBlocks, _ := lru.New(maxFutureBlocks)
|
futureBlocks, _ := lru.New(maxFutureBlocks)
|
||||||
|
|
||||||
bc := &BlockChain{
|
bc := &BlockChain{
|
||||||
chainConfig: chainConfig,
|
chainConfig: chainConfig,
|
||||||
cacheConfig: cacheConfig,
|
cacheConfig: cacheConfig,
|
||||||
db: db,
|
db: db,
|
||||||
triegc: prque.New(nil),
|
triegc: prque.New(nil),
|
||||||
|
stateCache: state.NewDatabaseWithConfig(db, &trie.Config{
|
||||||
|
Cache: cacheConfig.TrieCleanLimit,
|
||||||
|
Journal: cacheConfig.TrieCleanJournal,
|
||||||
|
Preimages: cacheConfig.Preimages,
|
||||||
|
}),
|
||||||
quit: make(chan struct{}),
|
quit: make(chan struct{}),
|
||||||
chainmu: syncx.NewClosableMutex(),
|
chainmu: syncx.NewClosableMutex(),
|
||||||
bodyCache: bodyCache,
|
bodyCache: bodyCache,
|
||||||
@@ -278,13 +283,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
|
|||||||
|
|
||||||
// Make sure the state associated with the block is available
|
// Make sure the state associated with the block is available
|
||||||
head := bc.CurrentBlock()
|
head := bc.CurrentBlock()
|
||||||
bc.stateCache = state.NewDatabaseWithConfig(db, &trie.Config{
|
|
||||||
Cache: cacheConfig.TrieCleanLimit,
|
|
||||||
Journal: cacheConfig.TrieCleanJournal,
|
|
||||||
Preimages: cacheConfig.Preimages,
|
|
||||||
UseVerkle: chainConfig.IsCancun(head.Header().Number),
|
|
||||||
})
|
|
||||||
|
|
||||||
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
|
if _, err := state.New(head.Root(), bc.stateCache, bc.snaps); err != nil {
|
||||||
// Head state is missing, before the state recovery, find out the
|
// Head state is missing, before the state recovery, find out the
|
||||||
// disk layer point of snapshot(if it's enabled). Make sure the
|
// disk layer point of snapshot(if it's enabled). Make sure the
|
||||||
@@ -377,7 +375,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par
|
|||||||
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
|
log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer)
|
||||||
recover = true
|
recover = true
|
||||||
}
|
}
|
||||||
bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover, chainConfig.IsCancun(head.Header().Number))
|
bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start future block processor.
|
// Start future block processor.
|
||||||
@@ -556,7 +554,7 @@ func (bc *BlockChain) setHeadBeyondRoot(head uint64, root common.Hash, repair bo
|
|||||||
// Degrade the chain markers if they are explicitly reverted.
|
// Degrade the chain markers if they are explicitly reverted.
|
||||||
// In theory we should update all in-memory markers in the
|
// In theory we should update all in-memory markers in the
|
||||||
// last step, however the direction of SetHead is from high
|
// last step, however the direction of SetHead is from high
|
||||||
// to low, so it's safe the update in-memory markers directly.
|
// to low, so it's safe to update in-memory markers directly.
|
||||||
bc.currentBlock.Store(newHeadBlock)
|
bc.currentBlock.Store(newHeadBlock)
|
||||||
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
|
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
|
||||||
}
|
}
|
||||||
@@ -981,32 +979,31 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
|
|||||||
// range. In this case, all tx indices of newly imported blocks should be
|
// range. In this case, all tx indices of newly imported blocks should be
|
||||||
// generated.
|
// generated.
|
||||||
var batch = bc.db.NewBatch()
|
var batch = bc.db.NewBatch()
|
||||||
for _, block := range blockChain {
|
for i, block := range blockChain {
|
||||||
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
|
if bc.txLookupLimit == 0 || ancientLimit <= bc.txLookupLimit || block.NumberU64() >= ancientLimit-bc.txLookupLimit {
|
||||||
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
||||||
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
|
} else if rawdb.ReadTxIndexTail(bc.db) != nil {
|
||||||
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
rawdb.WriteTxLookupEntriesByBlock(batch, block)
|
||||||
}
|
}
|
||||||
stats.processed++
|
stats.processed++
|
||||||
}
|
|
||||||
|
|
||||||
// Flush all tx-lookup index data.
|
if batch.ValueSize() > ethdb.IdealBatchSize || i == len(blockChain)-1 {
|
||||||
size += int64(batch.ValueSize())
|
size += int64(batch.ValueSize())
|
||||||
if err := batch.Write(); err != nil {
|
if err = batch.Write(); err != nil {
|
||||||
// The tx index data could not be written.
|
fastBlock := bc.CurrentFastBlock().NumberU64()
|
||||||
// Roll back the ancient store update.
|
if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
|
||||||
fastBlock := bc.CurrentFastBlock().NumberU64()
|
log.Error("Can't truncate ancient store after failed insert", "err", err)
|
||||||
if err := bc.db.TruncateAncients(fastBlock + 1); err != nil {
|
}
|
||||||
log.Error("Can't truncate ancient store after failed insert", "err", err)
|
return 0, err
|
||||||
|
}
|
||||||
|
batch.Reset()
|
||||||
}
|
}
|
||||||
return 0, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
|
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
|
||||||
if err := bc.db.Sync(); err != nil {
|
if err := bc.db.Sync(); err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the current fast block because all block data is now present in DB.
|
// Update the current fast block because all block data is now present in DB.
|
||||||
previousFastBlock := bc.CurrentFastBlock().NumberU64()
|
previousFastBlock := bc.CurrentFastBlock().NumberU64()
|
||||||
if !updateHead(blockChain[len(blockChain)-1]) {
|
if !updateHead(blockChain[len(blockChain)-1]) {
|
||||||
@@ -1594,12 +1591,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool)
|
|||||||
|
|
||||||
// Process block using the parent state as reference point
|
// Process block using the parent state as reference point
|
||||||
substart := time.Now()
|
substart := time.Now()
|
||||||
var (
|
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig)
|
||||||
usedGas uint64
|
|
||||||
receipts types.Receipts
|
|
||||||
logs []*types.Log
|
|
||||||
)
|
|
||||||
receipts, logs, usedGas, err = bc.processor.Process(block, statedb, bc.vmConfig)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
bc.reportBlock(block, receipts, err)
|
bc.reportBlock(block, receipts, err)
|
||||||
atomic.StoreUint32(&followupInterrupt, 1)
|
atomic.StoreUint32(&followupInterrupt, 1)
|
||||||
@@ -2211,7 +2203,14 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) {
|
|||||||
// If a previous indexing existed, make sure that we fill in any missing entries
|
// If a previous indexing existed, make sure that we fill in any missing entries
|
||||||
if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
|
if bc.txLookupLimit == 0 || head < bc.txLookupLimit {
|
||||||
if *tail > 0 {
|
if *tail > 0 {
|
||||||
rawdb.IndexTransactions(bc.db, 0, *tail, bc.quit)
|
// It can happen when chain is rewound to a historical point which
|
||||||
|
// is even lower than the indexes tail, recap the indexing target
|
||||||
|
// to new head to avoid reading non-existent block bodies.
|
||||||
|
end := *tail
|
||||||
|
if end > head+1 {
|
||||||
|
end = head + 1
|
||||||
|
}
|
||||||
|
rawdb.IndexTransactions(bc.db, 0, end, bc.quit)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@@ -73,6 +73,12 @@ func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
|
|||||||
return bc.hc.GetHeaderByNumber(number)
|
return bc.hc.GetHeaderByNumber(number)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
|
||||||
|
// backwards from the given number.
|
||||||
|
func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
|
||||||
|
return bc.hc.GetHeadersFrom(number, count)
|
||||||
|
}
|
||||||
|
|
||||||
// GetBody retrieves a block body (transactions and uncles) from the database by
|
// GetBody retrieves a block body (transactions and uncles) from the database by
|
||||||
// hash, caching it if found.
|
// hash, caching it if found.
|
||||||
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
|
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
|
||||||
|
@@ -1779,6 +1779,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
|
|||||||
SnapshotLimit: 0, // Disable snapshot by default
|
SnapshotLimit: 0, // Disable snapshot by default
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
defer engine.Close()
|
||||||
if snapshots {
|
if snapshots {
|
||||||
config.SnapshotLimit = 256
|
config.SnapshotLimit = 256
|
||||||
config.SnapshotWait = true
|
config.SnapshotWait = true
|
||||||
@@ -1836,25 +1837,25 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) {
|
|||||||
}
|
}
|
||||||
defer db.Close()
|
defer db.Close()
|
||||||
|
|
||||||
chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
|
newChain, err := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to recreate chain: %v", err)
|
t.Fatalf("Failed to recreate chain: %v", err)
|
||||||
}
|
}
|
||||||
defer chain.Stop()
|
defer newChain.Stop()
|
||||||
|
|
||||||
// Iterate over all the remaining blocks and ensure there are no gaps
|
// Iterate over all the remaining blocks and ensure there are no gaps
|
||||||
verifyNoGaps(t, chain, true, canonblocks)
|
verifyNoGaps(t, newChain, true, canonblocks)
|
||||||
verifyNoGaps(t, chain, false, sideblocks)
|
verifyNoGaps(t, newChain, false, sideblocks)
|
||||||
verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks)
|
verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks)
|
||||||
verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks)
|
verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks)
|
||||||
|
|
||||||
if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
|
if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader {
|
||||||
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
|
t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader)
|
||||||
}
|
}
|
||||||
if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
|
if head := newChain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock {
|
||||||
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
|
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock)
|
||||||
}
|
}
|
||||||
if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
|
if head := newChain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock {
|
||||||
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
|
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock)
|
||||||
}
|
}
|
||||||
if frozen, err := db.(freezer).Ancients(); err != nil {
|
if frozen, err := db.(freezer).Ancients(); err != nil {
|
||||||
|
@@ -2987,10 +2987,10 @@ func TestDeleteRecreateSlots(t *testing.T) {
|
|||||||
initCode := []byte{
|
initCode := []byte{
|
||||||
byte(vm.PUSH1), 0x3, // value
|
byte(vm.PUSH1), 0x3, // value
|
||||||
byte(vm.PUSH1), 0x3, // location
|
byte(vm.PUSH1), 0x3, // location
|
||||||
byte(vm.SSTORE), // Set slot[3] = 1
|
byte(vm.SSTORE), // Set slot[3] = 3
|
||||||
byte(vm.PUSH1), 0x4, // value
|
byte(vm.PUSH1), 0x4, // value
|
||||||
byte(vm.PUSH1), 0x4, // location
|
byte(vm.PUSH1), 0x4, // location
|
||||||
byte(vm.SSTORE), // Set slot[4] = 1
|
byte(vm.SSTORE), // Set slot[4] = 4
|
||||||
// Slots are set, now return the code
|
// Slots are set, now return the code
|
||||||
byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
|
byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack
|
||||||
byte(vm.PUSH1), 0x0, // memory start on stack
|
byte(vm.PUSH1), 0x0, // memory start on stack
|
||||||
|
@@ -28,7 +28,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// BlockGen creates blocks for testing.
|
// BlockGen creates blocks for testing.
|
||||||
@@ -285,91 +284,6 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
|
|||||||
return blocks, receipts
|
return blocks, receipts
|
||||||
}
|
}
|
||||||
|
|
||||||
func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) {
|
|
||||||
if config == nil {
|
|
||||||
config = params.TestChainConfig
|
|
||||||
}
|
|
||||||
blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n)
|
|
||||||
chainreader := &fakeChainReader{config: config}
|
|
||||||
genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) {
|
|
||||||
b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine}
|
|
||||||
b.header = makeHeader(chainreader, parent, statedb, b.engine)
|
|
||||||
|
|
||||||
// Mutate the state and block according to any hard-fork specs
|
|
||||||
if daoBlock := config.DAOForkBlock; daoBlock != nil {
|
|
||||||
limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange)
|
|
||||||
if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 {
|
|
||||||
if config.DAOForkSupport {
|
|
||||||
b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 {
|
|
||||||
misc.ApplyDAOHardFork(statedb)
|
|
||||||
}
|
|
||||||
// Execute any user modifications to the block
|
|
||||||
if gen != nil {
|
|
||||||
gen(i, b)
|
|
||||||
}
|
|
||||||
if b.engine != nil {
|
|
||||||
// Finalize and seal the block
|
|
||||||
block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write state changes to db
|
|
||||||
root, err := statedb.Commit(config.IsEIP158(b.header.Number))
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("state write error: %v", err))
|
|
||||||
}
|
|
||||||
if err := statedb.Database().TrieDB().Commit(root, false, nil); err != nil {
|
|
||||||
panic(fmt.Sprintf("trie write error: %v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate an associated verkle proof
|
|
||||||
if tr := statedb.GetTrie(); tr.IsVerkle() {
|
|
||||||
vtr := tr.(*trie.VerkleTrie)
|
|
||||||
// Generate the proof if we are using a verkle tree
|
|
||||||
// WORKAROUND: make sure all keys are resolved
|
|
||||||
// before building the proof. Ultimately, node
|
|
||||||
// resolution can be done with a prefetcher or
|
|
||||||
// from GetCommitmentsAlongPath.
|
|
||||||
|
|
||||||
keys := statedb.Witness().Keys()
|
|
||||||
for _, key := range keys {
|
|
||||||
out, err := vtr.TryGet(key)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
if len(out) == 0 {
|
|
||||||
panic(fmt.Sprintf("%x should be present in the tree", key))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
vtr.Hash()
|
|
||||||
_, err := vtr.ProveAndSerialize(keys, statedb.Witness().KeyVals())
|
|
||||||
//block.SetVerkleProof(p)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return block, b.receipts
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
statedb, err := state.New(parent.Root(), state.NewDatabaseWithConfig(db, &trie.Config{UseVerkle: true}), nil)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
block, receipt := genblock(i, parent, statedb)
|
|
||||||
blocks[i] = block
|
|
||||||
receipts[i] = receipt
|
|
||||||
parent = block
|
|
||||||
}
|
|
||||||
return blocks, receipts
|
|
||||||
}
|
|
||||||
|
|
||||||
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
|
func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header {
|
||||||
var time uint64
|
var time uint64
|
||||||
if parent.Time() == 0 {
|
if parent.Time() == 0 {
|
||||||
|
@@ -40,6 +40,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
|
|||||||
var (
|
var (
|
||||||
beneficiary common.Address
|
beneficiary common.Address
|
||||||
baseFee *big.Int
|
baseFee *big.Int
|
||||||
|
random *common.Hash
|
||||||
)
|
)
|
||||||
|
|
||||||
// If we don't have an explicit author (i.e. not mining), extract from the header
|
// If we don't have an explicit author (i.e. not mining), extract from the header
|
||||||
@@ -51,6 +52,9 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
|
|||||||
if header.BaseFee != nil {
|
if header.BaseFee != nil {
|
||||||
baseFee = new(big.Int).Set(header.BaseFee)
|
baseFee = new(big.Int).Set(header.BaseFee)
|
||||||
}
|
}
|
||||||
|
if header.Difficulty.Cmp(common.Big0) == 0 {
|
||||||
|
random = &header.MixDigest
|
||||||
|
}
|
||||||
return vm.BlockContext{
|
return vm.BlockContext{
|
||||||
CanTransfer: CanTransfer,
|
CanTransfer: CanTransfer,
|
||||||
Transfer: Transfer,
|
Transfer: Transfer,
|
||||||
@@ -61,6 +65,7 @@ func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common
|
|||||||
Difficulty: new(big.Int).Set(header.Difficulty),
|
Difficulty: new(big.Int).Set(header.Difficulty),
|
||||||
BaseFee: baseFee,
|
BaseFee: baseFee,
|
||||||
GasLimit: header.GasLimit,
|
GasLimit: header.GasLimit,
|
||||||
|
Random: random,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,7 +74,6 @@ func NewEVMTxContext(msg Message) vm.TxContext {
|
|||||||
return vm.TxContext{
|
return vm.TxContext{
|
||||||
Origin: msg.From(),
|
Origin: msg.From(),
|
||||||
GasPrice: new(big.Int).Set(msg.GasPrice()),
|
GasPrice: new(big.Int).Set(msg.GasPrice()),
|
||||||
Accesses: types.NewAccessWitness(),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -19,6 +19,7 @@ package forkid
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"math"
|
"math"
|
||||||
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
@@ -29,6 +30,8 @@ import (
|
|||||||
// TestCreation tests that different genesis and fork rule combinations result in
|
// TestCreation tests that different genesis and fork rule combinations result in
|
||||||
// the correct fork ID.
|
// the correct fork ID.
|
||||||
func TestCreation(t *testing.T) {
|
func TestCreation(t *testing.T) {
|
||||||
|
mergeConfig := *params.MainnetChainConfig
|
||||||
|
mergeConfig.MergeForkBlock = big.NewInt(15000000)
|
||||||
type testcase struct {
|
type testcase struct {
|
||||||
head uint64
|
head uint64
|
||||||
want ID
|
want ID
|
||||||
@@ -65,7 +68,7 @@ func TestCreation(t *testing.T) {
|
|||||||
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
|
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
|
||||||
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
|
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
|
||||||
{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
|
{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
|
||||||
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, /// First Arrow Glacier block
|
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // First Arrow Glacier block
|
||||||
{20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block
|
{20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -133,6 +136,38 @@ func TestCreation(t *testing.T) {
|
|||||||
{6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block
|
{6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
// Merge test cases
|
||||||
|
{
|
||||||
|
&mergeConfig,
|
||||||
|
params.MainnetGenesisHash,
|
||||||
|
[]testcase{
|
||||||
|
{0, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Unsynced
|
||||||
|
{1149999, ID{Hash: checksumToBytes(0xfc64ec04), Next: 1150000}}, // Last Frontier block
|
||||||
|
{1150000, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // First Homestead block
|
||||||
|
{1919999, ID{Hash: checksumToBytes(0x97c2c34c), Next: 1920000}}, // Last Homestead block
|
||||||
|
{1920000, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // First DAO block
|
||||||
|
{2462999, ID{Hash: checksumToBytes(0x91d1f948), Next: 2463000}}, // Last DAO block
|
||||||
|
{2463000, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // First Tangerine block
|
||||||
|
{2674999, ID{Hash: checksumToBytes(0x7a64da13), Next: 2675000}}, // Last Tangerine block
|
||||||
|
{2675000, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // First Spurious block
|
||||||
|
{4369999, ID{Hash: checksumToBytes(0x3edd5b10), Next: 4370000}}, // Last Spurious block
|
||||||
|
{4370000, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // First Byzantium block
|
||||||
|
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
|
||||||
|
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
|
||||||
|
{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
|
||||||
|
{9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
|
||||||
|
{9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
|
||||||
|
{9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block
|
||||||
|
{12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block
|
||||||
|
{12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block
|
||||||
|
{12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block
|
||||||
|
{12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block
|
||||||
|
{13772999, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block
|
||||||
|
{13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15000000}}, // First Arrow Glacier block
|
||||||
|
{15000000, ID{Hash: checksumToBytes(0xe3abe201), Next: 0}}, // First Merge Start block
|
||||||
|
{20000000, ID{Hash: checksumToBytes(0xe3abe201), Next: 0}}, // Future Merge Start block
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
for i, tt := range tests {
|
for i, tt := range tests {
|
||||||
for j, ttt := range tt.cases {
|
for j, ttt := range tt.cases {
|
||||||
|
@@ -162,7 +162,6 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
|
|||||||
if genesis != nil && genesis.Config == nil {
|
if genesis != nil && genesis.Config == nil {
|
||||||
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
|
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
// Just commit the new block if there is no stored genesis block.
|
// Just commit the new block if there is no stored genesis block.
|
||||||
stored := rawdb.ReadCanonicalHash(db, 0)
|
stored := rawdb.ReadCanonicalHash(db, 0)
|
||||||
if (stored == common.Hash{}) {
|
if (stored == common.Hash{}) {
|
||||||
@@ -178,29 +177,13 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
|
|||||||
}
|
}
|
||||||
return genesis.Config, block.Hash(), nil
|
return genesis.Config, block.Hash(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// We have the genesis block in database(perhaps in ancient database)
|
// We have the genesis block in database(perhaps in ancient database)
|
||||||
// but the corresponding state is missing.
|
// but the corresponding state is missing.
|
||||||
header := rawdb.ReadHeader(db, stored, 0)
|
header := rawdb.ReadHeader(db, stored, 0)
|
||||||
|
if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil {
|
||||||
var trieCfg *trie.Config
|
if genesis == nil {
|
||||||
|
genesis = DefaultGenesisBlock()
|
||||||
if genesis == nil {
|
|
||||||
storedcfg := rawdb.ReadChainConfig(db, stored)
|
|
||||||
if storedcfg == nil {
|
|
||||||
panic("this should never be reached: if genesis is nil, the config is already present or 'geth init' is being called which created it (in the code above, which means genesis != nil)")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if storedcfg.CancunBlock != nil {
|
|
||||||
if storedcfg.CancunBlock.Cmp(big.NewInt(0)) != 0 {
|
|
||||||
panic("cancun block must be 0")
|
|
||||||
}
|
|
||||||
|
|
||||||
trieCfg = &trie.Config{UseVerkle: storedcfg.IsCancun(big.NewInt(header.Number.Int64()))}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, trieCfg), nil); err != nil {
|
|
||||||
// Ensure the stored genesis matches with the given one.
|
// Ensure the stored genesis matches with the given one.
|
||||||
hash := genesis.ToBlock(nil).Hash()
|
hash := genesis.ToBlock(nil).Hash()
|
||||||
if hash != stored {
|
if hash != stored {
|
||||||
@@ -281,11 +264,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
|
|||||||
if db == nil {
|
if db == nil {
|
||||||
db = rawdb.NewMemoryDatabase()
|
db = rawdb.NewMemoryDatabase()
|
||||||
}
|
}
|
||||||
var trieCfg *trie.Config
|
statedb, err := state.New(common.Hash{}, state.NewDatabase(db), nil)
|
||||||
if g.Config != nil {
|
|
||||||
trieCfg = &trie.Config{UseVerkle: g.Config.IsCancun(big.NewInt(int64(g.Number)))}
|
|
||||||
}
|
|
||||||
statedb, err := state.New(common.Hash{}, state.NewDatabaseWithConfig(db, trieCfg), nil)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
@@ -315,7 +294,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
|
|||||||
if g.GasLimit == 0 {
|
if g.GasLimit == 0 {
|
||||||
head.GasLimit = params.GenesisGasLimit
|
head.GasLimit = params.GenesisGasLimit
|
||||||
}
|
}
|
||||||
if g.Difficulty == nil {
|
if g.Difficulty == nil && g.Mixhash == (common.Hash{}) {
|
||||||
head.Difficulty = params.GenesisDifficulty
|
head.Difficulty = params.GenesisDifficulty
|
||||||
}
|
}
|
||||||
if g.Config != nil && g.Config.IsLondon(common.Big0) {
|
if g.Config != nil && g.Config.IsLondon(common.Big0) {
|
||||||
@@ -327,9 +306,6 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block {
|
|||||||
}
|
}
|
||||||
statedb.Commit(false)
|
statedb.Commit(false)
|
||||||
statedb.Database().TrieDB().Commit(root, true, nil)
|
statedb.Database().TrieDB().Commit(root, true, nil)
|
||||||
if err := statedb.Cap(root); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
|
return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil))
|
||||||
}
|
}
|
||||||
@@ -381,20 +357,6 @@ func GenesisBlockForTesting(db ethdb.Database, addr common.Address, balance *big
|
|||||||
return g.MustCommit(db)
|
return g.MustCommit(db)
|
||||||
}
|
}
|
||||||
|
|
||||||
func DefaultVerkleGenesisBlock() *Genesis {
|
|
||||||
return &Genesis{
|
|
||||||
Config: params.VerkleChainConfig,
|
|
||||||
Nonce: 86,
|
|
||||||
GasLimit: 0x2fefd8,
|
|
||||||
Difficulty: big.NewInt(1),
|
|
||||||
Alloc: map[common.Address]GenesisAccount{
|
|
||||||
common.BytesToAddress([]byte{97, 118, 97, 209, 72, 165, 43, 239, 81, 162, 104, 199, 40, 179, 162, 27, 88, 249, 67, 6}): {
|
|
||||||
Balance: big.NewInt(0).Lsh(big.NewInt(1), 27),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultGenesisBlock returns the Ethereum main net genesis block.
|
// DefaultGenesisBlock returns the Ethereum main net genesis block.
|
||||||
func DefaultGenesisBlock() *Genesis {
|
func DefaultGenesisBlock() *Genesis {
|
||||||
return &Genesis{
|
return &Genesis{
|
||||||
|
@@ -33,6 +33,7 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -498,6 +499,46 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
|
|||||||
return hc.GetHeader(hash, number)
|
return hc.GetHeader(hash, number)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
|
||||||
|
// backwards from the given number.
|
||||||
|
// If the 'number' is higher than the highest local header, this method will
|
||||||
|
// return a best-effort response, containing the headers that we do have.
|
||||||
|
func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
|
||||||
|
// If the request is for future headers, we still return the portion of
|
||||||
|
// headers that we are able to serve
|
||||||
|
if current := hc.CurrentHeader().Number.Uint64(); current < number {
|
||||||
|
if count > number-current {
|
||||||
|
count -= number - current
|
||||||
|
number = current
|
||||||
|
} else {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var headers []rlp.RawValue
|
||||||
|
// If we have some of the headers in cache already, use that before going to db.
|
||||||
|
hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
|
||||||
|
if hash == (common.Hash{}) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for count > 0 {
|
||||||
|
header, ok := hc.headerCache.Get(hash)
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
h := header.(*types.Header)
|
||||||
|
rlpData, _ := rlp.EncodeToBytes(h)
|
||||||
|
headers = append(headers, rlpData)
|
||||||
|
hash = h.ParentHash
|
||||||
|
count--
|
||||||
|
number--
|
||||||
|
}
|
||||||
|
// Read remaining from db
|
||||||
|
if count > 0 {
|
||||||
|
headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
|
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
|
||||||
return rawdb.ReadCanonicalHash(hc.chainDb, number)
|
return rawdb.ReadCanonicalHash(hc.chainDb, number)
|
||||||
}
|
}
|
||||||
|
@@ -279,6 +279,56 @@ func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
|
||||||
|
// backwards towards genesis. This method assumes that the caller already has
|
||||||
|
// placed a cap on count, to prevent DoS issues.
|
||||||
|
// Since this method operates in head-towards-genesis mode, it will return an empty
|
||||||
|
// slice in case the head ('number') is missing. Hence, the caller must ensure that
|
||||||
|
// the head ('number') argument is actually an existing header.
|
||||||
|
//
|
||||||
|
// N.B: Since the input is a number, as opposed to a hash, it's implicit that
|
||||||
|
// this method only operates on canon headers.
|
||||||
|
func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue {
|
||||||
|
var rlpHeaders []rlp.RawValue
|
||||||
|
if count == 0 {
|
||||||
|
return rlpHeaders
|
||||||
|
}
|
||||||
|
i := number
|
||||||
|
if count-1 > number {
|
||||||
|
// It's ok to request block 0, 1 item
|
||||||
|
count = number + 1
|
||||||
|
}
|
||||||
|
limit, _ := db.Ancients()
|
||||||
|
// First read live blocks
|
||||||
|
if i >= limit {
|
||||||
|
// If we need to read live blocks, we need to figure out the hash first
|
||||||
|
hash := ReadCanonicalHash(db, number)
|
||||||
|
for ; i >= limit && count > 0; i-- {
|
||||||
|
if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 {
|
||||||
|
rlpHeaders = append(rlpHeaders, data)
|
||||||
|
// Get the parent hash for next query
|
||||||
|
hash = types.HeaderParentHashFromRLP(data)
|
||||||
|
} else {
|
||||||
|
break // Maybe got moved to ancients
|
||||||
|
}
|
||||||
|
count--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count == 0 {
|
||||||
|
return rlpHeaders
|
||||||
|
}
|
||||||
|
// read remaining from ancients
|
||||||
|
max := count * 700
|
||||||
|
data, err := db.AncientRange(freezerHeaderTable, i+1-count, count, max)
|
||||||
|
if err == nil && uint64(len(data)) == count {
|
||||||
|
// the data is on the order [h, h+1, .., n] -- reordering needed
|
||||||
|
for i := range data {
|
||||||
|
rlpHeaders = append(rlpHeaders, data[len(data)-1-i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return rlpHeaders
|
||||||
|
}
|
||||||
|
|
||||||
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
|
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
|
||||||
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
||||||
var data []byte
|
var data []byte
|
||||||
@@ -397,8 +447,11 @@ func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
|
|||||||
if len(data) > 0 {
|
if len(data) > 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// Get it by hash from leveldb
|
// Block is not in ancients, read from leveldb by hash and number.
|
||||||
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
|
// Note: ReadCanonicalHash cannot be used here because it also
|
||||||
|
// calls ReadAncients internally.
|
||||||
|
hash, _ := db.Get(headerHashKey(number))
|
||||||
|
data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
return data
|
return data
|
||||||
@@ -664,7 +717,7 @@ func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.C
|
|||||||
if logs := readLegacyLogs(db, hash, number, config); logs != nil {
|
if logs := readLegacyLogs(db, hash, number, config); logs != nil {
|
||||||
return logs
|
return logs
|
||||||
}
|
}
|
||||||
log.Error("Invalid receipt array RLP", "hash", "err", err)
|
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -883,3 +883,67 @@ func BenchmarkDecodeRLPLogs(b *testing.B) {
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHeadersRLPStorage(t *testing.T) {
|
||||||
|
// Have N headers in the freezer
|
||||||
|
frdir, err := ioutil.TempDir("", "")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create temp freezer dir: %v", err)
|
||||||
|
}
|
||||||
|
defer os.Remove(frdir)
|
||||||
|
|
||||||
|
db, err := NewDatabaseWithFreezer(NewMemoryDatabase(), frdir, "", false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to create database with ancient backend")
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
// Create blocks
|
||||||
|
var chain []*types.Block
|
||||||
|
var pHash common.Hash
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
block := types.NewBlockWithHeader(&types.Header{
|
||||||
|
Number: big.NewInt(int64(i)),
|
||||||
|
Extra: []byte("test block"),
|
||||||
|
UncleHash: types.EmptyUncleHash,
|
||||||
|
TxHash: types.EmptyRootHash,
|
||||||
|
ReceiptHash: types.EmptyRootHash,
|
||||||
|
ParentHash: pHash,
|
||||||
|
})
|
||||||
|
chain = append(chain, block)
|
||||||
|
pHash = block.Hash()
|
||||||
|
}
|
||||||
|
var receipts []types.Receipts = make([]types.Receipts, 100)
|
||||||
|
// Write first half to ancients
|
||||||
|
WriteAncientBlocks(db, chain[:50], receipts[:50], big.NewInt(100))
|
||||||
|
// Write second half to db
|
||||||
|
for i := 50; i < 100; i++ {
|
||||||
|
WriteCanonicalHash(db, chain[i].Hash(), chain[i].NumberU64())
|
||||||
|
WriteBlock(db, chain[i])
|
||||||
|
}
|
||||||
|
checkSequence := func(from, amount int) {
|
||||||
|
headersRlp := ReadHeaderRange(db, uint64(from), uint64(amount))
|
||||||
|
if have, want := len(headersRlp), amount; have != want {
|
||||||
|
t.Fatalf("have %d headers, want %d", have, want)
|
||||||
|
}
|
||||||
|
for i, headerRlp := range headersRlp {
|
||||||
|
var header types.Header
|
||||||
|
if err := rlp.DecodeBytes(headerRlp, &header); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if have, want := header.Number.Uint64(), uint64(from-i); have != want {
|
||||||
|
t.Fatalf("wrong number, have %d want %d", have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkSequence(99, 20) // Latest block and 19 parents
|
||||||
|
checkSequence(99, 50) // Latest block -> all db blocks
|
||||||
|
checkSequence(99, 51) // Latest block -> one from ancients
|
||||||
|
checkSequence(99, 52) // Latest blocks -> two from ancients
|
||||||
|
checkSequence(50, 2) // One from db, one from ancients
|
||||||
|
checkSequence(49, 1) // One from ancients
|
||||||
|
checkSequence(49, 50) // All ancient ones
|
||||||
|
checkSequence(99, 100) // All blocks
|
||||||
|
checkSequence(0, 1) // Only genesis
|
||||||
|
checkSequence(1, 1) // Only block 1
|
||||||
|
checkSequence(1, 2) // Genesis + block 1
|
||||||
|
}
|
||||||
|
@@ -139,6 +139,28 @@ func PopUncleanShutdownMarker(db ethdb.KeyValueStore) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateUncleanShutdownMarker updates the last marker's timestamp to now.
|
||||||
|
func UpdateUncleanShutdownMarker(db ethdb.KeyValueStore) {
|
||||||
|
var uncleanShutdowns crashList
|
||||||
|
// Read old data
|
||||||
|
if data, err := db.Get(uncleanShutdownKey); err != nil {
|
||||||
|
log.Warn("Error reading unclean shutdown markers", "error", err)
|
||||||
|
} else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil {
|
||||||
|
log.Warn("Error decoding unclean shutdown markers", "error", err)
|
||||||
|
}
|
||||||
|
// This shouldn't happen because we push a marker on Backend instantiation
|
||||||
|
count := len(uncleanShutdowns.Recent)
|
||||||
|
if count == 0 {
|
||||||
|
log.Warn("No unclean shutdown marker to update")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
uncleanShutdowns.Recent[count-1] = uint64(time.Now().Unix())
|
||||||
|
data, _ := rlp.EncodeToBytes(uncleanShutdowns)
|
||||||
|
if err := db.Put(uncleanShutdownKey, data); err != nil {
|
||||||
|
log.Warn("Failed to write unclean-shutdown marker", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ReadTransitionStatus retrieves the eth2 transition status from the database
|
// ReadTransitionStatus retrieves the eth2 transition status from the database
|
||||||
func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
|
func ReadTransitionStatus(db ethdb.KeyValueReader) []byte {
|
||||||
data, _ := db.Get(transitionStatusKey)
|
data, _ := db.Get(transitionStatusKey)
|
||||||
|
@@ -41,16 +41,14 @@ func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
|
|||||||
|
|
||||||
// ReadCode retrieves the contract code of the provided code hash.
|
// ReadCode retrieves the contract code of the provided code hash.
|
||||||
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
||||||
// Try with the legacy code scheme first, if not then try with current
|
// Try with the prefixed code scheme first, if not then try with legacy
|
||||||
// scheme. Since most of the code will be found with legacy scheme.
|
// scheme.
|
||||||
//
|
data := ReadCodeWithPrefix(db, hash)
|
||||||
// todo(rjl493456442) change the order when we forcibly upgrade the code
|
|
||||||
// scheme with snapshot.
|
|
||||||
data, _ := db.Get(hash[:])
|
|
||||||
if len(data) != 0 {
|
if len(data) != 0 {
|
||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
return ReadCodeWithPrefix(db, hash)
|
data, _ = db.Get(hash[:])
|
||||||
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
|
// ReadCodeWithPrefix retrieves the contract code of the provided code hash.
|
||||||
@@ -61,6 +59,14 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
|||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasCodeWithPrefix checks if the contract code corresponding to the
|
||||||
|
// provided code hash is present in the db. This function will only check
|
||||||
|
// presence using the prefix-scheme.
|
||||||
|
func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
|
||||||
|
ok, _ := db.Has(codeKey(hash))
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
// WriteCode writes the provided contract code database.
|
// WriteCode writes the provided contract code database.
|
||||||
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
|
func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
|
||||||
if err := db.Put(codeKey(hash), code); err != nil {
|
if err := db.Put(codeKey(hash), code); err != nil {
|
||||||
@@ -81,6 +87,12 @@ func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
|
|||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasTrieNode checks if the trie node with the provided hash is present in db.
|
||||||
|
func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
|
||||||
|
ok, _ := db.Has(hash.Bytes())
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
// WriteTrieNode writes the provided trie node database.
|
// WriteTrieNode writes the provided trie node database.
|
||||||
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
|
func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
|
||||||
if err := db.Put(hash.Bytes(), node); err != nil {
|
if err := db.Put(hash.Bytes(), node); err != nil {
|
||||||
|
@@ -247,7 +247,8 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// IndexTransactions creates txlookup indices of the specified block range.
|
// IndexTransactions creates txlookup indices of the specified block range. The from
|
||||||
|
// is included while to is excluded.
|
||||||
//
|
//
|
||||||
// This function iterates canonical chain in reverse order, it has one main advantage:
|
// This function iterates canonical chain in reverse order, it has one main advantage:
|
||||||
// We can write tx index tail flag periodically even without the whole indexing
|
// We can write tx index tail flag periodically even without the whole indexing
|
||||||
@@ -339,6 +340,7 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UnindexTransactions removes txlookup indices of the specified block range.
|
// UnindexTransactions removes txlookup indices of the specified block range.
|
||||||
|
// The from is included while to is excluded.
|
||||||
//
|
//
|
||||||
// There is a passed channel, the whole procedure will be interrupted if any
|
// There is a passed channel, the whole procedure will be interrupted if any
|
||||||
// signal received.
|
// signal received.
|
||||||
|
@@ -133,7 +133,7 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
|
|||||||
|
|
||||||
// Create the tables.
|
// Create the tables.
|
||||||
for name, disableSnappy := range tables {
|
for name, disableSnappy := range tables {
|
||||||
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
|
table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
for _, table := range freezer.tables {
|
for _, table := range freezer.tables {
|
||||||
table.Close()
|
table.Close()
|
||||||
@@ -144,8 +144,15 @@ func newFreezer(datadir string, namespace string, readonly bool, maxTableSize ui
|
|||||||
freezer.tables[name] = table
|
freezer.tables[name] = table
|
||||||
}
|
}
|
||||||
|
|
||||||
// Truncate all tables to common length.
|
if freezer.readonly {
|
||||||
if err := freezer.repair(); err != nil {
|
// In readonly mode only validate, don't truncate.
|
||||||
|
// validate also sets `freezer.frozen`.
|
||||||
|
err = freezer.validate()
|
||||||
|
} else {
|
||||||
|
// Truncate all tables to common length.
|
||||||
|
err = freezer.repair()
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
for _, table := range freezer.tables {
|
for _, table := range freezer.tables {
|
||||||
table.Close()
|
table.Close()
|
||||||
}
|
}
|
||||||
@@ -308,6 +315,33 @@ func (f *freezer) Sync() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// validate checks that every table has the same length.
|
||||||
|
// Used instead of `repair` in readonly mode.
|
||||||
|
func (f *freezer) validate() error {
|
||||||
|
if len(f.tables) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
length uint64
|
||||||
|
name string
|
||||||
|
)
|
||||||
|
// Hack to get length of any table
|
||||||
|
for kind, table := range f.tables {
|
||||||
|
length = atomic.LoadUint64(&table.items)
|
||||||
|
name = kind
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Now check every table against that length
|
||||||
|
for kind, table := range f.tables {
|
||||||
|
items := atomic.LoadUint64(&table.items)
|
||||||
|
if length != items {
|
||||||
|
return fmt.Errorf("freezer tables %s and %s have differing lengths: %d != %d", kind, name, items, length)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
atomic.StoreUint64(&f.frozen, length)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// repair truncates all data tables to the same length.
|
// repair truncates all data tables to the same length.
|
||||||
func (f *freezer) repair() error {
|
func (f *freezer) repair() error {
|
||||||
min := uint64(math.MaxUint64)
|
min := uint64(math.MaxUint64)
|
||||||
|
@@ -94,7 +94,8 @@ type freezerTable struct {
|
|||||||
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
|
// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
|
||||||
items uint64 // Number of items stored in the table (including items removed from tail)
|
items uint64 // Number of items stored in the table (including items removed from tail)
|
||||||
|
|
||||||
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
|
noCompression bool // if true, disables snappy compression. Note: does not work retroactively
|
||||||
|
readonly bool
|
||||||
maxFileSize uint32 // Max file size for data-files
|
maxFileSize uint32 // Max file size for data-files
|
||||||
name string
|
name string
|
||||||
path string
|
path string
|
||||||
@@ -119,8 +120,8 @@ type freezerTable struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewFreezerTable opens the given path as a freezer table.
|
// NewFreezerTable opens the given path as a freezer table.
|
||||||
func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) {
|
func NewFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
|
||||||
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy)
|
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly)
|
||||||
}
|
}
|
||||||
|
|
||||||
// openFreezerFileForAppend opens a freezer table file and seeks to the end
|
// openFreezerFileForAppend opens a freezer table file and seeks to the end
|
||||||
@@ -164,7 +165,7 @@ func truncateFreezerFile(file *os.File, size int64) error {
|
|||||||
// newTable opens a freezer table, creating the data and index files if they are
|
// newTable opens a freezer table, creating the data and index files if they are
|
||||||
// non existent. Both files are truncated to the shortest common length to ensure
|
// non existent. Both files are truncated to the shortest common length to ensure
|
||||||
// they don't go out of sync.
|
// they don't go out of sync.
|
||||||
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression bool) (*freezerTable, error) {
|
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
|
||||||
// Ensure the containing directory exists and open the indexEntry file
|
// Ensure the containing directory exists and open the indexEntry file
|
||||||
if err := os.MkdirAll(path, 0755); err != nil {
|
if err := os.MkdirAll(path, 0755); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -177,7 +178,16 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
|
|||||||
// Compressed idx
|
// Compressed idx
|
||||||
idxName = fmt.Sprintf("%s.cidx", name)
|
idxName = fmt.Sprintf("%s.cidx", name)
|
||||||
}
|
}
|
||||||
offsets, err := openFreezerFileForAppend(filepath.Join(path, idxName))
|
var (
|
||||||
|
err error
|
||||||
|
offsets *os.File
|
||||||
|
)
|
||||||
|
if readonly {
|
||||||
|
// Will fail if table doesn't exist
|
||||||
|
offsets, err = openFreezerFileForReadOnly(filepath.Join(path, idxName))
|
||||||
|
} else {
|
||||||
|
offsets, err = openFreezerFileForAppend(filepath.Join(path, idxName))
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -192,6 +202,7 @@ func newTable(path string, name string, readMeter metrics.Meter, writeMeter metr
|
|||||||
path: path,
|
path: path,
|
||||||
logger: log.New("database", path, "table", name),
|
logger: log.New("database", path, "table", name),
|
||||||
noCompression: noCompression,
|
noCompression: noCompression,
|
||||||
|
readonly: readonly,
|
||||||
maxFileSize: maxFilesize,
|
maxFileSize: maxFilesize,
|
||||||
}
|
}
|
||||||
if err := tab.repair(); err != nil {
|
if err := tab.repair(); err != nil {
|
||||||
@@ -252,7 +263,11 @@ func (t *freezerTable) repair() error {
|
|||||||
|
|
||||||
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
|
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
|
||||||
lastIndex.unmarshalBinary(buffer)
|
lastIndex.unmarshalBinary(buffer)
|
||||||
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
|
if t.readonly {
|
||||||
|
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
|
||||||
|
} else {
|
||||||
|
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForAppend)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -301,12 +316,15 @@ func (t *freezerTable) repair() error {
|
|||||||
contentExp = int64(lastIndex.offset)
|
contentExp = int64(lastIndex.offset)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Ensure all reparation changes have been written to disk
|
// Sync() fails for read-only files on windows.
|
||||||
if err := t.index.Sync(); err != nil {
|
if !t.readonly {
|
||||||
return err
|
// Ensure all reparation changes have been written to disk
|
||||||
}
|
if err := t.index.Sync(); err != nil {
|
||||||
if err := t.head.Sync(); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
|
if err := t.head.Sync(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Update the item and byte counters and return
|
// Update the item and byte counters and return
|
||||||
t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
|
t.items = uint64(t.itemOffset) + uint64(offsetsSize/indexEntrySize-1) // last indexEntry points to the end of the data file
|
||||||
@@ -334,8 +352,12 @@ func (t *freezerTable) preopen() (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Open head in read/write
|
if t.readonly {
|
||||||
t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
|
t.head, err = t.openFile(t.headId, openFreezerFileForReadOnly)
|
||||||
|
} else {
|
||||||
|
// Open head in read/write
|
||||||
|
t.head, err = t.openFile(t.headId, openFreezerFileForAppend)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -40,7 +40,7 @@ func TestFreezerBasics(t *testing.T) {
|
|||||||
// set cutoff at 50 bytes
|
// set cutoff at 50 bytes
|
||||||
f, err := newTable(os.TempDir(),
|
f, err := newTable(os.TempDir(),
|
||||||
fmt.Sprintf("unittest-%d", rand.Uint64()),
|
fmt.Sprintf("unittest-%d", rand.Uint64()),
|
||||||
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -85,7 +85,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
|
|||||||
f *freezerTable
|
f *freezerTable
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -99,7 +99,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
|
|||||||
require.NoError(t, batch.commit())
|
require.NoError(t, batch.commit())
|
||||||
f.Close()
|
f.Close()
|
||||||
|
|
||||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -116,7 +116,7 @@ func TestFreezerBasicsClosing(t *testing.T) {
|
|||||||
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
|
t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
|
||||||
}
|
}
|
||||||
f.Close()
|
f.Close()
|
||||||
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -131,7 +131,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
|
|||||||
|
|
||||||
// Fill table
|
// Fill table
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -160,7 +160,7 @@ func TestFreezerRepairDanglingHead(t *testing.T) {
|
|||||||
|
|
||||||
// Now open it again
|
// Now open it again
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -183,7 +183,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
|
|||||||
|
|
||||||
// Fill a table and close it
|
// Fill a table and close it
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -209,7 +209,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
|
|||||||
|
|
||||||
// Now open it again
|
// Now open it again
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -232,7 +232,7 @@ func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
|
|||||||
|
|
||||||
// And if we open it, we should now be able to read all of them (new values)
|
// And if we open it, we should now be able to read all of them (new values)
|
||||||
{
|
{
|
||||||
f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
for y := 1; y < 255; y++ {
|
for y := 1; y < 255; y++ {
|
||||||
exp := getChunk(15, ^y)
|
exp := getChunk(15, ^y)
|
||||||
got, err := f.Retrieve(uint64(y))
|
got, err := f.Retrieve(uint64(y))
|
||||||
@@ -254,7 +254,7 @@ func TestSnappyDetection(t *testing.T) {
|
|||||||
|
|
||||||
// Open with snappy
|
// Open with snappy
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -265,7 +265,7 @@ func TestSnappyDetection(t *testing.T) {
|
|||||||
|
|
||||||
// Open without snappy
|
// Open without snappy
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -277,7 +277,7 @@ func TestSnappyDetection(t *testing.T) {
|
|||||||
|
|
||||||
// Open with snappy
|
// Open with snappy
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -309,7 +309,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
|
|||||||
|
|
||||||
// Fill a table and close it
|
// Fill a table and close it
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -345,7 +345,7 @@ func TestFreezerRepairDanglingIndex(t *testing.T) {
|
|||||||
// 45, 45, 15
|
// 45, 45, 15
|
||||||
// with 3+3+1 items
|
// with 3+3+1 items
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -366,7 +366,7 @@ func TestFreezerTruncate(t *testing.T) {
|
|||||||
|
|
||||||
// Fill table
|
// Fill table
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -382,7 +382,7 @@ func TestFreezerTruncate(t *testing.T) {
|
|||||||
|
|
||||||
// Reopen, truncate
|
// Reopen, truncate
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -407,7 +407,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
|
|||||||
|
|
||||||
// Fill table
|
// Fill table
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -440,7 +440,7 @@ func TestFreezerRepairFirstFile(t *testing.T) {
|
|||||||
|
|
||||||
// Reopen
|
// Reopen
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -475,7 +475,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
|
|||||||
|
|
||||||
// Fill table
|
// Fill table
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -491,7 +491,7 @@ func TestFreezerReadAndTruncate(t *testing.T) {
|
|||||||
|
|
||||||
// Reopen and read all files
|
// Reopen and read all files
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -523,7 +523,7 @@ func TestFreezerOffset(t *testing.T) {
|
|||||||
|
|
||||||
// Fill table
|
// Fill table
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -584,7 +584,7 @@ func TestFreezerOffset(t *testing.T) {
|
|||||||
|
|
||||||
// Now open again
|
// Now open again
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -638,7 +638,7 @@ func TestFreezerOffset(t *testing.T) {
|
|||||||
|
|
||||||
// Check that existing items have been moved to index 1M.
|
// Check that existing items have been moved to index 1M.
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -726,7 +726,7 @@ func TestSequentialRead(t *testing.T) {
|
|||||||
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
|
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
|
||||||
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
|
fname := fmt.Sprintf("batchread-%d", rand.Uint64())
|
||||||
{ // Fill table
|
{ // Fill table
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -736,7 +736,7 @@ func TestSequentialRead(t *testing.T) {
|
|||||||
f.Close()
|
f.Close()
|
||||||
}
|
}
|
||||||
{ // Open it, iterate, verify iteration
|
{ // Open it, iterate, verify iteration
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -757,7 +757,7 @@ func TestSequentialRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
{ // Open it, iterate, verify byte limit. The byte limit is less than item
|
{ // Open it, iterate, verify byte limit. The byte limit is less than item
|
||||||
// size, so each lookup should only return one item
|
// size, so each lookup should only return one item
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -786,7 +786,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
|
|||||||
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
|
rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
|
||||||
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
|
fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
|
||||||
{ // Fill table
|
{ // Fill table
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -808,7 +808,7 @@ func TestSequentialReadByteLimit(t *testing.T) {
|
|||||||
{100, 109, 10},
|
{100, 109, 10},
|
||||||
} {
|
} {
|
||||||
{
|
{
|
||||||
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
|
f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -829,3 +829,89 @@ func TestSequentialReadByteLimit(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFreezerReadonly(t *testing.T) {
|
||||||
|
tmpdir := os.TempDir()
|
||||||
|
// Case 1: Check it fails on non-existent file.
|
||||||
|
_, err := newTable(tmpdir,
|
||||||
|
fmt.Sprintf("readonlytest-%d", rand.Uint64()),
|
||||||
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("readonly table instantiation should fail for non-existent table")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case 2: Check that it fails on invalid index length.
|
||||||
|
fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
|
||||||
|
idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to open index file: %v\n", err)
|
||||||
|
}
|
||||||
|
// size should not be a multiple of indexEntrySize.
|
||||||
|
idxFile.Write(make([]byte, 17))
|
||||||
|
idxFile.Close()
|
||||||
|
_, err = newTable(tmpdir, fname,
|
||||||
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("readonly table instantiation should fail for invalid index size")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case 3: Open table non-readonly table to write some data.
|
||||||
|
// Then corrupt the head file and make sure opening the table
|
||||||
|
// again in readonly triggers an error.
|
||||||
|
fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
|
||||||
|
f, err := newTable(tmpdir, fname,
|
||||||
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to instantiate table: %v", err)
|
||||||
|
}
|
||||||
|
writeChunks(t, f, 8, 32)
|
||||||
|
// Corrupt table file
|
||||||
|
if _, err := f.head.Write([]byte{1, 1}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := f.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
_, err = newTable(tmpdir, fname,
|
||||||
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||||
|
if err == nil {
|
||||||
|
t.Errorf("readonly table instantiation should fail for corrupt table file")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case 4: Write some data to a table and later re-open it as readonly.
|
||||||
|
// Should be successful.
|
||||||
|
fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
|
||||||
|
f, err = newTable(tmpdir, fname,
|
||||||
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to instantiate table: %v\n", err)
|
||||||
|
}
|
||||||
|
writeChunks(t, f, 32, 128)
|
||||||
|
if err := f.Close(); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
f, err = newTable(tmpdir, fname,
|
||||||
|
metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
v, err := f.Retrieve(10)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
exp := getChunk(128, 10)
|
||||||
|
if !bytes.Equal(v, exp) {
|
||||||
|
t.Errorf("retrieved value is incorrect")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Case 5: Now write some data via a batch.
|
||||||
|
// This should fail either during AppendRaw or Commit
|
||||||
|
batch := f.newBatch()
|
||||||
|
writeErr := batch.AppendRaw(32, make([]byte, 1))
|
||||||
|
if writeErr == nil {
|
||||||
|
writeErr = batch.commit()
|
||||||
|
}
|
||||||
|
if writeErr == nil {
|
||||||
|
t.Fatalf("Writing to readonly table should fail")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@@ -253,6 +253,44 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFreezerReadonlyValidate(t *testing.T) {
|
||||||
|
tables := map[string]bool{"a": true, "b": true}
|
||||||
|
dir, err := ioutil.TempDir("", "freezer")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(dir)
|
||||||
|
// Open non-readonly freezer and fill individual tables
|
||||||
|
// with different amount of data.
|
||||||
|
f, err := newFreezer(dir, "", false, 2049, tables)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal("can't open freezer", err)
|
||||||
|
}
|
||||||
|
var item = make([]byte, 1024)
|
||||||
|
aBatch := f.tables["a"].newBatch()
|
||||||
|
require.NoError(t, aBatch.AppendRaw(0, item))
|
||||||
|
require.NoError(t, aBatch.AppendRaw(1, item))
|
||||||
|
require.NoError(t, aBatch.AppendRaw(2, item))
|
||||||
|
require.NoError(t, aBatch.commit())
|
||||||
|
bBatch := f.tables["b"].newBatch()
|
||||||
|
require.NoError(t, bBatch.AppendRaw(0, item))
|
||||||
|
require.NoError(t, bBatch.commit())
|
||||||
|
if f.tables["a"].items != 3 {
|
||||||
|
t.Fatalf("unexpected number of items in table")
|
||||||
|
}
|
||||||
|
if f.tables["b"].items != 1 {
|
||||||
|
t.Fatalf("unexpected number of items in table")
|
||||||
|
}
|
||||||
|
require.NoError(t, f.Close())
|
||||||
|
|
||||||
|
// Re-openening as readonly should fail when validating
|
||||||
|
// table lengths.
|
||||||
|
f, err = newFreezer(dir, "", true, 2049, tables)
|
||||||
|
if err == nil {
|
||||||
|
t.Fatal("readonly freezer should fail with differing table lengths")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
|
func newFreezerForTesting(t *testing.T, tables map[string]bool) (*freezer, string) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
|
@@ -26,7 +26,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
"github.com/gballet/go-verkle"
|
|
||||||
lru "github.com/hashicorp/golang-lru"
|
lru "github.com/hashicorp/golang-lru"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -105,9 +104,6 @@ type Trie interface {
|
|||||||
// nodes of the longest existing prefix of the key (at least the root), ending
|
// nodes of the longest existing prefix of the key (at least the root), ending
|
||||||
// with the node that proves the absence of the key.
|
// with the node that proves the absence of the key.
|
||||||
Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
|
Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error
|
||||||
|
|
||||||
// IsVerkle returns true if the trie is verkle-tree based
|
|
||||||
IsVerkle() bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDatabase creates a backing store for state. The returned database is safe for
|
// NewDatabase creates a backing store for state. The returned database is safe for
|
||||||
@@ -122,13 +118,6 @@ func NewDatabase(db ethdb.Database) Database {
|
|||||||
// large memory cache.
|
// large memory cache.
|
||||||
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
|
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
|
||||||
csc, _ := lru.New(codeSizeCacheSize)
|
csc, _ := lru.New(codeSizeCacheSize)
|
||||||
if config != nil && config.UseVerkle {
|
|
||||||
return &VerkleDB{
|
|
||||||
db: trie.NewDatabaseWithConfig(db, config),
|
|
||||||
codeSizeCache: csc,
|
|
||||||
codeCache: fastcache.New(codeCacheSize),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &cachingDB{
|
return &cachingDB{
|
||||||
db: trie.NewDatabaseWithConfig(db, config),
|
db: trie.NewDatabaseWithConfig(db, config),
|
||||||
codeSizeCache: csc,
|
codeSizeCache: csc,
|
||||||
@@ -213,67 +202,3 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro
|
|||||||
func (db *cachingDB) TrieDB() *trie.Database {
|
func (db *cachingDB) TrieDB() *trie.Database {
|
||||||
return db.db
|
return db.db
|
||||||
}
|
}
|
||||||
|
|
||||||
// VerkleDB implements state.Database for a verkle tree
|
|
||||||
type VerkleDB struct {
|
|
||||||
db *trie.Database
|
|
||||||
codeSizeCache *lru.Cache
|
|
||||||
codeCache *fastcache.Cache
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenTrie opens the main account trie.
|
|
||||||
func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) {
|
|
||||||
if root == (common.Hash{}) || root == emptyRoot {
|
|
||||||
return trie.NewVerkleTrie(verkle.New(), db.db), nil
|
|
||||||
}
|
|
||||||
payload, err := db.db.DiskDB().Get(root[:])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err := verkle.ParseNode(payload, 0)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return trie.NewVerkleTrie(r, db.db), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenStorageTrie opens the storage trie of an account.
|
|
||||||
func (db *VerkleDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) {
|
|
||||||
// alternatively, return accTrie
|
|
||||||
panic("should not be called")
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyTrie returns an independent copy of the given trie.
|
|
||||||
func (db *VerkleDB) CopyTrie(tr Trie) Trie {
|
|
||||||
t, ok := tr.(*trie.VerkleTrie)
|
|
||||||
if ok {
|
|
||||||
return t.Copy(db.db)
|
|
||||||
}
|
|
||||||
|
|
||||||
panic("invalid tree type != VerkleTrie")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContractCode retrieves a particular contract's code.
|
|
||||||
func (db *VerkleDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) {
|
|
||||||
if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 {
|
|
||||||
return code, nil
|
|
||||||
}
|
|
||||||
code := rawdb.ReadCode(db.db.DiskDB(), codeHash)
|
|
||||||
if len(code) > 0 {
|
|
||||||
db.codeCache.Set(codeHash.Bytes(), code)
|
|
||||||
db.codeSizeCache.Add(codeHash, len(code))
|
|
||||||
return code, nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContractCodeSize retrieves a particular contracts code's size.
|
|
||||||
func (db *VerkleDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) {
|
|
||||||
panic("need to merge #31 for this to work")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TrieDB retrieves the low level trie database used for data storage.
|
|
||||||
func (db *VerkleDB) TrieDB() *trie.Database {
|
|
||||||
return db.db
|
|
||||||
}
|
|
||||||
|
@@ -76,14 +76,6 @@ func (it *NodeIterator) step() error {
|
|||||||
// Initialize the iterator if we've just started
|
// Initialize the iterator if we've just started
|
||||||
if it.stateIt == nil {
|
if it.stateIt == nil {
|
||||||
it.stateIt = it.state.trie.NodeIterator(nil)
|
it.stateIt = it.state.trie.NodeIterator(nil)
|
||||||
|
|
||||||
// If the trie is a verkle trie, then the data and state
|
|
||||||
// are the same tree, and as a result both iterators are
|
|
||||||
// the same. This is a hack meant for both tree types to
|
|
||||||
// work.
|
|
||||||
if _, ok := it.state.trie.(*trie.VerkleTrie); ok {
|
|
||||||
it.dataIt = it.stateIt
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// If we had data nodes previously, we surely have at least state nodes
|
// If we had data nodes previously, we surely have at least state nodes
|
||||||
if it.dataIt != nil {
|
if it.dataIt != nil {
|
||||||
@@ -108,11 +100,10 @@ func (it *NodeIterator) step() error {
|
|||||||
it.state, it.stateIt = nil, nil
|
it.state, it.stateIt = nil, nil
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// If the state trie node is an internal entry, leave as is.
|
// If the state trie node is an internal entry, leave as is
|
||||||
if !it.stateIt.Leaf() {
|
if !it.stateIt.Leaf() {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Otherwise we've reached an account node, initiate data iteration
|
// Otherwise we've reached an account node, initiate data iteration
|
||||||
var account types.StateAccount
|
var account types.StateAccount
|
||||||
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
|
if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
|
||||||
|
@@ -89,7 +89,7 @@ func NewPruner(db ethdb.Database, datadir, trieCachePath string, bloomSize uint6
|
|||||||
if headBlock == nil {
|
if headBlock == nil {
|
||||||
return nil, errors.New("Failed to load head block")
|
return nil, errors.New("Failed to load head block")
|
||||||
}
|
}
|
||||||
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false, false)
|
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err // The relevant snapshot(s) might not exist
|
return nil, err // The relevant snapshot(s) might not exist
|
||||||
}
|
}
|
||||||
@@ -362,7 +362,7 @@ func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) err
|
|||||||
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
|
// - The state HEAD is rewound already because of multiple incomplete `prune-state`
|
||||||
// In this case, even the state HEAD is not exactly matched with snapshot, it
|
// In this case, even the state HEAD is not exactly matched with snapshot, it
|
||||||
// still feasible to recover the pruning correctly.
|
// still feasible to recover the pruning correctly.
|
||||||
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true, false)
|
snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headBlock.Root(), false, false, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err // The relevant snapshot(s) might not exist
|
return err // The relevant snapshot(s) might not exist
|
||||||
}
|
}
|
||||||
|
@@ -48,13 +48,13 @@ var (
|
|||||||
// accountCheckRange is the upper limit of the number of accounts involved in
|
// accountCheckRange is the upper limit of the number of accounts involved in
|
||||||
// each range check. This is a value estimated based on experience. If this
|
// each range check. This is a value estimated based on experience. If this
|
||||||
// value is too large, the failure rate of range prove will increase. Otherwise
|
// value is too large, the failure rate of range prove will increase. Otherwise
|
||||||
// the the value is too small, the efficiency of the state recovery will decrease.
|
// the value is too small, the efficiency of the state recovery will decrease.
|
||||||
accountCheckRange = 128
|
accountCheckRange = 128
|
||||||
|
|
||||||
// storageCheckRange is the upper limit of the number of storage slots involved
|
// storageCheckRange is the upper limit of the number of storage slots involved
|
||||||
// in each range check. This is a value estimated based on experience. If this
|
// in each range check. This is a value estimated based on experience. If this
|
||||||
// value is too large, the failure rate of range prove will increase. Otherwise
|
// value is too large, the failure rate of range prove will increase. Otherwise
|
||||||
// the the value is too small, the efficiency of the state recovery will decrease.
|
// the value is too small, the efficiency of the state recovery will decrease.
|
||||||
storageCheckRange = 1024
|
storageCheckRange = 1024
|
||||||
|
|
||||||
// errMissingTrie is returned if the target trie is missing while the generation
|
// errMissingTrie is returned if the target trie is missing while the generation
|
||||||
|
@@ -66,6 +66,29 @@ type journalStorage struct {
|
|||||||
Vals [][]byte
|
Vals [][]byte
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ParseGeneratorStatus(generatorBlob []byte) string {
|
||||||
|
if len(generatorBlob) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
var generator journalGenerator
|
||||||
|
if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil {
|
||||||
|
log.Warn("failed to decode snapshot generator", "err", err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
// Figure out whether we're after or within an account
|
||||||
|
var m string
|
||||||
|
switch marker := generator.Marker; len(marker) {
|
||||||
|
case common.HashLength:
|
||||||
|
m = fmt.Sprintf("at %#x", marker)
|
||||||
|
case 2 * common.HashLength:
|
||||||
|
m = fmt.Sprintf("in %#x at %#x", marker[:common.HashLength], marker[common.HashLength:])
|
||||||
|
default:
|
||||||
|
m = fmt.Sprintf("%#x", marker)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(`Done: %v, Accounts: %d, Slots: %d, Storage: %d, Marker: %s`,
|
||||||
|
generator.Done, generator.Accounts, generator.Slots, generator.Storage, m)
|
||||||
|
}
|
||||||
|
|
||||||
// loadAndParseJournal tries to parse the snapshot journal in latest format.
|
// loadAndParseJournal tries to parse the snapshot journal in latest format.
|
||||||
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
|
func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) {
|
||||||
// Retrieve the disk layer generator. It must exist, no matter the
|
// Retrieve the disk layer generator. It must exist, no matter the
|
||||||
|
@@ -24,7 +24,6 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/VictoriaMetrics/fastcache"
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/rawdb"
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
@@ -184,7 +183,7 @@ type Tree struct {
|
|||||||
// This case happens when the snapshot is 'ahead' of the state trie.
|
// This case happens when the snapshot is 'ahead' of the state trie.
|
||||||
// - otherwise, the entire snapshot is considered invalid and will be recreated on
|
// - otherwise, the entire snapshot is considered invalid and will be recreated on
|
||||||
// a background thread.
|
// a background thread.
|
||||||
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool, useVerkle bool) (*Tree, error) {
|
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
|
||||||
// Create a new, empty snapshot tree
|
// Create a new, empty snapshot tree
|
||||||
snap := &Tree{
|
snap := &Tree{
|
||||||
diskdb: diskdb,
|
diskdb: diskdb,
|
||||||
@@ -203,17 +202,6 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm
|
|||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if rebuild {
|
if rebuild {
|
||||||
if useVerkle {
|
|
||||||
snap.layers = map[common.Hash]snapshot{
|
|
||||||
root: &diskLayer{
|
|
||||||
diskdb: diskdb,
|
|
||||||
triedb: triedb,
|
|
||||||
root: root,
|
|
||||||
cache: fastcache.New(cache * 1024 * 1024),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return snap, nil
|
|
||||||
}
|
|
||||||
log.Warn("Failed to load snapshot, regenerating", "err", err)
|
log.Warn("Failed to load snapshot, regenerating", "err", err)
|
||||||
snap.Rebuild(root)
|
snap.Rebuild(root)
|
||||||
return snap, nil
|
return snap, nil
|
||||||
|
@@ -28,8 +28,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"github.com/holiman/uint256"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var emptyCodeHash = crypto.Keccak256(nil)
|
var emptyCodeHash = crypto.Keccak256(nil)
|
||||||
@@ -200,25 +198,10 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
|
|||||||
}
|
}
|
||||||
// If no live objects are available, attempt to use snapshots
|
// If no live objects are available, attempt to use snapshots
|
||||||
var (
|
var (
|
||||||
enc []byte
|
enc []byte
|
||||||
err error
|
err error
|
||||||
meter *time.Duration
|
|
||||||
)
|
)
|
||||||
readStart := time.Now()
|
|
||||||
if metrics.EnabledExpensive {
|
|
||||||
// If the snap is 'under construction', the first lookup may fail. If that
|
|
||||||
// happens, we don't want to double-count the time elapsed. Thus this
|
|
||||||
// dance with the metering.
|
|
||||||
defer func() {
|
|
||||||
if meter != nil {
|
|
||||||
*meter += time.Since(readStart)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
if s.db.snap != nil {
|
if s.db.snap != nil {
|
||||||
if metrics.EnabledExpensive {
|
|
||||||
meter = &s.db.SnapshotStorageReads
|
|
||||||
}
|
|
||||||
// If the object was destructed in *this* block (and potentially resurrected),
|
// If the object was destructed in *this* block (and potentially resurrected),
|
||||||
// the storage has been cleared out, and we should *not* consult the previous
|
// the storage has been cleared out, and we should *not* consult the previous
|
||||||
// snapshot about any storage values. The only possible alternatives are:
|
// snapshot about any storage values. The only possible alternatives are:
|
||||||
@@ -228,26 +211,22 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has
|
|||||||
if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
|
if _, destructed := s.db.snapDestructs[s.addrHash]; destructed {
|
||||||
return common.Hash{}
|
return common.Hash{}
|
||||||
}
|
}
|
||||||
|
start := time.Now()
|
||||||
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
|
enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
|
||||||
|
if metrics.EnabledExpensive {
|
||||||
|
s.db.SnapshotStorageReads += time.Since(start)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// If the snapshot is unavailable or reading from it fails, load from the database.
|
// If the snapshot is unavailable or reading from it fails, load from the database.
|
||||||
if s.db.snap == nil || err != nil {
|
if s.db.snap == nil || err != nil {
|
||||||
if meter != nil {
|
start := time.Now()
|
||||||
// If we already spent time checking the snapshot, account for it
|
enc, err = s.getTrie(db).TryGet(key.Bytes())
|
||||||
// and reset the readStart
|
|
||||||
*meter += time.Since(readStart)
|
|
||||||
readStart = time.Now()
|
|
||||||
}
|
|
||||||
if metrics.EnabledExpensive {
|
if metrics.EnabledExpensive {
|
||||||
meter = &s.db.StorageReads
|
s.db.StorageReads += time.Since(start)
|
||||||
}
|
}
|
||||||
if !s.db.trie.IsVerkle() {
|
if err != nil {
|
||||||
if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
|
s.setError(err)
|
||||||
s.setError(err)
|
return common.Hash{}
|
||||||
return common.Hash{}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
panic("verkle trees use the snapshot")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var value common.Hash
|
var value common.Hash
|
||||||
@@ -338,12 +317,7 @@ func (s *stateObject) updateTrie(db Database) Trie {
|
|||||||
// The snapshot storage map for the object
|
// The snapshot storage map for the object
|
||||||
var storage map[common.Hash][]byte
|
var storage map[common.Hash][]byte
|
||||||
// Insert all the pending updates into the trie
|
// Insert all the pending updates into the trie
|
||||||
var tr Trie
|
tr := s.getTrie(db)
|
||||||
if s.db.trie.IsVerkle() {
|
|
||||||
tr = s.db.trie
|
|
||||||
} else {
|
|
||||||
tr = s.getTrie(db)
|
|
||||||
}
|
|
||||||
hasher := s.db.hasher
|
hasher := s.db.hasher
|
||||||
|
|
||||||
usedStorage := make([][]byte, 0, len(s.pendingStorage))
|
usedStorage := make([][]byte, 0, len(s.pendingStorage))
|
||||||
@@ -356,25 +330,12 @@ func (s *stateObject) updateTrie(db Database) Trie {
|
|||||||
|
|
||||||
var v []byte
|
var v []byte
|
||||||
if (value == common.Hash{}) {
|
if (value == common.Hash{}) {
|
||||||
if tr.IsVerkle() {
|
s.setError(tr.TryDelete(key[:]))
|
||||||
k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
|
|
||||||
s.setError(tr.TryDelete(k))
|
|
||||||
//s.db.db.TrieDB().DiskDB().Delete(append(s.address[:], key[:]...))
|
|
||||||
} else {
|
|
||||||
s.setError(tr.TryDelete(key[:]))
|
|
||||||
}
|
|
||||||
s.db.StorageDeleted += 1
|
s.db.StorageDeleted += 1
|
||||||
} else {
|
} else {
|
||||||
// Encoding []byte cannot fail, ok to ignore the error.
|
// Encoding []byte cannot fail, ok to ignore the error.
|
||||||
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
|
v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:]))
|
||||||
|
s.setError(tr.TryUpdate(key[:], v))
|
||||||
if !tr.IsVerkle() {
|
|
||||||
s.setError(tr.TryUpdate(key[:], v))
|
|
||||||
} else {
|
|
||||||
k := trieUtils.GetTreeKeyStorageSlot(s.address[:], new(uint256.Int).SetBytes(key[:]))
|
|
||||||
// Update the trie, with v as a value
|
|
||||||
s.setError(tr.TryUpdate(k, v))
|
|
||||||
}
|
|
||||||
s.db.StorageUpdated += 1
|
s.db.StorageUpdated += 1
|
||||||
}
|
}
|
||||||
// If state snapshotting is active, cache the data til commit
|
// If state snapshotting is active, cache the data til commit
|
||||||
|
@@ -18,7 +18,6 @@
|
|||||||
package state
|
package state
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/big"
|
"math/big"
|
||||||
@@ -34,8 +33,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/metrics"
|
"github.com/ethereum/go-ethereum/metrics"
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"github.com/holiman/uint256"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type revision struct {
|
type revision struct {
|
||||||
@@ -102,8 +99,6 @@ type StateDB struct {
|
|||||||
// Per-transaction access list
|
// Per-transaction access list
|
||||||
accessList *accessList
|
accessList *accessList
|
||||||
|
|
||||||
witness *types.AccessWitness
|
|
||||||
|
|
||||||
// Journal of state modifications. This is the backbone of
|
// Journal of state modifications. This is the backbone of
|
||||||
// Snapshot and RevertToSnapshot.
|
// Snapshot and RevertToSnapshot.
|
||||||
journal *journal
|
journal *journal
|
||||||
@@ -148,13 +143,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
|
|||||||
journal: newJournal(),
|
journal: newJournal(),
|
||||||
accessList: newAccessList(),
|
accessList: newAccessList(),
|
||||||
hasher: crypto.NewKeccakState(),
|
hasher: crypto.NewKeccakState(),
|
||||||
witness: types.NewAccessWitness(),
|
|
||||||
}
|
|
||||||
if sdb.snaps == nil && tr.IsVerkle() {
|
|
||||||
sdb.snaps, err = snapshot.New(db.TrieDB().DiskDB(), db.TrieDB(), 1, root, false, true, false, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if sdb.snaps != nil {
|
if sdb.snaps != nil {
|
||||||
if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
|
if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil {
|
||||||
@@ -166,14 +154,6 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
|
|||||||
return sdb, nil
|
return sdb, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *StateDB) Witness() *types.AccessWitness {
|
|
||||||
return s.witness
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *StateDB) SetWitness(aw *types.AccessWitness) {
|
|
||||||
s.witness = aw
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
|
// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the
|
||||||
// state trie concurrently while the state is mutated so that when we reach the
|
// state trie concurrently while the state is mutated so that when we reach the
|
||||||
// commit phase, most of the needed data is already hot.
|
// commit phase, most of the needed data is already hot.
|
||||||
@@ -480,26 +460,8 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
|
|||||||
}
|
}
|
||||||
// Encode the account and update the account trie
|
// Encode the account and update the account trie
|
||||||
addr := obj.Address()
|
addr := obj.Address()
|
||||||
|
|
||||||
if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
|
if err := s.trie.TryUpdateAccount(addr[:], &obj.data); err != nil {
|
||||||
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
|
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err))
|
||||||
}
|
|
||||||
if len(obj.code) > 0 && s.trie.IsVerkle() {
|
|
||||||
cs := make([]byte, 32)
|
|
||||||
binary.BigEndian.PutUint64(cs, uint64(len(obj.code)))
|
|
||||||
if err := s.trie.TryUpdate(trieUtils.GetTreeKeyCodeSize(addr[:]), cs); err != nil {
|
|
||||||
s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.dirtyCode {
|
|
||||||
if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
|
|
||||||
for i := range chunks {
|
|
||||||
s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
s.setError(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// If state snapshotting is active, cache the data til commit. Note, this
|
// If state snapshotting is active, cache the data til commit. Note, this
|
||||||
@@ -517,19 +479,10 @@ func (s *StateDB) deleteStateObject(obj *stateObject) {
|
|||||||
if metrics.EnabledExpensive {
|
if metrics.EnabledExpensive {
|
||||||
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
|
defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete the account from the trie
|
// Delete the account from the trie
|
||||||
if !s.trie.IsVerkle() {
|
addr := obj.Address()
|
||||||
addr := obj.Address()
|
if err := s.trie.TryDelete(addr[:]); err != nil {
|
||||||
if err := s.trie.TryDelete(addr[:]); err != nil {
|
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
|
||||||
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", addr[:], err))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for i := byte(0); i <= 255; i++ {
|
|
||||||
if err := s.trie.TryDelete(trieUtils.GetTreeKeyAccountLeaf(obj.Address().Bytes(), i)); err != nil {
|
|
||||||
s.setError(fmt.Errorf("deleteStateObject (%x) error: %v", obj.Address(), err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -553,16 +506,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
|
|||||||
return obj
|
return obj
|
||||||
}
|
}
|
||||||
// If no live objects are available, attempt to use snapshots
|
// If no live objects are available, attempt to use snapshots
|
||||||
var (
|
var data *types.StateAccount
|
||||||
data *types.StateAccount
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if s.snap != nil {
|
if s.snap != nil {
|
||||||
|
start := time.Now()
|
||||||
|
acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
|
||||||
if metrics.EnabledExpensive {
|
if metrics.EnabledExpensive {
|
||||||
defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
|
s.SnapshotAccountReads += time.Since(start)
|
||||||
}
|
}
|
||||||
var acc *snapshot.Account
|
if err == nil {
|
||||||
if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
|
|
||||||
if acc == nil {
|
if acc == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -579,21 +530,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
|
|||||||
data.Root = emptyRoot
|
data.Root = emptyRoot
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NOTE: Do not touch the addresses here, kick the can down the
|
|
||||||
// road. That is because I don't want to change the interface
|
|
||||||
// to getDeletedStateObject at this stage, as the PR would then
|
|
||||||
// have a huge footprint.
|
|
||||||
// The alternative is to make accesses available via the state
|
|
||||||
// db instead of the evm. This requires a significant rewrite,
|
|
||||||
// that isn't currently warranted.
|
|
||||||
}
|
}
|
||||||
// If snapshot unavailable or reading from it failed, load from the database
|
// If snapshot unavailable or reading from it failed, load from the database
|
||||||
if s.snap == nil || err != nil {
|
if data == nil {
|
||||||
if metrics.EnabledExpensive {
|
start := time.Now()
|
||||||
defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
|
|
||||||
}
|
|
||||||
enc, err := s.trie.TryGet(addr.Bytes())
|
enc, err := s.trie.TryGet(addr.Bytes())
|
||||||
|
if metrics.EnabledExpensive {
|
||||||
|
s.AccountReads += time.Since(start)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
|
s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
|
||||||
return nil
|
return nil
|
||||||
@@ -713,7 +657,6 @@ func (s *StateDB) Copy() *StateDB {
|
|||||||
preimages: make(map[common.Hash][]byte, len(s.preimages)),
|
preimages: make(map[common.Hash][]byte, len(s.preimages)),
|
||||||
journal: newJournal(),
|
journal: newJournal(),
|
||||||
hasher: crypto.NewKeccakState(),
|
hasher: crypto.NewKeccakState(),
|
||||||
witness: s.witness.Copy(),
|
|
||||||
}
|
}
|
||||||
// Copy the dirty states, logs, and preimages
|
// Copy the dirty states, logs, and preimages
|
||||||
for addr := range s.journal.dirties {
|
for addr := range s.journal.dirties {
|
||||||
@@ -901,11 +844,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
|
|||||||
// to pull useful data from disk.
|
// to pull useful data from disk.
|
||||||
for addr := range s.stateObjectsPending {
|
for addr := range s.stateObjectsPending {
|
||||||
if obj := s.stateObjects[addr]; !obj.deleted {
|
if obj := s.stateObjects[addr]; !obj.deleted {
|
||||||
if s.trie.IsVerkle() {
|
obj.updateRoot(s.db)
|
||||||
obj.updateTrie(s.db)
|
|
||||||
} else {
|
|
||||||
obj.updateRoot(s.db)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Now we're about to start to write changes to the trie. The trie is so far
|
// Now we're about to start to write changes to the trie. The trie is so far
|
||||||
@@ -956,20 +895,6 @@ func (s *StateDB) clearJournalAndRefund() {
|
|||||||
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires
|
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entires
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTrie returns the account trie.
|
|
||||||
func (s *StateDB) GetTrie() Trie {
|
|
||||||
return s.trie
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *StateDB) Cap(root common.Hash) error {
|
|
||||||
if s.snaps != nil {
|
|
||||||
return s.snaps.Cap(root, 0)
|
|
||||||
}
|
|
||||||
// pre-verkle path: noop if s.snaps hasn't been
|
|
||||||
// initialized.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Commit writes the state to the underlying in-memory trie database.
|
// Commit writes the state to the underlying in-memory trie database.
|
||||||
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
|
func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
|
||||||
if s.dbErr != nil {
|
if s.dbErr != nil {
|
||||||
@@ -983,27 +908,17 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
|
|||||||
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
|
codeWriter := s.db.TrieDB().DiskDB().NewBatch()
|
||||||
for addr := range s.stateObjectsDirty {
|
for addr := range s.stateObjectsDirty {
|
||||||
if obj := s.stateObjects[addr]; !obj.deleted {
|
if obj := s.stateObjects[addr]; !obj.deleted {
|
||||||
|
// Write any contract code associated with the state object
|
||||||
|
if obj.code != nil && obj.dirtyCode {
|
||||||
|
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
|
||||||
|
obj.dirtyCode = false
|
||||||
|
}
|
||||||
// Write any storage changes in the state object to its storage trie
|
// Write any storage changes in the state object to its storage trie
|
||||||
committed, err := obj.CommitTrie(s.db)
|
committed, err := obj.CommitTrie(s.db)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return common.Hash{}, err
|
return common.Hash{}, err
|
||||||
}
|
}
|
||||||
storageCommitted += committed
|
storageCommitted += committed
|
||||||
// Write any contract code associated with the state object
|
|
||||||
if obj.code != nil && obj.dirtyCode {
|
|
||||||
if s.trie.IsVerkle() {
|
|
||||||
if chunks, err := trie.ChunkifyCode(addr, obj.code); err == nil {
|
|
||||||
for i := range chunks {
|
|
||||||
s.trie.TryUpdate(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(uint64(i))), chunks[i][:])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
s.setError(err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code)
|
|
||||||
}
|
|
||||||
obj.dirtyCode = false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(s.stateObjectsDirty) > 0 {
|
if len(s.stateObjectsDirty) > 0 {
|
||||||
|
@@ -704,10 +704,7 @@ func TestMissingTrieNodes(t *testing.T) {
|
|||||||
memDb := rawdb.NewMemoryDatabase()
|
memDb := rawdb.NewMemoryDatabase()
|
||||||
db := NewDatabase(memDb)
|
db := NewDatabase(memDb)
|
||||||
var root common.Hash
|
var root common.Hash
|
||||||
state, err := New(common.Hash{}, db, nil)
|
state, _ := New(common.Hash{}, db, nil)
|
||||||
if err != nil {
|
|
||||||
panic("nil stte")
|
|
||||||
}
|
|
||||||
addr := common.BytesToAddress([]byte("so"))
|
addr := common.BytesToAddress([]byte("so"))
|
||||||
{
|
{
|
||||||
state.SetBalance(addr, big.NewInt(1))
|
state.SetBalance(addr, big.NewInt(1))
|
||||||
@@ -739,7 +736,7 @@ func TestMissingTrieNodes(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// Modify the state
|
// Modify the state
|
||||||
state.SetBalance(addr, big.NewInt(2))
|
state.SetBalance(addr, big.NewInt(2))
|
||||||
root, err = state.Commit(false)
|
root, err := state.Commit(false)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Fatalf("expected error, got root :%x", root)
|
t.Fatalf("expected error, got root :%x", root)
|
||||||
}
|
}
|
||||||
|
@@ -27,7 +27,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// NewStateSync create a new state trie download scheduler.
|
// NewStateSync create a new state trie download scheduler.
|
||||||
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.SyncBloom, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync {
|
func NewStateSync(root common.Hash, database ethdb.KeyValueReader, onLeaf func(paths [][]byte, leaf []byte) error) *trie.Sync {
|
||||||
// Register the storage slot callback if the external callback is specified.
|
// Register the storage slot callback if the external callback is specified.
|
||||||
var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
|
var onSlot func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error
|
||||||
if onLeaf != nil {
|
if onLeaf != nil {
|
||||||
@@ -52,6 +52,6 @@ func NewStateSync(root common.Hash, database ethdb.KeyValueReader, bloom *trie.S
|
|||||||
syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent)
|
syncer.AddCodeEntry(common.BytesToHash(obj.CodeHash), hexpath, parent)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
syncer = trie.NewSync(root, database, onAccount, bloom)
|
syncer = trie.NewSync(root, database, onAccount)
|
||||||
return syncer
|
return syncer
|
||||||
}
|
}
|
||||||
|
@@ -26,7 +26,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/ethdb"
|
"github.com/ethereum/go-ethereum/ethdb"
|
||||||
"github.com/ethereum/go-ethereum/ethdb/memorydb"
|
|
||||||
"github.com/ethereum/go-ethereum/rlp"
|
"github.com/ethereum/go-ethereum/rlp"
|
||||||
"github.com/ethereum/go-ethereum/trie"
|
"github.com/ethereum/go-ethereum/trie"
|
||||||
)
|
)
|
||||||
@@ -70,10 +69,7 @@ func makeTestState() (Database, common.Hash, []*testAccount) {
|
|||||||
state.updateStateObject(obj)
|
state.updateStateObject(obj)
|
||||||
accounts = append(accounts, acc)
|
accounts = append(accounts, acc)
|
||||||
}
|
}
|
||||||
root, err := state.Commit(false)
|
root, _ := state.Commit(false)
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return the generated state
|
// Return the generated state
|
||||||
return db, root, accounts
|
return db, root, accounts
|
||||||
@@ -137,7 +133,7 @@ func checkStateConsistency(db ethdb.Database, root common.Hash) error {
|
|||||||
// Tests that an empty state is not scheduled for syncing.
|
// Tests that an empty state is not scheduled for syncing.
|
||||||
func TestEmptyStateSync(t *testing.T) {
|
func TestEmptyStateSync(t *testing.T) {
|
||||||
empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
empty := common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||||
sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), trie.NewSyncBloom(1, memorydb.New()), nil)
|
sync := NewStateSync(empty, rawdb.NewMemoryDatabase(), nil)
|
||||||
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
|
if nodes, paths, codes := sync.Missing(1); len(nodes) != 0 || len(paths) != 0 || len(codes) != 0 {
|
||||||
t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
|
t.Errorf(" content requested for empty state: %v, %v, %v", nodes, paths, codes)
|
||||||
}
|
}
|
||||||
@@ -174,7 +170,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool) {
|
|||||||
|
|
||||||
// Create a destination state and sync with the scheduler
|
// Create a destination state and sync with the scheduler
|
||||||
dstDb := rawdb.NewMemoryDatabase()
|
dstDb := rawdb.NewMemoryDatabase()
|
||||||
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
|
sched := NewStateSync(srcRoot, dstDb, nil)
|
||||||
|
|
||||||
nodes, paths, codes := sched.Missing(count)
|
nodes, paths, codes := sched.Missing(count)
|
||||||
var (
|
var (
|
||||||
@@ -253,7 +249,7 @@ func TestIterativeDelayedStateSync(t *testing.T) {
|
|||||||
|
|
||||||
// Create a destination state and sync with the scheduler
|
// Create a destination state and sync with the scheduler
|
||||||
dstDb := rawdb.NewMemoryDatabase()
|
dstDb := rawdb.NewMemoryDatabase()
|
||||||
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
|
sched := NewStateSync(srcRoot, dstDb, nil)
|
||||||
|
|
||||||
nodes, _, codes := sched.Missing(0)
|
nodes, _, codes := sched.Missing(0)
|
||||||
queue := append(append([]common.Hash{}, nodes...), codes...)
|
queue := append(append([]common.Hash{}, nodes...), codes...)
|
||||||
@@ -301,7 +297,7 @@ func testIterativeRandomStateSync(t *testing.T, count int) {
|
|||||||
|
|
||||||
// Create a destination state and sync with the scheduler
|
// Create a destination state and sync with the scheduler
|
||||||
dstDb := rawdb.NewMemoryDatabase()
|
dstDb := rawdb.NewMemoryDatabase()
|
||||||
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
|
sched := NewStateSync(srcRoot, dstDb, nil)
|
||||||
|
|
||||||
queue := make(map[common.Hash]struct{})
|
queue := make(map[common.Hash]struct{})
|
||||||
nodes, _, codes := sched.Missing(count)
|
nodes, _, codes := sched.Missing(count)
|
||||||
@@ -351,7 +347,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) {
|
|||||||
|
|
||||||
// Create a destination state and sync with the scheduler
|
// Create a destination state and sync with the scheduler
|
||||||
dstDb := rawdb.NewMemoryDatabase()
|
dstDb := rawdb.NewMemoryDatabase()
|
||||||
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
|
sched := NewStateSync(srcRoot, dstDb, nil)
|
||||||
|
|
||||||
queue := make(map[common.Hash]struct{})
|
queue := make(map[common.Hash]struct{})
|
||||||
nodes, _, codes := sched.Missing(0)
|
nodes, _, codes := sched.Missing(0)
|
||||||
@@ -418,7 +414,7 @@ func TestIncompleteStateSync(t *testing.T) {
|
|||||||
|
|
||||||
// Create a destination state and sync with the scheduler
|
// Create a destination state and sync with the scheduler
|
||||||
dstDb := rawdb.NewMemoryDatabase()
|
dstDb := rawdb.NewMemoryDatabase()
|
||||||
sched := NewStateSync(srcRoot, dstDb, trie.NewSyncBloom(1, dstDb), nil)
|
sched := NewStateSync(srcRoot, dstDb, nil)
|
||||||
|
|
||||||
var added []common.Hash
|
var added []common.Hash
|
||||||
|
|
||||||
|
@@ -128,8 +128,6 @@ func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainCon
|
|||||||
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
|
receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce())
|
||||||
}
|
}
|
||||||
|
|
||||||
statedb.Witness().Merge(txContext.Accesses)
|
|
||||||
|
|
||||||
// Set the receipt logs and create the bloom filter.
|
// Set the receipt logs and create the bloom filter.
|
||||||
receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash)
|
receipt.Logs = statedb.GetLogs(tx.Hash(), blockHash)
|
||||||
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
|
receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
|
||||||
|
@@ -340,55 +340,3 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr
|
|||||||
// Assemble and return the final block for sealing
|
// Assemble and return the final block for sealing
|
||||||
return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
|
return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestProcessStateless(t *testing.T) {
|
|
||||||
var (
|
|
||||||
config = ¶ms.ChainConfig{
|
|
||||||
ChainID: big.NewInt(1),
|
|
||||||
HomesteadBlock: big.NewInt(0),
|
|
||||||
EIP150Block: big.NewInt(0),
|
|
||||||
EIP155Block: big.NewInt(0),
|
|
||||||
EIP158Block: big.NewInt(0),
|
|
||||||
ByzantiumBlock: big.NewInt(0),
|
|
||||||
ConstantinopleBlock: big.NewInt(0),
|
|
||||||
PetersburgBlock: big.NewInt(0),
|
|
||||||
IstanbulBlock: big.NewInt(0),
|
|
||||||
MuirGlacierBlock: big.NewInt(0),
|
|
||||||
BerlinBlock: big.NewInt(0),
|
|
||||||
LondonBlock: big.NewInt(0),
|
|
||||||
Ethash: new(params.EthashConfig),
|
|
||||||
CancunBlock: big.NewInt(0),
|
|
||||||
}
|
|
||||||
signer = types.LatestSigner(config)
|
|
||||||
testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
|
|
||||||
db = rawdb.NewMemoryDatabase()
|
|
||||||
gspec = &Genesis{
|
|
||||||
Config: config,
|
|
||||||
Alloc: GenesisAlloc{
|
|
||||||
common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{
|
|
||||||
Balance: big.NewInt(1000000000000000000), // 1 ether
|
|
||||||
Nonce: 0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
// Verkle trees use the snapshot, which must be enabled before the
|
|
||||||
// data is saved into the tree+database.
|
|
||||||
genesis := gspec.MustCommit(db)
|
|
||||||
blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil)
|
|
||||||
defer blockchain.Stop()
|
|
||||||
chain, _ := GenerateVerkleChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(_ int, gen *BlockGen) {
|
|
||||||
tx, _ := types.SignTx(types.NewTransaction(0, common.Address{1, 2, 3}, big.NewInt(999), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
|
|
||||||
gen.AddTx(tx)
|
|
||||||
tx, _ = types.SignTx(types.NewTransaction(1, common.Address{}, big.NewInt(999), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
|
|
||||||
gen.AddTx(tx)
|
|
||||||
tx, _ = types.SignTx(types.NewTransaction(2, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), signer, testKey)
|
|
||||||
gen.AddTx(tx)
|
|
||||||
|
|
||||||
})
|
|
||||||
|
|
||||||
_, err := blockchain.InsertChain(chain)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("block imported with error: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@@ -17,7 +17,6 @@
|
|||||||
package core
|
package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"math"
|
"math"
|
||||||
"math/big"
|
"math/big"
|
||||||
@@ -28,7 +27,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/core/vm"
|
"github.com/ethereum/go-ethereum/core/vm"
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
var emptyCodeHash = crypto.Keccak256Hash(nil)
|
||||||
@@ -117,7 +115,7 @@ func (result *ExecutionResult) Revert() []byte {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
|
// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
|
||||||
func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation, isHomestead, isEIP2028 bool) (uint64, error) {
|
func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool) (uint64, error) {
|
||||||
// Set the starting gas for the raw transaction
|
// Set the starting gas for the raw transaction
|
||||||
var gas uint64
|
var gas uint64
|
||||||
if isContractCreation && isHomestead {
|
if isContractCreation && isHomestead {
|
||||||
@@ -304,27 +302,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
|||||||
if st.gas < gas {
|
if st.gas < gas {
|
||||||
return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas)
|
return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas)
|
||||||
}
|
}
|
||||||
if st.evm.TxContext.Accesses != nil {
|
|
||||||
if msg.To() != nil {
|
|
||||||
toBalance := trieUtils.GetTreeKeyBalance(msg.To().Bytes())
|
|
||||||
pre := st.state.GetBalance(*msg.To())
|
|
||||||
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(toBalance, pre.Bytes())
|
|
||||||
|
|
||||||
// NOTE: Nonce also needs to be charged, because it is needed for execution
|
|
||||||
// on the statless side.
|
|
||||||
var preTN [8]byte
|
|
||||||
fromNonce := trieUtils.GetTreeKeyNonce(msg.To().Bytes())
|
|
||||||
binary.BigEndian.PutUint64(preTN[:], st.state.GetNonce(*msg.To()))
|
|
||||||
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(fromNonce, preTN[:])
|
|
||||||
}
|
|
||||||
fromBalance := trieUtils.GetTreeKeyBalance(msg.From().Bytes())
|
|
||||||
preFB := st.state.GetBalance(msg.From()).Bytes()
|
|
||||||
fromNonce := trieUtils.GetTreeKeyNonce(msg.From().Bytes())
|
|
||||||
var preFN [8]byte
|
|
||||||
binary.BigEndian.PutUint64(preFN[:], st.state.GetNonce(msg.From()))
|
|
||||||
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(fromNonce, preFN[:])
|
|
||||||
gas += st.evm.TxContext.Accesses.TouchAddressAndChargeGas(fromBalance, preFB[:])
|
|
||||||
}
|
|
||||||
st.gas -= gas
|
st.gas -= gas
|
||||||
|
|
||||||
// Check clause 6
|
// Check clause 6
|
||||||
@@ -333,7 +310,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Set up the initial access list.
|
// Set up the initial access list.
|
||||||
if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber); rules.IsBerlin {
|
if rules := st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Random != nil); rules.IsBerlin {
|
||||||
st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList())
|
st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList())
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
|
@@ -621,9 +621,8 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return ErrInvalidSender
|
return ErrInvalidSender
|
||||||
}
|
}
|
||||||
// Drop non-local transactions under our own minimal accepted gas price or tip.
|
// Drop non-local transactions under our own minimal accepted gas price or tip
|
||||||
pendingBaseFee := pool.priced.urgent.baseFee
|
if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 {
|
||||||
if !local && tx.EffectiveGasTipIntCmp(pool.gasPrice, pendingBaseFee) < 0 {
|
|
||||||
return ErrUnderpriced
|
return ErrUnderpriced
|
||||||
}
|
}
|
||||||
// Ensure the transaction adheres to nonce ordering
|
// Ensure the transaction adheres to nonce ordering
|
||||||
|
@@ -1,144 +0,0 @@
|
|||||||
// Copyright 2021 The go-ethereum Authors
|
|
||||||
// This file is part of the go-ethereum library.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU Lesser General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU Lesser General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU Lesser General Public License
|
|
||||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package types
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AccessWitness lists the locations of the state that are being accessed
|
|
||||||
// during the production of a block.
|
|
||||||
// TODO(@gballet) this doesn't fully support deletions
|
|
||||||
type AccessWitness struct {
|
|
||||||
// Branches flags if a given branch has been loaded
|
|
||||||
Branches map[[31]byte]struct{}
|
|
||||||
|
|
||||||
// Chunks contains the initial value of each address
|
|
||||||
Chunks map[common.Hash][]byte
|
|
||||||
|
|
||||||
// The initial value isn't always available at the time an
|
|
||||||
// address is touched, this map references addresses that
|
|
||||||
// were touched but can not yet be put in Chunks.
|
|
||||||
Undefined map[common.Hash]struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewAccessWitness() *AccessWitness {
|
|
||||||
return &AccessWitness{
|
|
||||||
Branches: make(map[[31]byte]struct{}),
|
|
||||||
Chunks: make(map[common.Hash][]byte),
|
|
||||||
Undefined: make(map[common.Hash]struct{}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TouchAddress adds any missing addr to the witness and returns respectively
|
|
||||||
// true if the stem or the stub weren't arleady present.
|
|
||||||
func (aw *AccessWitness) TouchAddress(addr, value []byte) (bool, bool) {
|
|
||||||
var (
|
|
||||||
stem [31]byte
|
|
||||||
newStem bool
|
|
||||||
newSelector bool
|
|
||||||
)
|
|
||||||
copy(stem[:], addr[:31])
|
|
||||||
|
|
||||||
// Check for the presence of the stem
|
|
||||||
if _, newStem := aw.Branches[stem]; !newStem {
|
|
||||||
aw.Branches[stem] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check for the presence of the selector
|
|
||||||
if _, newSelector := aw.Chunks[common.BytesToHash(addr)]; !newSelector {
|
|
||||||
if value == nil {
|
|
||||||
aw.Undefined[common.BytesToHash(addr)] = struct{}{}
|
|
||||||
} else {
|
|
||||||
if _, ok := aw.Undefined[common.BytesToHash(addr)]; !ok {
|
|
||||||
delete(aw.Undefined, common.BytesToHash(addr))
|
|
||||||
}
|
|
||||||
aw.Chunks[common.BytesToHash(addr)] = value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return newStem, newSelector
|
|
||||||
}
|
|
||||||
|
|
||||||
// TouchAddressAndChargeGas checks if a location has already been touched in
|
|
||||||
// the current witness, and charge extra gas if that isn't the case. This is
|
|
||||||
// meant to only be called on a tx-context access witness (i.e. before it is
|
|
||||||
// merged), not a block-context witness: witness costs are charged per tx.
|
|
||||||
func (aw *AccessWitness) TouchAddressAndChargeGas(addr, value []byte) uint64 {
|
|
||||||
var gas uint64
|
|
||||||
|
|
||||||
nstem, nsel := aw.TouchAddress(addr, value)
|
|
||||||
if nstem {
|
|
||||||
gas += params.WitnessBranchCost
|
|
||||||
}
|
|
||||||
if nsel {
|
|
||||||
gas += params.WitnessChunkCost
|
|
||||||
}
|
|
||||||
return gas
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge is used to merge the witness that got generated during the execution
|
|
||||||
// of a tx, with the accumulation of witnesses that were generated during the
|
|
||||||
// execution of all the txs preceding this one in a given block.
|
|
||||||
func (aw *AccessWitness) Merge(other *AccessWitness) {
|
|
||||||
for k := range other.Undefined {
|
|
||||||
if _, ok := aw.Undefined[k]; !ok {
|
|
||||||
aw.Undefined[k] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for k := range other.Branches {
|
|
||||||
if _, ok := aw.Branches[k]; !ok {
|
|
||||||
aw.Branches[k] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, chunk := range other.Chunks {
|
|
||||||
if _, ok := aw.Chunks[k]; !ok {
|
|
||||||
aw.Chunks[k] = chunk
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key returns, predictably, the list of keys that were touched during the
|
|
||||||
// buildup of the access witness.
|
|
||||||
func (aw *AccessWitness) Keys() [][]byte {
|
|
||||||
keys := make([][]byte, 0, len(aw.Chunks))
|
|
||||||
for key := range aw.Chunks {
|
|
||||||
var k [32]byte
|
|
||||||
copy(k[:], key[:])
|
|
||||||
keys = append(keys, k[:])
|
|
||||||
}
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aw *AccessWitness) KeyVals() map[common.Hash][]byte {
|
|
||||||
return aw.Chunks
|
|
||||||
}
|
|
||||||
|
|
||||||
func (aw *AccessWitness) Copy() *AccessWitness {
|
|
||||||
naw := &AccessWitness{
|
|
||||||
Branches: make(map[[31]byte]struct{}),
|
|
||||||
Chunks: make(map[common.Hash][]byte),
|
|
||||||
Undefined: make(map[common.Hash]struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
naw.Merge(aw)
|
|
||||||
|
|
||||||
return naw
|
|
||||||
}
|
|
@@ -86,9 +86,6 @@ type Header struct {
|
|||||||
// BaseFee was added by EIP-1559 and is ignored in legacy headers.
|
// BaseFee was added by EIP-1559 and is ignored in legacy headers.
|
||||||
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
|
BaseFee *big.Int `json:"baseFeePerGas" rlp:"optional"`
|
||||||
|
|
||||||
// The verkle proof is ignored in legacy headers
|
|
||||||
VerkleProof []byte `json:"verkleProof" rlp:"optional"`
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
TODO (MariusVanDerWijden) Add this field once needed
|
TODO (MariusVanDerWijden) Add this field once needed
|
||||||
// Random was added during the merge and contains the BeaconState randomness
|
// Random was added during the merge and contains the BeaconState randomness
|
||||||
@@ -340,10 +337,6 @@ func (b *Block) SanityCheck() error {
|
|||||||
return b.header.SanityCheck()
|
return b.header.SanityCheck()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *Block) SetVerkleProof(vp []byte) {
|
|
||||||
b.header.VerkleProof = vp
|
|
||||||
}
|
|
||||||
|
|
||||||
type writeCounter common.StorageSize
|
type writeCounter common.StorageSize
|
||||||
|
|
||||||
func (c *writeCounter) Write(b []byte) (int, error) {
|
func (c *writeCounter) Write(b []byte) (int, error) {
|
||||||
@@ -396,3 +389,21 @@ func (b *Block) Hash() common.Hash {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Blocks []*Block
|
type Blocks []*Block
|
||||||
|
|
||||||
|
// HeaderParentHashFromRLP returns the parentHash of an RLP-encoded
|
||||||
|
// header. If 'header' is invalid, the zero hash is returned.
|
||||||
|
func HeaderParentHashFromRLP(header []byte) common.Hash {
|
||||||
|
// parentHash is the first list element.
|
||||||
|
listContent, _, err := rlp.SplitList(header)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
parentHash, _, err := rlp.SplitString(listContent)
|
||||||
|
if err != nil {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
if len(parentHash) != 32 {
|
||||||
|
return common.Hash{}
|
||||||
|
}
|
||||||
|
return common.BytesToHash(parentHash)
|
||||||
|
}
|
||||||
|
@@ -281,3 +281,64 @@ func makeBenchBlock() *Block {
|
|||||||
}
|
}
|
||||||
return NewBlock(header, txs, uncles, receipts, newHasher())
|
return NewBlock(header, txs, uncles, receipts, newHasher())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRlpDecodeParentHash(t *testing.T) {
|
||||||
|
// A minimum one
|
||||||
|
want := common.HexToHash("0x112233445566778899001122334455667788990011223344556677889900aabb")
|
||||||
|
if rlpData, err := rlp.EncodeToBytes(Header{ParentHash: want}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else {
|
||||||
|
if have := HeaderParentHashFromRLP(rlpData); have != want {
|
||||||
|
t.Fatalf("have %x, want %x", have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// And a maximum one
|
||||||
|
// | Difficulty | dynamic| *big.Int | 0x5ad3c2c71bbff854908 (current mainnet TD: 76 bits) |
|
||||||
|
// | Number | dynamic| *big.Int | 64 bits |
|
||||||
|
// | Extra | dynamic| []byte | 65+32 byte (clique) |
|
||||||
|
// | BaseFee | dynamic| *big.Int | 64 bits |
|
||||||
|
mainnetTd := new(big.Int)
|
||||||
|
mainnetTd.SetString("5ad3c2c71bbff854908", 16)
|
||||||
|
if rlpData, err := rlp.EncodeToBytes(Header{
|
||||||
|
ParentHash: want,
|
||||||
|
Difficulty: mainnetTd,
|
||||||
|
Number: new(big.Int).SetUint64(math.MaxUint64),
|
||||||
|
Extra: make([]byte, 65+32),
|
||||||
|
BaseFee: new(big.Int).SetUint64(math.MaxUint64),
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else {
|
||||||
|
if have := HeaderParentHashFromRLP(rlpData); have != want {
|
||||||
|
t.Fatalf("have %x, want %x", have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Also test a very very large header.
|
||||||
|
{
|
||||||
|
// The rlp-encoding of the heder belowCauses _total_ length of 65540,
|
||||||
|
// which is the first to blow the fast-path.
|
||||||
|
h := Header{
|
||||||
|
ParentHash: want,
|
||||||
|
Extra: make([]byte, 65041),
|
||||||
|
}
|
||||||
|
if rlpData, err := rlp.EncodeToBytes(h); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
} else {
|
||||||
|
if have := HeaderParentHashFromRLP(rlpData); have != want {
|
||||||
|
t.Fatalf("have %x, want %x", have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
// Test some invalid erroneous stuff
|
||||||
|
for i, rlpData := range [][]byte{
|
||||||
|
nil,
|
||||||
|
common.FromHex("0x"),
|
||||||
|
common.FromHex("0x01"),
|
||||||
|
common.FromHex("0x3031323334"),
|
||||||
|
} {
|
||||||
|
if have, want := HeaderParentHashFromRLP(rlpData), (common.Hash{}); have != want {
|
||||||
|
t.Fatalf("invalid %d: have %x, want %x", i, have, want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@@ -25,8 +25,8 @@ import (
|
|||||||
type DynamicFeeTx struct {
|
type DynamicFeeTx struct {
|
||||||
ChainID *big.Int
|
ChainID *big.Int
|
||||||
Nonce uint64
|
Nonce uint64
|
||||||
GasTipCap *big.Int
|
GasTipCap *big.Int // a.k.a. maxPriorityFeePerGas
|
||||||
GasFeeCap *big.Int
|
GasFeeCap *big.Int // a.k.a. maxFeePerGas
|
||||||
Gas uint64
|
Gas uint64
|
||||||
To *common.Address `rlp:"nil"` // nil means contract creation
|
To *common.Address `rlp:"nil"` // nil means contract creation
|
||||||
Value *big.Int
|
Value *big.Int
|
||||||
|
@@ -17,12 +17,12 @@
|
|||||||
package vm
|
package vm
|
||||||
|
|
||||||
const (
|
const (
|
||||||
set2BitsMask = uint16(0b1100_0000_0000_0000)
|
set2BitsMask = uint16(0b11)
|
||||||
set3BitsMask = uint16(0b1110_0000_0000_0000)
|
set3BitsMask = uint16(0b111)
|
||||||
set4BitsMask = uint16(0b1111_0000_0000_0000)
|
set4BitsMask = uint16(0b1111)
|
||||||
set5BitsMask = uint16(0b1111_1000_0000_0000)
|
set5BitsMask = uint16(0b1_1111)
|
||||||
set6BitsMask = uint16(0b1111_1100_0000_0000)
|
set6BitsMask = uint16(0b11_1111)
|
||||||
set7BitsMask = uint16(0b1111_1110_0000_0000)
|
set7BitsMask = uint16(0b111_1111)
|
||||||
)
|
)
|
||||||
|
|
||||||
// bitvec is a bit vector which maps bytes in a program.
|
// bitvec is a bit vector which maps bytes in a program.
|
||||||
@@ -30,32 +30,26 @@ const (
|
|||||||
// it's data (i.e. argument of PUSHxx).
|
// it's data (i.e. argument of PUSHxx).
|
||||||
type bitvec []byte
|
type bitvec []byte
|
||||||
|
|
||||||
var lookup = [8]byte{
|
|
||||||
0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bits bitvec) set1(pos uint64) {
|
func (bits bitvec) set1(pos uint64) {
|
||||||
bits[pos/8] |= lookup[pos%8]
|
bits[pos/8] |= 1 << (pos % 8)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bits bitvec) setN(flag uint16, pos uint64) {
|
func (bits bitvec) setN(flag uint16, pos uint64) {
|
||||||
a := flag >> (pos % 8)
|
a := flag << (pos % 8)
|
||||||
bits[pos/8] |= byte(a >> 8)
|
bits[pos/8] |= byte(a)
|
||||||
if b := byte(a); b != 0 {
|
if b := byte(a >> 8); b != 0 {
|
||||||
// If the bit-setting affects the neighbouring byte, we can assign - no need to OR it,
|
|
||||||
// since it's the first write to that byte
|
|
||||||
bits[pos/8+1] = b
|
bits[pos/8+1] = b
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bits bitvec) set8(pos uint64) {
|
func (bits bitvec) set8(pos uint64) {
|
||||||
a := byte(0xFF >> (pos % 8))
|
a := byte(0xFF << (pos % 8))
|
||||||
bits[pos/8] |= a
|
bits[pos/8] |= a
|
||||||
bits[pos/8+1] = ^a
|
bits[pos/8+1] = ^a
|
||||||
}
|
}
|
||||||
|
|
||||||
func (bits bitvec) set16(pos uint64) {
|
func (bits bitvec) set16(pos uint64) {
|
||||||
a := byte(0xFF >> (pos % 8))
|
a := byte(0xFF << (pos % 8))
|
||||||
bits[pos/8] |= a
|
bits[pos/8] |= a
|
||||||
bits[pos/8+1] = 0xFF
|
bits[pos/8+1] = 0xFF
|
||||||
bits[pos/8+2] = ^a
|
bits[pos/8+2] = ^a
|
||||||
@@ -63,7 +57,7 @@ func (bits bitvec) set16(pos uint64) {
|
|||||||
|
|
||||||
// codeSegment checks if the position is in a code segment.
|
// codeSegment checks if the position is in a code segment.
|
||||||
func (bits *bitvec) codeSegment(pos uint64) bool {
|
func (bits *bitvec) codeSegment(pos uint64) bool {
|
||||||
return ((*bits)[pos/8] & (0x80 >> (pos % 8))) == 0
|
return (((*bits)[pos/8] >> (pos % 8)) & 1) == 0
|
||||||
}
|
}
|
||||||
|
|
||||||
// codeBitmap collects data locations in code.
|
// codeBitmap collects data locations in code.
|
||||||
|
@@ -17,6 +17,7 @@
|
|||||||
package vm
|
package vm
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"math/bits"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
@@ -28,24 +29,27 @@ func TestJumpDestAnalysis(t *testing.T) {
|
|||||||
exp byte
|
exp byte
|
||||||
which int
|
which int
|
||||||
}{
|
}{
|
||||||
{[]byte{byte(PUSH1), 0x01, 0x01, 0x01}, 0x40, 0},
|
{[]byte{byte(PUSH1), 0x01, 0x01, 0x01}, 0b0000_0010, 0},
|
||||||
{[]byte{byte(PUSH1), byte(PUSH1), byte(PUSH1), byte(PUSH1)}, 0x50, 0},
|
{[]byte{byte(PUSH1), byte(PUSH1), byte(PUSH1), byte(PUSH1)}, 0b0000_1010, 0},
|
||||||
{[]byte{byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), 0x01, 0x01, 0x01}, 0x7F, 0},
|
{[]byte{0x00, byte(PUSH1), 0x00, byte(PUSH1), 0x00, byte(PUSH1), 0x00, byte(PUSH1)}, 0b0101_0100, 0},
|
||||||
{[]byte{byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x80, 1},
|
{[]byte{byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), byte(PUSH8), 0x01, 0x01, 0x01}, bits.Reverse8(0x7F), 0},
|
||||||
{[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), byte(PUSH2), byte(PUSH2), 0x01, 0x01, 0x01}, 0x03, 0},
|
{[]byte{byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0001, 1},
|
||||||
{[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), 0x01, 0x01, 0x01, 0x01, 0x01}, 0x00, 1},
|
{[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), byte(PUSH2), byte(PUSH2), 0x01, 0x01, 0x01}, 0b1100_0000, 0},
|
||||||
{[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x74, 0},
|
{[]byte{0x01, 0x01, 0x01, 0x01, 0x01, byte(PUSH2), 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0000, 1},
|
||||||
{[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x00, 1},
|
{[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0010_1110, 0},
|
||||||
{[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x3F, 0},
|
{[]byte{byte(PUSH3), 0x01, 0x01, 0x01, byte(PUSH1), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0000, 1},
|
||||||
{[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0xC0, 1},
|
{[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b1111_1100, 0},
|
||||||
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x7F, 0},
|
{[]byte{0x01, byte(PUSH8), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0011, 1},
|
||||||
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0xFF, 1},
|
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b1111_1110, 0},
|
||||||
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0x80, 2},
|
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b1111_1111, 1},
|
||||||
{[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0x7f, 0},
|
{[]byte{byte(PUSH16), 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01}, 0b0000_0001, 2},
|
||||||
{[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0xA0, 1},
|
{[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0b1111_1110, 0},
|
||||||
{[]byte{byte(PUSH32)}, 0x7F, 0},
|
{[]byte{byte(PUSH8), 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, byte(PUSH1), 0x01}, 0b0000_0101, 1},
|
||||||
{[]byte{byte(PUSH32)}, 0xFF, 1},
|
{[]byte{byte(PUSH32)}, 0b1111_1110, 0},
|
||||||
{[]byte{byte(PUSH32)}, 0xFF, 2},
|
{[]byte{byte(PUSH32)}, 0b1111_1111, 1},
|
||||||
|
{[]byte{byte(PUSH32)}, 0b1111_1111, 2},
|
||||||
|
{[]byte{byte(PUSH32)}, 0b1111_1111, 3},
|
||||||
|
{[]byte{byte(PUSH32)}, 0b0000_0001, 4},
|
||||||
}
|
}
|
||||||
for i, test := range tests {
|
for i, test := range tests {
|
||||||
ret := codeBitmap(test.code)
|
ret := codeBitmap(test.code)
|
||||||
|
@@ -93,12 +93,12 @@ func (c *Contract) validJumpdest(dest *uint256.Int) bool {
|
|||||||
if OpCode(c.Code[udest]) != JUMPDEST {
|
if OpCode(c.Code[udest]) != JUMPDEST {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return c.IsCode(udest)
|
return c.isCode(udest)
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsCode returns true if the provided PC location is an actual opcode, as
|
// isCode returns true if the provided PC location is an actual opcode, as
|
||||||
// opposed to a data-segment following a PUSHN operation.
|
// opposed to a data-segment following a PUSHN operation.
|
||||||
func (c *Contract) IsCode(udest uint64) bool {
|
func (c *Contract) isCode(udest uint64) bool {
|
||||||
// Do we already have an analysis laying around?
|
// Do we already have an analysis laying around?
|
||||||
if c.analysis != nil {
|
if c.analysis != nil {
|
||||||
return c.analysis.codeSegment(udest)
|
return c.analysis.codeSegment(udest)
|
||||||
|
@@ -36,6 +36,10 @@ var (
|
|||||||
ErrGasUintOverflow = errors.New("gas uint64 overflow")
|
ErrGasUintOverflow = errors.New("gas uint64 overflow")
|
||||||
ErrInvalidCode = errors.New("invalid code: must not begin with 0xef")
|
ErrInvalidCode = errors.New("invalid code: must not begin with 0xef")
|
||||||
ErrNonceUintOverflow = errors.New("nonce uint64 overflow")
|
ErrNonceUintOverflow = errors.New("nonce uint64 overflow")
|
||||||
|
|
||||||
|
// errStopToken is an internal token indicating interpreter loop termination,
|
||||||
|
// never returned to outside callers.
|
||||||
|
errStopToken = errors.New("stop token")
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrStackUnderflow wraps an evm error when the items on the stack less
|
// ErrStackUnderflow wraps an evm error when the items on the stack less
|
||||||
|
@@ -17,16 +17,13 @@
|
|||||||
package vm
|
package vm
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/binary"
|
|
||||||
"math/big"
|
"math/big"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
|
||||||
"github.com/ethereum/go-ethereum/crypto"
|
"github.com/ethereum/go-ethereum/crypto"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
"github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -78,6 +75,7 @@ type BlockContext struct {
|
|||||||
Time *big.Int // Provides information for TIME
|
Time *big.Int // Provides information for TIME
|
||||||
Difficulty *big.Int // Provides information for DIFFICULTY
|
Difficulty *big.Int // Provides information for DIFFICULTY
|
||||||
BaseFee *big.Int // Provides information for BASEFEE
|
BaseFee *big.Int // Provides information for BASEFEE
|
||||||
|
Random *common.Hash // Provides information for RANDOM
|
||||||
}
|
}
|
||||||
|
|
||||||
// TxContext provides the EVM with information about a transaction.
|
// TxContext provides the EVM with information about a transaction.
|
||||||
@@ -86,8 +84,6 @@ type TxContext struct {
|
|||||||
// Message information
|
// Message information
|
||||||
Origin common.Address // Provides information for ORIGIN
|
Origin common.Address // Provides information for ORIGIN
|
||||||
GasPrice *big.Int // Provides information for GASPRICE
|
GasPrice *big.Int // Provides information for GASPRICE
|
||||||
|
|
||||||
Accesses *types.AccessWitness
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// EVM is the Ethereum Virtual Machine base object and provides
|
// EVM is the Ethereum Virtual Machine base object and provides
|
||||||
@@ -125,23 +121,18 @@ type EVM struct {
|
|||||||
// available gas is calculated in gasCall* according to the 63/64 rule and later
|
// available gas is calculated in gasCall* according to the 63/64 rule and later
|
||||||
// applied in opCall*.
|
// applied in opCall*.
|
||||||
callGasTemp uint64
|
callGasTemp uint64
|
||||||
|
|
||||||
accesses map[common.Hash]common.Hash
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
|
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
|
||||||
// only ever be used *once*.
|
// only ever be used *once*.
|
||||||
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM {
|
func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, config Config) *EVM {
|
||||||
if txCtx.Accesses == nil {
|
|
||||||
txCtx.Accesses = types.NewAccessWitness()
|
|
||||||
}
|
|
||||||
evm := &EVM{
|
evm := &EVM{
|
||||||
Context: blockCtx,
|
Context: blockCtx,
|
||||||
TxContext: txCtx,
|
TxContext: txCtx,
|
||||||
StateDB: statedb,
|
StateDB: statedb,
|
||||||
Config: config,
|
Config: config,
|
||||||
chainConfig: chainConfig,
|
chainConfig: chainConfig,
|
||||||
chainRules: chainConfig.Rules(blockCtx.BlockNumber),
|
chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil),
|
||||||
}
|
}
|
||||||
evm.interpreter = NewEVMInterpreter(evm, config)
|
evm.interpreter = NewEVMInterpreter(evm, config)
|
||||||
return evm
|
return evm
|
||||||
@@ -175,9 +166,6 @@ func (evm *EVM) Interpreter() *EVMInterpreter {
|
|||||||
// the necessary steps to create accounts and reverses the state in case of an
|
// the necessary steps to create accounts and reverses the state in case of an
|
||||||
// execution error or failed value transfer.
|
// execution error or failed value transfer.
|
||||||
func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
|
func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
|
||||||
if evm.Config.NoRecursion && evm.depth > 0 {
|
|
||||||
return nil, gas, nil
|
|
||||||
}
|
|
||||||
// Fail if we're trying to execute above the call depth limit
|
// Fail if we're trying to execute above the call depth limit
|
||||||
if evm.depth > int(params.CallCreateDepth) {
|
if evm.depth > int(params.CallCreateDepth) {
|
||||||
return nil, gas, ErrDepth
|
return nil, gas, ErrDepth
|
||||||
@@ -232,16 +220,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
|||||||
if len(code) == 0 {
|
if len(code) == 0 {
|
||||||
ret, err = nil, nil // gas is unchanged
|
ret, err = nil, nil // gas is unchanged
|
||||||
} else {
|
} else {
|
||||||
// Touch the account data
|
|
||||||
var data [32]byte
|
|
||||||
evm.Accesses.TouchAddress(utils.GetTreeKeyVersion(addr.Bytes()), data[:])
|
|
||||||
binary.BigEndian.PutUint64(data[:], evm.StateDB.GetNonce(addr))
|
|
||||||
evm.Accesses.TouchAddress(utils.GetTreeKeyNonce(addr[:]), data[:])
|
|
||||||
evm.Accesses.TouchAddress(utils.GetTreeKeyBalance(addr[:]), evm.StateDB.GetBalance(addr).Bytes())
|
|
||||||
binary.BigEndian.PutUint64(data[:], uint64(len(code)))
|
|
||||||
evm.Accesses.TouchAddress(utils.GetTreeKeyCodeSize(addr[:]), data[:])
|
|
||||||
evm.Accesses.TouchAddress(utils.GetTreeKeyCodeKeccak(addr[:]), evm.StateDB.GetCodeHash(addr).Bytes())
|
|
||||||
|
|
||||||
addrCopy := addr
|
addrCopy := addr
|
||||||
// If the account has no code, we can abort here
|
// If the account has no code, we can abort here
|
||||||
// The depth-check is already done, and precompiles handled above
|
// The depth-check is already done, and precompiles handled above
|
||||||
@@ -274,9 +252,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
|
|||||||
// CallCode differs from Call in the sense that it executes the given address'
|
// CallCode differs from Call in the sense that it executes the given address'
|
||||||
// code with the caller as context.
|
// code with the caller as context.
|
||||||
func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
|
func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) {
|
||||||
if evm.Config.NoRecursion && evm.depth > 0 {
|
|
||||||
return nil, gas, nil
|
|
||||||
}
|
|
||||||
// Fail if we're trying to execute above the call depth limit
|
// Fail if we're trying to execute above the call depth limit
|
||||||
if evm.depth > int(params.CallCreateDepth) {
|
if evm.depth > int(params.CallCreateDepth) {
|
||||||
return nil, gas, ErrDepth
|
return nil, gas, ErrDepth
|
||||||
@@ -325,9 +300,6 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte,
|
|||||||
// DelegateCall differs from CallCode in the sense that it executes the given address'
|
// DelegateCall differs from CallCode in the sense that it executes the given address'
|
||||||
// code with the caller as context and the caller is set to the caller of the caller.
|
// code with the caller as context and the caller is set to the caller of the caller.
|
||||||
func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
|
func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
|
||||||
if evm.Config.NoRecursion && evm.depth > 0 {
|
|
||||||
return nil, gas, nil
|
|
||||||
}
|
|
||||||
// Fail if we're trying to execute above the call depth limit
|
// Fail if we're trying to execute above the call depth limit
|
||||||
if evm.depth > int(params.CallCreateDepth) {
|
if evm.depth > int(params.CallCreateDepth) {
|
||||||
return nil, gas, ErrDepth
|
return nil, gas, ErrDepth
|
||||||
@@ -367,9 +339,6 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by
|
|||||||
// Opcodes that attempt to perform such modifications will result in exceptions
|
// Opcodes that attempt to perform such modifications will result in exceptions
|
||||||
// instead of performing the modifications.
|
// instead of performing the modifications.
|
||||||
func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
|
func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
|
||||||
if evm.Config.NoRecursion && evm.depth > 0 {
|
|
||||||
return nil, gas, nil
|
|
||||||
}
|
|
||||||
// Fail if we're trying to execute above the call depth limit
|
// Fail if we're trying to execute above the call depth limit
|
||||||
if evm.depth > int(params.CallCreateDepth) {
|
if evm.depth > int(params.CallCreateDepth) {
|
||||||
return nil, gas, ErrDepth
|
return nil, gas, ErrDepth
|
||||||
@@ -471,10 +440,6 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
|
|||||||
contract := NewContract(caller, AccountRef(address), value, gas)
|
contract := NewContract(caller, AccountRef(address), value, gas)
|
||||||
contract.SetCodeOptionalHash(&address, codeAndHash)
|
contract.SetCodeOptionalHash(&address, codeAndHash)
|
||||||
|
|
||||||
if evm.Config.NoRecursion && evm.depth > 0 {
|
|
||||||
return nil, address, gas, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if evm.Config.Debug {
|
if evm.Config.Debug {
|
||||||
if evm.depth == 0 {
|
if evm.depth == 0 {
|
||||||
evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
|
evm.Config.Tracer.CaptureStart(evm, caller.Address(), address, true, codeAndHash.code, gas, value)
|
||||||
@@ -538,7 +503,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
|
|||||||
|
|
||||||
// Create2 creates a new contract using code as deployment code.
|
// Create2 creates a new contract using code as deployment code.
|
||||||
//
|
//
|
||||||
// The different between Create2 with Create is Create2 uses sha3(0xff ++ msg.sender ++ salt ++ sha3(init_code))[12:]
|
// The different between Create2 with Create is Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
|
||||||
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
|
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
|
||||||
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
|
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
|
||||||
codeAndHash := &codeAndHash{code: code}
|
codeAndHash := &codeAndHash{code: code}
|
||||||
|
@@ -22,8 +22,6 @@ import (
|
|||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"github.com/holiman/uint256"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// memoryGasCost calculates the quadratic gas for memory expansion. It does so
|
// memoryGasCost calculates the quadratic gas for memory expansion. It does so
|
||||||
@@ -88,102 +86,14 @@ func memoryCopierGas(stackpos int) gasFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
|
||||||
usedGas := uint64(0)
|
|
||||||
slot := stack.Back(0)
|
|
||||||
if evm.accesses != nil {
|
|
||||||
index := trieUtils.GetTreeKeyCodeSize(slot.Bytes())
|
|
||||||
usedGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return usedGas, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
gasCallDataCopy = memoryCopierGas(2)
|
gasCallDataCopy = memoryCopierGas(2)
|
||||||
gasCodeCopyStateful = memoryCopierGas(2)
|
gasCodeCopy = memoryCopierGas(2)
|
||||||
gasExtCodeCopyStateful = memoryCopierGas(3)
|
gasExtCodeCopy = memoryCopierGas(3)
|
||||||
gasReturnDataCopy = memoryCopierGas(2)
|
gasReturnDataCopy = memoryCopierGas(2)
|
||||||
)
|
)
|
||||||
|
|
||||||
func gasCodeCopy(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
|
||||||
var statelessGas uint64
|
|
||||||
if evm.accesses != nil {
|
|
||||||
var (
|
|
||||||
codeOffset = stack.Back(1)
|
|
||||||
length = stack.Back(2)
|
|
||||||
)
|
|
||||||
uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
|
|
||||||
if overflow {
|
|
||||||
uint64CodeOffset = 0xffffffffffffffff
|
|
||||||
}
|
|
||||||
uint64CodeEnd, overflow := new(uint256.Int).Add(codeOffset, length).Uint64WithOverflow()
|
|
||||||
if overflow {
|
|
||||||
uint64CodeEnd = 0xffffffffffffffff
|
|
||||||
}
|
|
||||||
addr := contract.Address()
|
|
||||||
chunk := uint64CodeOffset / 31
|
|
||||||
endChunk := uint64CodeEnd / 31
|
|
||||||
// XXX uint64 overflow in condition check
|
|
||||||
for ; chunk < endChunk; chunk++ {
|
|
||||||
|
|
||||||
// TODO make a version of GetTreeKeyCodeChunk without the bigint
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(chunk))
|
|
||||||
statelessGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
usedGas, err := gasCodeCopyStateful(evm, contract, stack, mem, memorySize)
|
|
||||||
return usedGas + statelessGas, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func gasExtCodeCopy(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
|
||||||
var statelessGas uint64
|
|
||||||
if evm.accesses != nil {
|
|
||||||
var (
|
|
||||||
a = stack.Back(0)
|
|
||||||
codeOffset = stack.Back(2)
|
|
||||||
length = stack.Back(3)
|
|
||||||
)
|
|
||||||
uint64CodeOffset, overflow := codeOffset.Uint64WithOverflow()
|
|
||||||
if overflow {
|
|
||||||
uint64CodeOffset = 0xffffffffffffffff
|
|
||||||
}
|
|
||||||
uint64CodeEnd, overflow := new(uint256.Int).Add(codeOffset, length).Uint64WithOverflow()
|
|
||||||
if overflow {
|
|
||||||
uint64CodeEnd = 0xffffffffffffffff
|
|
||||||
}
|
|
||||||
addr := common.Address(a.Bytes20())
|
|
||||||
chunk := uint64CodeOffset / 31
|
|
||||||
endChunk := uint64CodeEnd / 31
|
|
||||||
// XXX uint64 overflow in condition check
|
|
||||||
for ; chunk < endChunk; chunk++ {
|
|
||||||
// TODO(@gballet) make a version of GetTreeKeyCodeChunk without the bigint
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(chunk))
|
|
||||||
statelessGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
usedGas, err := gasExtCodeCopyStateful(evm, contract, stack, mem, memorySize)
|
|
||||||
return usedGas + statelessGas, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
|
||||||
usedGas := uint64(0)
|
|
||||||
|
|
||||||
if evm.accesses != nil {
|
|
||||||
where := stack.Back(0)
|
|
||||||
addr := contract.Address()
|
|
||||||
index := trieUtils.GetTreeKeyStorageSlot(addr[:], where)
|
|
||||||
usedGas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
return usedGas, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
||||||
// Apply the witness access costs, err is nil
|
|
||||||
accessGas, _ := gasSLoad(evm, contract, stack, mem, memorySize)
|
|
||||||
var (
|
var (
|
||||||
y, x = stack.Back(1), stack.Back(0)
|
y, x = stack.Back(1), stack.Back(0)
|
||||||
current = evm.StateDB.GetState(contract.Address(), x.Bytes32())
|
current = evm.StateDB.GetState(contract.Address(), x.Bytes32())
|
||||||
@@ -199,15 +109,14 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi
|
|||||||
// 3. From a non-zero to a non-zero (CHANGE)
|
// 3. From a non-zero to a non-zero (CHANGE)
|
||||||
switch {
|
switch {
|
||||||
case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0
|
case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0
|
||||||
return params.SstoreSetGas + accessGas, nil
|
return params.SstoreSetGas, nil
|
||||||
case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0
|
case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0
|
||||||
evm.StateDB.AddRefund(params.SstoreRefundGas)
|
evm.StateDB.AddRefund(params.SstoreRefundGas)
|
||||||
return params.SstoreClearGas + accessGas, nil
|
return params.SstoreClearGas, nil
|
||||||
default: // non 0 => non 0 (or 0 => 0)
|
default: // non 0 => non 0 (or 0 => 0)
|
||||||
return params.SstoreResetGas + accessGas, nil
|
return params.SstoreResetGas, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// The new gas metering is based on net gas costs (EIP-1283):
|
// The new gas metering is based on net gas costs (EIP-1283):
|
||||||
//
|
//
|
||||||
// 1. If current value equals new value (this is a no-op), 200 gas is deducted.
|
// 1. If current value equals new value (this is a no-op), 200 gas is deducted.
|
||||||
@@ -338,7 +247,7 @@ func makeGasLog(n uint64) gasFunc {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func gasSha3(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
func gasKeccak256(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) {
|
||||||
gas, err := memoryGasCost(mem, memorySize)
|
gas, err := memoryGasCost(mem, memorySize)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
@@ -347,7 +256,7 @@ func gasSha3(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
|
|||||||
if overflow {
|
if overflow {
|
||||||
return 0, ErrGasUintOverflow
|
return 0, ErrGasUintOverflow
|
||||||
}
|
}
|
||||||
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow {
|
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Keccak256WordGas); overflow {
|
||||||
return 0, ErrGasUintOverflow
|
return 0, ErrGasUintOverflow
|
||||||
}
|
}
|
||||||
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
|
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
|
||||||
@@ -381,7 +290,7 @@ func gasCreate2(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memoryS
|
|||||||
if overflow {
|
if overflow {
|
||||||
return 0, ErrGasUintOverflow
|
return 0, ErrGasUintOverflow
|
||||||
}
|
}
|
||||||
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Sha3WordGas); overflow {
|
if wordGas, overflow = math.SafeMul(toWordSize(wordGas), params.Keccak256WordGas); overflow {
|
||||||
return 0, ErrGasUintOverflow
|
return 0, ErrGasUintOverflow
|
||||||
}
|
}
|
||||||
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
|
if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
|
||||||
@@ -422,14 +331,6 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize
|
|||||||
transfersValue = !stack.Back(2).IsZero()
|
transfersValue = !stack.Back(2).IsZero()
|
||||||
address = common.Address(stack.Back(1).Bytes20())
|
address = common.Address(stack.Back(1).Bytes20())
|
||||||
)
|
)
|
||||||
if evm.accesses != nil {
|
|
||||||
// Charge witness costs
|
|
||||||
for i := trieUtils.VersionLeafKey; i <= trieUtils.CodeSizeLeafKey; i++ {
|
|
||||||
index := trieUtils.GetTreeKeyAccountLeaf(address[:], byte(i))
|
|
||||||
gas += evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if evm.chainRules.IsEIP158 {
|
if evm.chainRules.IsEIP158 {
|
||||||
if transfersValue && evm.StateDB.Empty(address) {
|
if transfersValue && evm.StateDB.Empty(address) {
|
||||||
gas += params.CallNewAccountGas
|
gas += params.CallNewAccountGas
|
||||||
|
@@ -17,10 +17,11 @@
|
|||||||
package vm
|
package vm
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/core/types"
|
"github.com/ethereum/go-ethereum/core/types"
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"github.com/holiman/uint256"
|
"github.com/holiman/uint256"
|
||||||
"golang.org/x/crypto/sha3"
|
"golang.org/x/crypto/sha3"
|
||||||
)
|
)
|
||||||
@@ -232,7 +233,7 @@ func opSAR(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func opSha3(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opKeccak256(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
offset, size := scope.Stack.pop(), scope.Stack.peek()
|
offset, size := scope.Stack.pop(), scope.Stack.peek()
|
||||||
data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
data := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
||||||
|
|
||||||
@@ -342,12 +343,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte
|
|||||||
|
|
||||||
func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
slot := scope.Stack.peek()
|
slot := scope.Stack.peek()
|
||||||
cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))
|
slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())))
|
||||||
if interpreter.evm.accesses != nil {
|
|
||||||
index := trieUtils.GetTreeKeyCodeSize(slot.Bytes())
|
|
||||||
interpreter.evm.TxContext.Accesses.TouchAddress(index, uint256.NewInt(cs).Bytes())
|
|
||||||
}
|
|
||||||
slot.SetUint64(cs)
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -368,65 +364,12 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
|
|||||||
if overflow {
|
if overflow {
|
||||||
uint64CodeOffset = 0xffffffffffffffff
|
uint64CodeOffset = 0xffffffffffffffff
|
||||||
}
|
}
|
||||||
uint64CodeEnd, overflow := new(uint256.Int).Add(&codeOffset, &length).Uint64WithOverflow()
|
codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64())
|
||||||
if overflow {
|
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
|
||||||
uint64CodeEnd = 0xffffffffffffffff
|
|
||||||
}
|
|
||||||
if interpreter.evm.accesses != nil {
|
|
||||||
copyCodeFromAccesses(scope.Contract.Address(), uint64CodeOffset, uint64CodeEnd, memOffset.Uint64(), interpreter, scope)
|
|
||||||
} else {
|
|
||||||
codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64())
|
|
||||||
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
|
|
||||||
|
|
||||||
touchEachChunks(uint64CodeOffset, uint64CodeEnd, codeCopy, scope.Contract, interpreter.evm)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to touch every chunk in a code range
|
|
||||||
func touchEachChunks(start, end uint64, code []byte, contract *Contract, evm *EVM) {
|
|
||||||
for chunk := start / 31; chunk <= end/31 && chunk <= uint64(len(code))/31; chunk++ {
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(contract.Address().Bytes(), uint256.NewInt(chunk))
|
|
||||||
count := uint64(0)
|
|
||||||
end := (chunk + 1) * 31
|
|
||||||
|
|
||||||
// Look for the first code byte (i.e. no pushdata)
|
|
||||||
for ; count < 31 && end+count < uint64(len(contract.Code)) && !contract.IsCode(chunk*31+count); count++ {
|
|
||||||
}
|
|
||||||
var value [32]byte
|
|
||||||
value[0] = byte(count)
|
|
||||||
if end > uint64(len(code)) {
|
|
||||||
end = uint64(len(code))
|
|
||||||
}
|
|
||||||
copy(value[1:], code[chunk*31:end])
|
|
||||||
evm.Accesses.TouchAddress(index, value[:])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// copyCodeFromAccesses perform codecopy from the witness, not from the db.
|
|
||||||
func copyCodeFromAccesses(addr common.Address, codeOffset, codeEnd, memOffset uint64, in *EVMInterpreter, scope *ScopeContext) {
|
|
||||||
chunk := codeOffset / 31
|
|
||||||
endChunk := codeEnd / 31
|
|
||||||
start := codeOffset % 31 // start inside the first code chunk
|
|
||||||
offset := uint64(0) // memory offset to write to
|
|
||||||
// XXX uint64 overflow in condition check
|
|
||||||
for end := uint64(31); chunk < endChunk; chunk, start = chunk+1, 0 {
|
|
||||||
// case of the last chunk: figure out how many bytes need to
|
|
||||||
// be extracted from the last chunk.
|
|
||||||
if chunk+1 == endChunk {
|
|
||||||
end = codeEnd % 31
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO make a version of GetTreeKeyCodeChunk without the bigint
|
|
||||||
index := common.BytesToHash(trieUtils.GetTreeKeyCodeChunk(addr[:], uint256.NewInt(chunk)))
|
|
||||||
h := in.evm.accesses[index]
|
|
||||||
//in.evm.Accesses.TouchAddress(index.Bytes(), h[1+start:1+end])
|
|
||||||
scope.Memory.Set(memOffset+offset, end-start, h[1+start:end])
|
|
||||||
offset += 31 - start
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
var (
|
var (
|
||||||
stack = scope.Stack
|
stack = scope.Stack
|
||||||
@@ -439,19 +382,9 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
|||||||
if overflow {
|
if overflow {
|
||||||
uint64CodeOffset = 0xffffffffffffffff
|
uint64CodeOffset = 0xffffffffffffffff
|
||||||
}
|
}
|
||||||
uint64CodeEnd, overflow := new(uint256.Int).Add(&codeOffset, &length).Uint64WithOverflow()
|
|
||||||
if overflow {
|
|
||||||
uint64CodeEnd = 0xffffffffffffffff
|
|
||||||
}
|
|
||||||
addr := common.Address(a.Bytes20())
|
addr := common.Address(a.Bytes20())
|
||||||
if interpreter.evm.accesses != nil {
|
codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64())
|
||||||
copyCodeFromAccesses(addr, uint64CodeOffset, uint64CodeEnd, memOffset.Uint64(), interpreter, scope)
|
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
|
||||||
} else {
|
|
||||||
codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64())
|
|
||||||
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy)
|
|
||||||
|
|
||||||
touchEachChunks(uint64CodeOffset, uint64CodeEnd, codeCopy, scope.Contract, interpreter.evm)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
@@ -544,6 +477,12 @@ func opDifficulty(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
|||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func opRandom(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
v := new(uint256.Int).SetBytes((interpreter.evm.Context.Random.Bytes()))
|
||||||
|
scope.Stack.push(v)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opGasLimit(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
|
scope.Stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit))
|
||||||
return nil, nil
|
return nil, nil
|
||||||
@@ -579,14 +518,13 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
|
|||||||
hash := common.Hash(loc.Bytes32())
|
hash := common.Hash(loc.Bytes32())
|
||||||
val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash)
|
val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash)
|
||||||
loc.SetBytes(val.Bytes())
|
loc.SetBytes(val.Bytes())
|
||||||
// Get the initial value as it might not be present
|
|
||||||
|
|
||||||
index := trieUtils.GetTreeKeyStorageSlot(scope.Contract.Address().Bytes(), loc)
|
|
||||||
interpreter.evm.TxContext.Accesses.TouchAddress(index, val.Bytes())
|
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if interpreter.readOnly {
|
||||||
|
return nil, ErrWriteProtection
|
||||||
|
}
|
||||||
loc := scope.Stack.pop()
|
loc := scope.Stack.pop()
|
||||||
val := scope.Stack.pop()
|
val := scope.Stack.pop()
|
||||||
interpreter.evm.StateDB.SetState(scope.Contract.Address(),
|
interpreter.evm.StateDB.SetState(scope.Contract.Address(),
|
||||||
@@ -595,23 +533,27 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
|
|||||||
}
|
}
|
||||||
|
|
||||||
func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opJump(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if atomic.LoadInt32(&interpreter.evm.abort) != 0 {
|
||||||
|
return nil, errStopToken
|
||||||
|
}
|
||||||
pos := scope.Stack.pop()
|
pos := scope.Stack.pop()
|
||||||
if !scope.Contract.validJumpdest(&pos) {
|
if !scope.Contract.validJumpdest(&pos) {
|
||||||
return nil, ErrInvalidJump
|
return nil, ErrInvalidJump
|
||||||
}
|
}
|
||||||
*pc = pos.Uint64()
|
*pc = pos.Uint64() - 1 // pc will be increased by the interpreter loop
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opJumpi(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if atomic.LoadInt32(&interpreter.evm.abort) != 0 {
|
||||||
|
return nil, errStopToken
|
||||||
|
}
|
||||||
pos, cond := scope.Stack.pop(), scope.Stack.pop()
|
pos, cond := scope.Stack.pop(), scope.Stack.pop()
|
||||||
if !cond.IsZero() {
|
if !cond.IsZero() {
|
||||||
if !scope.Contract.validJumpdest(&pos) {
|
if !scope.Contract.validJumpdest(&pos) {
|
||||||
return nil, ErrInvalidJump
|
return nil, ErrInvalidJump
|
||||||
}
|
}
|
||||||
*pc = pos.Uint64()
|
*pc = pos.Uint64() - 1 // pc will be increased by the interpreter loop
|
||||||
} else {
|
|
||||||
*pc++
|
|
||||||
}
|
}
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
@@ -636,6 +578,9 @@ func opGas(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte
|
|||||||
}
|
}
|
||||||
|
|
||||||
func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if interpreter.readOnly {
|
||||||
|
return nil, ErrWriteProtection
|
||||||
|
}
|
||||||
var (
|
var (
|
||||||
value = scope.Stack.pop()
|
value = scope.Stack.pop()
|
||||||
offset, size = scope.Stack.pop(), scope.Stack.pop()
|
offset, size = scope.Stack.pop(), scope.Stack.pop()
|
||||||
@@ -671,12 +616,17 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
|
|||||||
scope.Contract.Gas += returnGas
|
scope.Contract.Gas += returnGas
|
||||||
|
|
||||||
if suberr == ErrExecutionReverted {
|
if suberr == ErrExecutionReverted {
|
||||||
|
interpreter.returnData = res // set REVERT data to return data buffer
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
interpreter.returnData = nil // clear dirty return data buffer
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if interpreter.readOnly {
|
||||||
|
return nil, ErrWriteProtection
|
||||||
|
}
|
||||||
var (
|
var (
|
||||||
endowment = scope.Stack.pop()
|
endowment = scope.Stack.pop()
|
||||||
offset, size = scope.Stack.pop(), scope.Stack.pop()
|
offset, size = scope.Stack.pop(), scope.Stack.pop()
|
||||||
@@ -707,8 +657,10 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
|
|||||||
scope.Contract.Gas += returnGas
|
scope.Contract.Gas += returnGas
|
||||||
|
|
||||||
if suberr == ErrExecutionReverted {
|
if suberr == ErrExecutionReverted {
|
||||||
|
interpreter.returnData = res // set REVERT data to return data buffer
|
||||||
return res, nil
|
return res, nil
|
||||||
}
|
}
|
||||||
|
interpreter.returnData = nil // clear dirty return data buffer
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -724,6 +676,9 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
|
|||||||
// Get the arguments from the memory.
|
// Get the arguments from the memory.
|
||||||
args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
|
args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64()))
|
||||||
|
|
||||||
|
if interpreter.readOnly && !value.IsZero() {
|
||||||
|
return nil, ErrWriteProtection
|
||||||
|
}
|
||||||
var bigVal = big0
|
var bigVal = big0
|
||||||
//TODO: use uint256.Int instead of converting with toBig()
|
//TODO: use uint256.Int instead of converting with toBig()
|
||||||
// By using big0 here, we save an alloc for the most common case (non-ether-transferring contract calls),
|
// By using big0 here, we save an alloc for the most common case (non-ether-transferring contract calls),
|
||||||
@@ -747,6 +702,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt
|
|||||||
}
|
}
|
||||||
scope.Contract.Gas += returnGas
|
scope.Contract.Gas += returnGas
|
||||||
|
|
||||||
|
interpreter.returnData = ret
|
||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -782,6 +738,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([
|
|||||||
}
|
}
|
||||||
scope.Contract.Gas += returnGas
|
scope.Contract.Gas += returnGas
|
||||||
|
|
||||||
|
interpreter.returnData = ret
|
||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -810,6 +767,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext
|
|||||||
}
|
}
|
||||||
scope.Contract.Gas += returnGas
|
scope.Contract.Gas += returnGas
|
||||||
|
|
||||||
|
interpreter.returnData = ret
|
||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -838,6 +796,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext)
|
|||||||
}
|
}
|
||||||
scope.Contract.Gas += returnGas
|
scope.Contract.Gas += returnGas
|
||||||
|
|
||||||
|
interpreter.returnData = ret
|
||||||
return ret, nil
|
return ret, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -845,21 +804,29 @@ func opReturn(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b
|
|||||||
offset, size := scope.Stack.pop(), scope.Stack.pop()
|
offset, size := scope.Stack.pop(), scope.Stack.pop()
|
||||||
ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
||||||
|
|
||||||
return ret, nil
|
return ret, errStopToken
|
||||||
}
|
}
|
||||||
|
|
||||||
func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opRevert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
offset, size := scope.Stack.pop(), scope.Stack.pop()
|
offset, size := scope.Stack.pop(), scope.Stack.pop()
|
||||||
ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
ret := scope.Memory.GetPtr(int64(offset.Uint64()), int64(size.Uint64()))
|
||||||
|
|
||||||
return ret, nil
|
interpreter.returnData = ret
|
||||||
|
return ret, ErrExecutionReverted
|
||||||
|
}
|
||||||
|
|
||||||
|
func opUndefined(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
return nil, &ErrInvalidOpCode{opcode: OpCode(scope.Contract.Code[*pc])}
|
||||||
}
|
}
|
||||||
|
|
||||||
func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opStop(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
return nil, nil
|
return nil, errStopToken
|
||||||
}
|
}
|
||||||
|
|
||||||
func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if interpreter.readOnly {
|
||||||
|
return nil, ErrWriteProtection
|
||||||
|
}
|
||||||
beneficiary := scope.Stack.pop()
|
beneficiary := scope.Stack.pop()
|
||||||
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
|
balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address())
|
||||||
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
|
interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance)
|
||||||
@@ -868,7 +835,7 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
|
|||||||
interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
|
interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance)
|
||||||
interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil)
|
interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil)
|
||||||
}
|
}
|
||||||
return nil, nil
|
return nil, errStopToken
|
||||||
}
|
}
|
||||||
|
|
||||||
// following functions are used by the instruction jump table
|
// following functions are used by the instruction jump table
|
||||||
@@ -876,6 +843,9 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]
|
|||||||
// make log instruction function
|
// make log instruction function
|
||||||
func makeLog(size int) executionFunc {
|
func makeLog(size int) executionFunc {
|
||||||
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
return func(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) {
|
||||||
|
if interpreter.readOnly {
|
||||||
|
return nil, ErrWriteProtection
|
||||||
|
}
|
||||||
topics := make([]common.Hash, size)
|
topics := make([]common.Hash, size)
|
||||||
stack := scope.Stack
|
stack := scope.Stack
|
||||||
mStart, mSize := stack.pop(), stack.pop()
|
mStart, mSize := stack.pop(), stack.pop()
|
||||||
@@ -907,25 +877,6 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by
|
|||||||
*pc += 1
|
*pc += 1
|
||||||
if *pc < codeLen {
|
if *pc < codeLen {
|
||||||
scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc])))
|
scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc])))
|
||||||
// touch next chunk if PUSH1 is at the boundary. if so, *pc has
|
|
||||||
// advanced past this boundary.
|
|
||||||
if *pc%31 == 0 {
|
|
||||||
// touch push data by adding the last byte of the pushdata
|
|
||||||
var value [32]byte
|
|
||||||
chunk := *pc / 31
|
|
||||||
count := uint64(0)
|
|
||||||
// Look for the first code byte (i.e. no pushdata)
|
|
||||||
for ; count < 31 && !scope.Contract.IsCode(chunk*31+count); count++ {
|
|
||||||
}
|
|
||||||
value[0] = byte(count)
|
|
||||||
endMin := (chunk + 1) * 31
|
|
||||||
if endMin > uint64(len(scope.Contract.Code)) {
|
|
||||||
endMin = uint64(len(scope.Contract.Code))
|
|
||||||
}
|
|
||||||
copy(value[1:], scope.Contract.Code[chunk*31:endMin])
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(scope.Contract.Address().Bytes(), uint256.NewInt(chunk))
|
|
||||||
interpreter.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
scope.Stack.push(integer.Clear())
|
scope.Stack.push(integer.Clear())
|
||||||
}
|
}
|
||||||
@@ -951,39 +902,6 @@ func makePush(size uint64, pushByteSize int) executionFunc {
|
|||||||
scope.Stack.push(integer.SetBytes(common.RightPadBytes(
|
scope.Stack.push(integer.SetBytes(common.RightPadBytes(
|
||||||
scope.Contract.Code[startMin:endMin], pushByteSize)))
|
scope.Contract.Code[startMin:endMin], pushByteSize)))
|
||||||
|
|
||||||
// touch push data by adding the last byte of the pushdata
|
|
||||||
var value [32]byte
|
|
||||||
chunk := uint64(endMin-1) / 31
|
|
||||||
count := uint64(0)
|
|
||||||
// Look for the first code byte (i.e. no pushdata)
|
|
||||||
for ; count < 31 && !scope.Contract.IsCode(chunk*31+count); count++ {
|
|
||||||
}
|
|
||||||
value[0] = byte(count)
|
|
||||||
copy(value[1:], scope.Contract.Code[chunk*31:endMin])
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(scope.Contract.Address().Bytes(), uint256.NewInt(chunk))
|
|
||||||
interpreter.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
|
|
||||||
// in the case of PUSH32, the end data might be two chunks away,
|
|
||||||
// so also get the middle chunk. There is a boundary condition
|
|
||||||
// check (endMin > 2) in the case the code is a single PUSH32
|
|
||||||
// insctruction, whose immediate are just 0s.
|
|
||||||
if pushByteSize == 32 && endMin > 2 {
|
|
||||||
chunk = uint64(endMin-2) / 31
|
|
||||||
count = uint64(0)
|
|
||||||
// Look for the first code byte (i.e. no pushdata)
|
|
||||||
for ; count < 31 && !scope.Contract.IsCode(chunk*31+count); count++ {
|
|
||||||
}
|
|
||||||
value[0] = byte(count)
|
|
||||||
end := (chunk + 1) * 31
|
|
||||||
if end > uint64(len(scope.Contract.Code)) {
|
|
||||||
end = uint64(len(scope.Contract.Code))
|
|
||||||
}
|
|
||||||
copy(value[1:], scope.Contract.Code[chunk*31:end])
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(scope.Contract.Address().Bytes(), uint256.NewInt(chunk))
|
|
||||||
interpreter.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, nil)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
*pc += size
|
*pc += size
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
@@ -21,6 +21,7 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
@@ -525,12 +526,14 @@ func TestOpMstore(t *testing.T) {
|
|||||||
mem.Resize(64)
|
mem.Resize(64)
|
||||||
pc := uint64(0)
|
pc := uint64(0)
|
||||||
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
|
v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
|
||||||
stack.pushN(*new(uint256.Int).SetBytes(common.Hex2Bytes(v)), *new(uint256.Int))
|
stack.push(new(uint256.Int).SetBytes(common.Hex2Bytes(v)))
|
||||||
|
stack.push(new(uint256.Int))
|
||||||
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
||||||
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
|
if got := common.Bytes2Hex(mem.GetCopy(0, 32)); got != v {
|
||||||
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
|
t.Fatalf("Mstore fail, got %v, expected %v", got, v)
|
||||||
}
|
}
|
||||||
stack.pushN(*new(uint256.Int).SetUint64(0x1), *new(uint256.Int))
|
stack.push(new(uint256.Int).SetUint64(0x1))
|
||||||
|
stack.push(new(uint256.Int))
|
||||||
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
||||||
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
|
if common.Bytes2Hex(mem.GetCopy(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
|
||||||
t.Fatalf("Mstore failed to overwrite previous value")
|
t.Fatalf("Mstore failed to overwrite previous value")
|
||||||
@@ -553,12 +556,13 @@ func BenchmarkOpMstore(bench *testing.B) {
|
|||||||
|
|
||||||
bench.ResetTimer()
|
bench.ResetTimer()
|
||||||
for i := 0; i < bench.N; i++ {
|
for i := 0; i < bench.N; i++ {
|
||||||
stack.pushN(*value, *memStart)
|
stack.push(value)
|
||||||
|
stack.push(memStart)
|
||||||
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
opMstore(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkOpSHA3(bench *testing.B) {
|
func BenchmarkOpKeccak256(bench *testing.B) {
|
||||||
var (
|
var (
|
||||||
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||||
stack = newstack()
|
stack = newstack()
|
||||||
@@ -572,8 +576,9 @@ func BenchmarkOpSHA3(bench *testing.B) {
|
|||||||
|
|
||||||
bench.ResetTimer()
|
bench.ResetTimer()
|
||||||
for i := 0; i < bench.N; i++ {
|
for i := 0; i < bench.N; i++ {
|
||||||
stack.pushN(*uint256.NewInt(32), *start)
|
stack.push(uint256.NewInt(32))
|
||||||
opSha3(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
stack.push(start)
|
||||||
|
opKeccak256(&pc, evmInterpreter, &ScopeContext{mem, stack, nil})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -650,3 +655,36 @@ func TestCreate2Addreses(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestRandom(t *testing.T) {
|
||||||
|
type testcase struct {
|
||||||
|
name string
|
||||||
|
random common.Hash
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range []testcase{
|
||||||
|
{name: "empty hash", random: common.Hash{}},
|
||||||
|
{name: "1", random: common.Hash{0}},
|
||||||
|
{name: "emptyCodeHash", random: emptyCodeHash},
|
||||||
|
{name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})},
|
||||||
|
} {
|
||||||
|
var (
|
||||||
|
env = NewEVM(BlockContext{Random: &tt.random}, TxContext{}, nil, params.TestChainConfig, Config{})
|
||||||
|
stack = newstack()
|
||||||
|
pc = uint64(0)
|
||||||
|
evmInterpreter = env.interpreter
|
||||||
|
)
|
||||||
|
opRandom(&pc, evmInterpreter, &ScopeContext{nil, stack, nil})
|
||||||
|
if len(stack.data) != 1 {
|
||||||
|
t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data))
|
||||||
|
}
|
||||||
|
actual := stack.pop()
|
||||||
|
expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.random.Bytes()))
|
||||||
|
if overflow {
|
||||||
|
t.Errorf("Testcase %v: invalid overflow", tt.name)
|
||||||
|
}
|
||||||
|
if actual.Cmp(expected) != 0 {
|
||||||
|
t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@@ -74,9 +74,6 @@ type StateDB interface {
|
|||||||
AddPreimage(common.Hash, []byte)
|
AddPreimage(common.Hash, []byte)
|
||||||
|
|
||||||
ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
|
ForEachStorage(common.Address, func(common.Hash, common.Hash) bool) error
|
||||||
|
|
||||||
Witness() *types.AccessWitness
|
|
||||||
SetWitness(*types.AccessWitness)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CallContext provides a basic interface for the EVM calling conventions. The EVM
|
// CallContext provides a basic interface for the EVM calling conventions. The EVM
|
||||||
|
@@ -17,26 +17,21 @@
|
|||||||
package vm
|
package vm
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"hash"
|
"hash"
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/common"
|
"github.com/ethereum/go-ethereum/common"
|
||||||
"github.com/ethereum/go-ethereum/common/math"
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
"github.com/ethereum/go-ethereum/log"
|
"github.com/ethereum/go-ethereum/log"
|
||||||
trieUtils "github.com/ethereum/go-ethereum/trie/utils"
|
|
||||||
"github.com/holiman/uint256"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Config are the configuration options for the Interpreter
|
// Config are the configuration options for the Interpreter
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Debug bool // Enables debugging
|
Debug bool // Enables debugging
|
||||||
Tracer EVMLogger // Opcode logger
|
Tracer EVMLogger // Opcode logger
|
||||||
NoRecursion bool // Disables call, callcode, delegate call and create
|
|
||||||
NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
|
NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls)
|
||||||
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
|
EnablePreimageRecording bool // Enables recording of SHA3/keccak preimages
|
||||||
|
|
||||||
JumpTable JumpTable // EVM instruction table, automatically populated if unset
|
JumpTable *JumpTable // EVM instruction table, automatically populated if unset
|
||||||
|
|
||||||
ExtraEips []int // Additional EIPS that are to be enabled
|
ExtraEips []int // Additional EIPS that are to be enabled
|
||||||
}
|
}
|
||||||
@@ -71,39 +66,39 @@ type EVMInterpreter struct {
|
|||||||
|
|
||||||
// NewEVMInterpreter returns a new instance of the Interpreter.
|
// NewEVMInterpreter returns a new instance of the Interpreter.
|
||||||
func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
|
func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
|
||||||
// We use the STOP instruction whether to see
|
// If jump table was not initialised we set the default one.
|
||||||
// the jump table was initialised. If it was not
|
if cfg.JumpTable == nil {
|
||||||
// we'll set the default jump table.
|
|
||||||
if cfg.JumpTable[STOP] == nil {
|
|
||||||
var jt JumpTable
|
|
||||||
switch {
|
switch {
|
||||||
|
case evm.chainRules.IsMerge:
|
||||||
|
cfg.JumpTable = &mergeInstructionSet
|
||||||
case evm.chainRules.IsLondon:
|
case evm.chainRules.IsLondon:
|
||||||
jt = londonInstructionSet
|
cfg.JumpTable = &londonInstructionSet
|
||||||
case evm.chainRules.IsBerlin:
|
case evm.chainRules.IsBerlin:
|
||||||
jt = berlinInstructionSet
|
cfg.JumpTable = &berlinInstructionSet
|
||||||
case evm.chainRules.IsIstanbul:
|
case evm.chainRules.IsIstanbul:
|
||||||
jt = istanbulInstructionSet
|
cfg.JumpTable = &istanbulInstructionSet
|
||||||
case evm.chainRules.IsConstantinople:
|
case evm.chainRules.IsConstantinople:
|
||||||
jt = constantinopleInstructionSet
|
cfg.JumpTable = &constantinopleInstructionSet
|
||||||
case evm.chainRules.IsByzantium:
|
case evm.chainRules.IsByzantium:
|
||||||
jt = byzantiumInstructionSet
|
cfg.JumpTable = &byzantiumInstructionSet
|
||||||
case evm.chainRules.IsEIP158:
|
case evm.chainRules.IsEIP158:
|
||||||
jt = spuriousDragonInstructionSet
|
cfg.JumpTable = &spuriousDragonInstructionSet
|
||||||
case evm.chainRules.IsEIP150:
|
case evm.chainRules.IsEIP150:
|
||||||
jt = tangerineWhistleInstructionSet
|
cfg.JumpTable = &tangerineWhistleInstructionSet
|
||||||
case evm.chainRules.IsHomestead:
|
case evm.chainRules.IsHomestead:
|
||||||
jt = homesteadInstructionSet
|
cfg.JumpTable = &homesteadInstructionSet
|
||||||
default:
|
default:
|
||||||
jt = frontierInstructionSet
|
cfg.JumpTable = &frontierInstructionSet
|
||||||
}
|
}
|
||||||
for i, eip := range cfg.ExtraEips {
|
for i, eip := range cfg.ExtraEips {
|
||||||
if err := EnableEIP(eip, &jt); err != nil {
|
copy := *cfg.JumpTable
|
||||||
|
if err := EnableEIP(eip, ©); err != nil {
|
||||||
// Disable it, so caller can check if it's activated or not
|
// Disable it, so caller can check if it's activated or not
|
||||||
cfg.ExtraEips = append(cfg.ExtraEips[:i], cfg.ExtraEips[i+1:]...)
|
cfg.ExtraEips = append(cfg.ExtraEips[:i], cfg.ExtraEips[i+1:]...)
|
||||||
log.Error("EIP activation failed", "eip", eip, "error", err)
|
log.Error("EIP activation failed", "eip", eip, "error", err)
|
||||||
}
|
}
|
||||||
|
cfg.JumpTable = ©
|
||||||
}
|
}
|
||||||
cfg.JumpTable = jt
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return &EVMInterpreter{
|
return &EVMInterpreter{
|
||||||
@@ -183,145 +178,70 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
|
|||||||
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
|
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
|
||||||
// the execution of one of the operations or until the done flag is set by the
|
// the execution of one of the operations or until the done flag is set by the
|
||||||
// parent context.
|
// parent context.
|
||||||
steps := 0
|
|
||||||
for {
|
for {
|
||||||
steps++
|
|
||||||
if steps%1000 == 0 && atomic.LoadInt32(&in.evm.abort) != 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if in.cfg.Debug {
|
if in.cfg.Debug {
|
||||||
// Capture pre-execution values for tracing.
|
// Capture pre-execution values for tracing.
|
||||||
logged, pcCopy, gasCopy = false, pc, contract.Gas
|
logged, pcCopy, gasCopy = false, pc, contract.Gas
|
||||||
}
|
}
|
||||||
|
|
||||||
// if the PC ends up in a new "page" of verkleized code, charge the
|
|
||||||
// associated witness costs.
|
|
||||||
inWitness := false
|
|
||||||
var codePage common.Hash
|
|
||||||
if in.evm.chainRules.IsCancun {
|
|
||||||
index := trieUtils.GetTreeKeyCodeChunk(contract.Address().Bytes(), uint256.NewInt(pc/31))
|
|
||||||
|
|
||||||
var value [32]byte
|
|
||||||
if in.evm.accesses != nil {
|
|
||||||
codePage, inWitness = in.evm.accesses[common.BytesToHash(index)]
|
|
||||||
// Return an error if we're in stateless mode
|
|
||||||
// and the code isn't in the witness. It means
|
|
||||||
// that if code is read beyond the actual code
|
|
||||||
// size, pages of 0s need to be added to the
|
|
||||||
// witness.
|
|
||||||
if !inWitness {
|
|
||||||
return nil, errors.New("code chunk missing from proof")
|
|
||||||
}
|
|
||||||
copy(value[:], codePage[:])
|
|
||||||
} else {
|
|
||||||
// Calculate the chunk
|
|
||||||
chunk := pc / 31
|
|
||||||
end := (chunk + 1) * 31
|
|
||||||
if end >= uint64(len(contract.Code)) {
|
|
||||||
end = uint64(len(contract.Code))
|
|
||||||
}
|
|
||||||
count := uint64(0)
|
|
||||||
// Look for the first code byte (i.e. no pushdata)
|
|
||||||
for ; chunk*31+count < end && count < 31 && !contract.IsCode(chunk*31+count); count++ {
|
|
||||||
}
|
|
||||||
value[0] = byte(count)
|
|
||||||
copy(value[1:], contract.Code[chunk*31:end])
|
|
||||||
}
|
|
||||||
contract.Gas -= in.evm.TxContext.Accesses.TouchAddressAndChargeGas(index, value[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
if inWitness {
|
|
||||||
// Get the op from the tree, skipping the header byte
|
|
||||||
op = OpCode(codePage[1+pc%31])
|
|
||||||
} else {
|
|
||||||
// If we are in witness mode, then raise an error
|
|
||||||
op = contract.GetOp(pc)
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get the operation from the jump table and validate the stack to ensure there are
|
// Get the operation from the jump table and validate the stack to ensure there are
|
||||||
// enough stack items available to perform the operation.
|
// enough stack items available to perform the operation.
|
||||||
|
op = contract.GetOp(pc)
|
||||||
operation := in.cfg.JumpTable[op]
|
operation := in.cfg.JumpTable[op]
|
||||||
if operation == nil {
|
cost = operation.constantGas // For tracing
|
||||||
return nil, &ErrInvalidOpCode{opcode: op}
|
|
||||||
}
|
|
||||||
// Validate stack
|
// Validate stack
|
||||||
if sLen := stack.len(); sLen < operation.minStack {
|
if sLen := stack.len(); sLen < operation.minStack {
|
||||||
return nil, &ErrStackUnderflow{stackLen: sLen, required: operation.minStack}
|
return nil, &ErrStackUnderflow{stackLen: sLen, required: operation.minStack}
|
||||||
} else if sLen > operation.maxStack {
|
} else if sLen > operation.maxStack {
|
||||||
return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
|
return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack}
|
||||||
}
|
}
|
||||||
// If the operation is valid, enforce write restrictions
|
if !contract.UseGas(cost) {
|
||||||
if in.readOnly && in.evm.chainRules.IsByzantium {
|
|
||||||
// If the interpreter is operating in readonly mode, make sure no
|
|
||||||
// state-modifying operation is performed. The 3rd stack item
|
|
||||||
// for a call operation is the value. Transferring value from one
|
|
||||||
// account to the others means the state is modified and should also
|
|
||||||
// return with an error.
|
|
||||||
if operation.writes || (op == CALL && stack.Back(2).Sign() != 0) {
|
|
||||||
return nil, ErrWriteProtection
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Static portion of gas
|
|
||||||
cost = operation.constantGas // For tracing
|
|
||||||
if !contract.UseGas(operation.constantGas) {
|
|
||||||
return nil, ErrOutOfGas
|
return nil, ErrOutOfGas
|
||||||
}
|
}
|
||||||
|
|
||||||
var memorySize uint64
|
|
||||||
// calculate the new memory size and expand the memory to fit
|
|
||||||
// the operation
|
|
||||||
// Memory check needs to be done prior to evaluating the dynamic gas portion,
|
|
||||||
// to detect calculation overflows
|
|
||||||
if operation.memorySize != nil {
|
|
||||||
memSize, overflow := operation.memorySize(stack)
|
|
||||||
if overflow {
|
|
||||||
return nil, ErrGasUintOverflow
|
|
||||||
}
|
|
||||||
// memory is expanded in words of 32 bytes. Gas
|
|
||||||
// is also calculated in words.
|
|
||||||
if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
|
|
||||||
return nil, ErrGasUintOverflow
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Dynamic portion of gas
|
|
||||||
// consume the gas and return an error if not enough gas is available.
|
|
||||||
// cost is explicitly set so that the capture state defer method can get the proper cost
|
|
||||||
if operation.dynamicGas != nil {
|
if operation.dynamicGas != nil {
|
||||||
|
// All ops with a dynamic memory usage also has a dynamic gas cost.
|
||||||
|
var memorySize uint64
|
||||||
|
// calculate the new memory size and expand the memory to fit
|
||||||
|
// the operation
|
||||||
|
// Memory check needs to be done prior to evaluating the dynamic gas portion,
|
||||||
|
// to detect calculation overflows
|
||||||
|
if operation.memorySize != nil {
|
||||||
|
memSize, overflow := operation.memorySize(stack)
|
||||||
|
if overflow {
|
||||||
|
return nil, ErrGasUintOverflow
|
||||||
|
}
|
||||||
|
// memory is expanded in words of 32 bytes. Gas
|
||||||
|
// is also calculated in words.
|
||||||
|
if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow {
|
||||||
|
return nil, ErrGasUintOverflow
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Consume the gas and return an error if not enough gas is available.
|
||||||
|
// cost is explicitly set so that the capture state defer method can get the proper cost
|
||||||
var dynamicCost uint64
|
var dynamicCost uint64
|
||||||
dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
|
dynamicCost, err = operation.dynamicGas(in.evm, contract, stack, mem, memorySize)
|
||||||
cost += dynamicCost // total cost, for debug tracing
|
cost += dynamicCost // for tracing
|
||||||
if err != nil || !contract.UseGas(dynamicCost) {
|
if err != nil || !contract.UseGas(dynamicCost) {
|
||||||
return nil, ErrOutOfGas
|
return nil, ErrOutOfGas
|
||||||
}
|
}
|
||||||
|
if memorySize > 0 {
|
||||||
|
mem.Resize(memorySize)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if memorySize > 0 {
|
|
||||||
mem.Resize(memorySize)
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.cfg.Debug {
|
if in.cfg.Debug {
|
||||||
in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
|
in.cfg.Tracer.CaptureState(pc, op, gasCopy, cost, callContext, in.returnData, in.evm.depth, err)
|
||||||
logged = true
|
logged = true
|
||||||
}
|
}
|
||||||
|
|
||||||
// execute the operation
|
// execute the operation
|
||||||
res, err = operation.execute(&pc, in, callContext)
|
res, err = operation.execute(&pc, in, callContext)
|
||||||
// if the operation clears the return data (e.g. it has returning data)
|
if err != nil {
|
||||||
// set the last return to the result of the operation.
|
break
|
||||||
if operation.returns {
|
|
||||||
in.returnData = res
|
|
||||||
}
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case err != nil:
|
|
||||||
return nil, err
|
|
||||||
case operation.reverts:
|
|
||||||
return res, ErrExecutionReverted
|
|
||||||
case operation.halts:
|
|
||||||
return res, nil
|
|
||||||
case !operation.jumps:
|
|
||||||
pc++
|
|
||||||
}
|
}
|
||||||
|
pc++
|
||||||
}
|
}
|
||||||
return nil, nil
|
|
||||||
|
if err == errStopToken {
|
||||||
|
err = nil // clear stop token error
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, err
|
||||||
}
|
}
|
||||||
|
77
core/vm/interpreter_test.go
Normal file
77
core/vm/interpreter_test.go
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
// Copyright 2021 The go-ethereum Authors
|
||||||
|
// This file is part of the go-ethereum library.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU Lesser General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
//
|
||||||
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU Lesser General Public License for more details.
|
||||||
|
//
|
||||||
|
// You should have received a copy of the GNU Lesser General Public License
|
||||||
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
package vm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ethereum/go-ethereum/common"
|
||||||
|
"github.com/ethereum/go-ethereum/common/math"
|
||||||
|
"github.com/ethereum/go-ethereum/core/rawdb"
|
||||||
|
"github.com/ethereum/go-ethereum/core/state"
|
||||||
|
"github.com/ethereum/go-ethereum/params"
|
||||||
|
)
|
||||||
|
|
||||||
|
var loopInterruptTests = []string{
|
||||||
|
// infinite loop using JUMP: push(2) jumpdest dup1 jump
|
||||||
|
"60025b8056",
|
||||||
|
// infinite loop using JUMPI: push(1) push(4) jumpdest dup2 dup2 jumpi
|
||||||
|
"600160045b818157",
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoopInterrupt(t *testing.T) {
|
||||||
|
address := common.BytesToAddress([]byte("contract"))
|
||||||
|
vmctx := BlockContext{
|
||||||
|
Transfer: func(StateDB, common.Address, common.Address, *big.Int) {},
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, tt := range loopInterruptTests {
|
||||||
|
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
|
||||||
|
statedb.CreateAccount(address)
|
||||||
|
statedb.SetCode(address, common.Hex2Bytes(tt))
|
||||||
|
statedb.Finalise(true)
|
||||||
|
|
||||||
|
evm := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{})
|
||||||
|
|
||||||
|
errChannel := make(chan error)
|
||||||
|
timeout := make(chan bool)
|
||||||
|
|
||||||
|
go func(evm *EVM) {
|
||||||
|
_, _, err := evm.Call(AccountRef(common.Address{}), address, nil, math.MaxUint64, new(big.Int))
|
||||||
|
errChannel <- err
|
||||||
|
}(evm)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
<-time.After(time.Second)
|
||||||
|
timeout <- true
|
||||||
|
}()
|
||||||
|
|
||||||
|
evm.Cancel()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-timeout:
|
||||||
|
t.Errorf("test %d timed out", i)
|
||||||
|
case err := <-errChannel:
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("test %d failure: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
@@ -17,6 +17,8 @@
|
|||||||
package vm
|
package vm
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"github.com/ethereum/go-ethereum/params"
|
"github.com/ethereum/go-ethereum/params"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -40,12 +42,6 @@ type operation struct {
|
|||||||
|
|
||||||
// memorySize returns the memory size required for the operation
|
// memorySize returns the memory size required for the operation
|
||||||
memorySize memorySizeFunc
|
memorySize memorySizeFunc
|
||||||
|
|
||||||
halts bool // indicates whether the operation should halt further execution
|
|
||||||
jumps bool // indicates whether the program counter should not increment
|
|
||||||
writes bool // determines whether this a state modifying operation
|
|
||||||
reverts bool // determines whether the operation reverts state (implicitly halts)
|
|
||||||
returns bool // determines whether the operations sets the return data content
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -58,18 +54,48 @@ var (
|
|||||||
istanbulInstructionSet = newIstanbulInstructionSet()
|
istanbulInstructionSet = newIstanbulInstructionSet()
|
||||||
berlinInstructionSet = newBerlinInstructionSet()
|
berlinInstructionSet = newBerlinInstructionSet()
|
||||||
londonInstructionSet = newLondonInstructionSet()
|
londonInstructionSet = newLondonInstructionSet()
|
||||||
|
mergeInstructionSet = newMergeInstructionSet()
|
||||||
)
|
)
|
||||||
|
|
||||||
// JumpTable contains the EVM opcodes supported at a given fork.
|
// JumpTable contains the EVM opcodes supported at a given fork.
|
||||||
type JumpTable [256]*operation
|
type JumpTable [256]*operation
|
||||||
|
|
||||||
|
func validate(jt JumpTable) JumpTable {
|
||||||
|
for i, op := range jt {
|
||||||
|
if op == nil {
|
||||||
|
panic(fmt.Sprintf("op 0x%x is not set", i))
|
||||||
|
}
|
||||||
|
// The interpreter has an assumption that if the memorySize function is
|
||||||
|
// set, then the dynamicGas function is also set. This is a somewhat
|
||||||
|
// arbitrary assumption, and can be removed if we need to -- but it
|
||||||
|
// allows us to avoid a condition check. As long as we have that assumption
|
||||||
|
// in there, this little sanity check prevents us from merging in a
|
||||||
|
// change which violates it.
|
||||||
|
if op.memorySize != nil && op.dynamicGas == nil {
|
||||||
|
panic(fmt.Sprintf("op %v has dynamic memory but not dynamic gas", OpCode(i).String()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return jt
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMergeInstructionSet() JumpTable {
|
||||||
|
instructionSet := newLondonInstructionSet()
|
||||||
|
instructionSet[RANDOM] = &operation{
|
||||||
|
execute: opRandom,
|
||||||
|
constantGas: GasQuickStep,
|
||||||
|
minStack: minStack(0, 1),
|
||||||
|
maxStack: maxStack(0, 1),
|
||||||
|
}
|
||||||
|
return validate(instructionSet)
|
||||||
|
}
|
||||||
|
|
||||||
// newLondonInstructionSet returns the frontier, homestead, byzantium,
|
// newLondonInstructionSet returns the frontier, homestead, byzantium,
|
||||||
// contantinople, istanbul, petersburg, berlin and london instructions.
|
// contantinople, istanbul, petersburg, berlin and london instructions.
|
||||||
func newLondonInstructionSet() JumpTable {
|
func newLondonInstructionSet() JumpTable {
|
||||||
instructionSet := newBerlinInstructionSet()
|
instructionSet := newBerlinInstructionSet()
|
||||||
enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529
|
enable3529(&instructionSet) // EIP-3529: Reduction in refunds https://eips.ethereum.org/EIPS/eip-3529
|
||||||
enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198
|
enable3198(&instructionSet) // Base fee opcode https://eips.ethereum.org/EIPS/eip-3198
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// newBerlinInstructionSet returns the frontier, homestead, byzantium,
|
// newBerlinInstructionSet returns the frontier, homestead, byzantium,
|
||||||
@@ -77,7 +103,7 @@ func newLondonInstructionSet() JumpTable {
|
|||||||
func newBerlinInstructionSet() JumpTable {
|
func newBerlinInstructionSet() JumpTable {
|
||||||
instructionSet := newIstanbulInstructionSet()
|
instructionSet := newIstanbulInstructionSet()
|
||||||
enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929
|
enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// newIstanbulInstructionSet returns the frontier, homestead, byzantium,
|
// newIstanbulInstructionSet returns the frontier, homestead, byzantium,
|
||||||
@@ -89,7 +115,7 @@ func newIstanbulInstructionSet() JumpTable {
|
|||||||
enable1884(&instructionSet) // Reprice reader opcodes - https://eips.ethereum.org/EIPS/eip-1884
|
enable1884(&instructionSet) // Reprice reader opcodes - https://eips.ethereum.org/EIPS/eip-1884
|
||||||
enable2200(&instructionSet) // Net metered SSTORE - https://eips.ethereum.org/EIPS/eip-2200
|
enable2200(&instructionSet) // Net metered SSTORE - https://eips.ethereum.org/EIPS/eip-2200
|
||||||
|
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// newConstantinopleInstructionSet returns the frontier, homestead,
|
// newConstantinopleInstructionSet returns the frontier, homestead,
|
||||||
@@ -127,10 +153,8 @@ func newConstantinopleInstructionSet() JumpTable {
|
|||||||
minStack: minStack(4, 1),
|
minStack: minStack(4, 1),
|
||||||
maxStack: maxStack(4, 1),
|
maxStack: maxStack(4, 1),
|
||||||
memorySize: memoryCreate2,
|
memorySize: memoryCreate2,
|
||||||
writes: true,
|
|
||||||
returns: true,
|
|
||||||
}
|
}
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// newByzantiumInstructionSet returns the frontier, homestead and
|
// newByzantiumInstructionSet returns the frontier, homestead and
|
||||||
@@ -144,7 +168,6 @@ func newByzantiumInstructionSet() JumpTable {
|
|||||||
minStack: minStack(6, 1),
|
minStack: minStack(6, 1),
|
||||||
maxStack: maxStack(6, 1),
|
maxStack: maxStack(6, 1),
|
||||||
memorySize: memoryStaticCall,
|
memorySize: memoryStaticCall,
|
||||||
returns: true,
|
|
||||||
}
|
}
|
||||||
instructionSet[RETURNDATASIZE] = &operation{
|
instructionSet[RETURNDATASIZE] = &operation{
|
||||||
execute: opReturnDataSize,
|
execute: opReturnDataSize,
|
||||||
@@ -166,17 +189,15 @@ func newByzantiumInstructionSet() JumpTable {
|
|||||||
minStack: minStack(2, 0),
|
minStack: minStack(2, 0),
|
||||||
maxStack: maxStack(2, 0),
|
maxStack: maxStack(2, 0),
|
||||||
memorySize: memoryRevert,
|
memorySize: memoryRevert,
|
||||||
reverts: true,
|
|
||||||
returns: true,
|
|
||||||
}
|
}
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// EIP 158 a.k.a Spurious Dragon
|
// EIP 158 a.k.a Spurious Dragon
|
||||||
func newSpuriousDragonInstructionSet() JumpTable {
|
func newSpuriousDragonInstructionSet() JumpTable {
|
||||||
instructionSet := newTangerineWhistleInstructionSet()
|
instructionSet := newTangerineWhistleInstructionSet()
|
||||||
instructionSet[EXP].dynamicGas = gasExpEIP158
|
instructionSet[EXP].dynamicGas = gasExpEIP158
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -190,7 +211,7 @@ func newTangerineWhistleInstructionSet() JumpTable {
|
|||||||
instructionSet[CALL].constantGas = params.CallGasEIP150
|
instructionSet[CALL].constantGas = params.CallGasEIP150
|
||||||
instructionSet[CALLCODE].constantGas = params.CallGasEIP150
|
instructionSet[CALLCODE].constantGas = params.CallGasEIP150
|
||||||
instructionSet[DELEGATECALL].constantGas = params.CallGasEIP150
|
instructionSet[DELEGATECALL].constantGas = params.CallGasEIP150
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// newHomesteadInstructionSet returns the frontier and homestead
|
// newHomesteadInstructionSet returns the frontier and homestead
|
||||||
@@ -204,21 +225,19 @@ func newHomesteadInstructionSet() JumpTable {
|
|||||||
minStack: minStack(6, 1),
|
minStack: minStack(6, 1),
|
||||||
maxStack: maxStack(6, 1),
|
maxStack: maxStack(6, 1),
|
||||||
memorySize: memoryDelegateCall,
|
memorySize: memoryDelegateCall,
|
||||||
returns: true,
|
|
||||||
}
|
}
|
||||||
return instructionSet
|
return validate(instructionSet)
|
||||||
}
|
}
|
||||||
|
|
||||||
// newFrontierInstructionSet returns the frontier instructions
|
// newFrontierInstructionSet returns the frontier instructions
|
||||||
// that can be executed during the frontier phase.
|
// that can be executed during the frontier phase.
|
||||||
func newFrontierInstructionSet() JumpTable {
|
func newFrontierInstructionSet() JumpTable {
|
||||||
return JumpTable{
|
tbl := JumpTable{
|
||||||
STOP: {
|
STOP: {
|
||||||
execute: opStop,
|
execute: opStop,
|
||||||
constantGas: 0,
|
constantGas: 0,
|
||||||
minStack: minStack(0, 0),
|
minStack: minStack(0, 0),
|
||||||
maxStack: maxStack(0, 0),
|
maxStack: maxStack(0, 0),
|
||||||
halts: true,
|
|
||||||
},
|
},
|
||||||
ADD: {
|
ADD: {
|
||||||
execute: opAdd,
|
execute: opAdd,
|
||||||
@@ -352,13 +371,13 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(2, 1),
|
minStack: minStack(2, 1),
|
||||||
maxStack: maxStack(2, 1),
|
maxStack: maxStack(2, 1),
|
||||||
},
|
},
|
||||||
SHA3: {
|
KECCAK256: {
|
||||||
execute: opSha3,
|
execute: opKeccak256,
|
||||||
constantGas: params.Sha3Gas,
|
constantGas: params.Keccak256Gas,
|
||||||
dynamicGas: gasSha3,
|
dynamicGas: gasKeccak256,
|
||||||
minStack: minStack(2, 1),
|
minStack: minStack(2, 1),
|
||||||
maxStack: maxStack(2, 1),
|
maxStack: maxStack(2, 1),
|
||||||
memorySize: memorySha3,
|
memorySize: memoryKeccak256,
|
||||||
},
|
},
|
||||||
ADDRESS: {
|
ADDRESS: {
|
||||||
execute: opAddress,
|
execute: opAddress,
|
||||||
@@ -433,7 +452,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
EXTCODESIZE: {
|
EXTCODESIZE: {
|
||||||
execute: opExtCodeSize,
|
execute: opExtCodeSize,
|
||||||
constantGas: params.ExtcodeSizeGasFrontier,
|
constantGas: params.ExtcodeSizeGasFrontier,
|
||||||
dynamicGas: gasExtCodeSize,
|
|
||||||
minStack: minStack(1, 1),
|
minStack: minStack(1, 1),
|
||||||
maxStack: maxStack(1, 1),
|
maxStack: maxStack(1, 1),
|
||||||
},
|
},
|
||||||
@@ -514,7 +532,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
SLOAD: {
|
SLOAD: {
|
||||||
execute: opSload,
|
execute: opSload,
|
||||||
constantGas: params.SloadGasFrontier,
|
constantGas: params.SloadGasFrontier,
|
||||||
dynamicGas: gasSLoad,
|
|
||||||
minStack: minStack(1, 1),
|
minStack: minStack(1, 1),
|
||||||
maxStack: maxStack(1, 1),
|
maxStack: maxStack(1, 1),
|
||||||
},
|
},
|
||||||
@@ -523,21 +540,18 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
dynamicGas: gasSStore,
|
dynamicGas: gasSStore,
|
||||||
minStack: minStack(2, 0),
|
minStack: minStack(2, 0),
|
||||||
maxStack: maxStack(2, 0),
|
maxStack: maxStack(2, 0),
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
JUMP: {
|
JUMP: {
|
||||||
execute: opJump,
|
execute: opJump,
|
||||||
constantGas: GasMidStep,
|
constantGas: GasMidStep,
|
||||||
minStack: minStack(1, 0),
|
minStack: minStack(1, 0),
|
||||||
maxStack: maxStack(1, 0),
|
maxStack: maxStack(1, 0),
|
||||||
jumps: true,
|
|
||||||
},
|
},
|
||||||
JUMPI: {
|
JUMPI: {
|
||||||
execute: opJumpi,
|
execute: opJumpi,
|
||||||
constantGas: GasSlowStep,
|
constantGas: GasSlowStep,
|
||||||
minStack: minStack(2, 0),
|
minStack: minStack(2, 0),
|
||||||
maxStack: maxStack(2, 0),
|
maxStack: maxStack(2, 0),
|
||||||
jumps: true,
|
|
||||||
},
|
},
|
||||||
PC: {
|
PC: {
|
||||||
execute: opPc,
|
execute: opPc,
|
||||||
@@ -953,7 +967,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(2, 0),
|
minStack: minStack(2, 0),
|
||||||
maxStack: maxStack(2, 0),
|
maxStack: maxStack(2, 0),
|
||||||
memorySize: memoryLog,
|
memorySize: memoryLog,
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
LOG1: {
|
LOG1: {
|
||||||
execute: makeLog(1),
|
execute: makeLog(1),
|
||||||
@@ -961,7 +974,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(3, 0),
|
minStack: minStack(3, 0),
|
||||||
maxStack: maxStack(3, 0),
|
maxStack: maxStack(3, 0),
|
||||||
memorySize: memoryLog,
|
memorySize: memoryLog,
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
LOG2: {
|
LOG2: {
|
||||||
execute: makeLog(2),
|
execute: makeLog(2),
|
||||||
@@ -969,7 +981,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(4, 0),
|
minStack: minStack(4, 0),
|
||||||
maxStack: maxStack(4, 0),
|
maxStack: maxStack(4, 0),
|
||||||
memorySize: memoryLog,
|
memorySize: memoryLog,
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
LOG3: {
|
LOG3: {
|
||||||
execute: makeLog(3),
|
execute: makeLog(3),
|
||||||
@@ -977,7 +988,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(5, 0),
|
minStack: minStack(5, 0),
|
||||||
maxStack: maxStack(5, 0),
|
maxStack: maxStack(5, 0),
|
||||||
memorySize: memoryLog,
|
memorySize: memoryLog,
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
LOG4: {
|
LOG4: {
|
||||||
execute: makeLog(4),
|
execute: makeLog(4),
|
||||||
@@ -985,7 +995,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(6, 0),
|
minStack: minStack(6, 0),
|
||||||
maxStack: maxStack(6, 0),
|
maxStack: maxStack(6, 0),
|
||||||
memorySize: memoryLog,
|
memorySize: memoryLog,
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
CREATE: {
|
CREATE: {
|
||||||
execute: opCreate,
|
execute: opCreate,
|
||||||
@@ -994,8 +1003,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(3, 1),
|
minStack: minStack(3, 1),
|
||||||
maxStack: maxStack(3, 1),
|
maxStack: maxStack(3, 1),
|
||||||
memorySize: memoryCreate,
|
memorySize: memoryCreate,
|
||||||
writes: true,
|
|
||||||
returns: true,
|
|
||||||
},
|
},
|
||||||
CALL: {
|
CALL: {
|
||||||
execute: opCall,
|
execute: opCall,
|
||||||
@@ -1004,7 +1011,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(7, 1),
|
minStack: minStack(7, 1),
|
||||||
maxStack: maxStack(7, 1),
|
maxStack: maxStack(7, 1),
|
||||||
memorySize: memoryCall,
|
memorySize: memoryCall,
|
||||||
returns: true,
|
|
||||||
},
|
},
|
||||||
CALLCODE: {
|
CALLCODE: {
|
||||||
execute: opCallCode,
|
execute: opCallCode,
|
||||||
@@ -1013,7 +1019,6 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(7, 1),
|
minStack: minStack(7, 1),
|
||||||
maxStack: maxStack(7, 1),
|
maxStack: maxStack(7, 1),
|
||||||
memorySize: memoryCall,
|
memorySize: memoryCall,
|
||||||
returns: true,
|
|
||||||
},
|
},
|
||||||
RETURN: {
|
RETURN: {
|
||||||
execute: opReturn,
|
execute: opReturn,
|
||||||
@@ -1021,15 +1026,21 @@ func newFrontierInstructionSet() JumpTable {
|
|||||||
minStack: minStack(2, 0),
|
minStack: minStack(2, 0),
|
||||||
maxStack: maxStack(2, 0),
|
maxStack: maxStack(2, 0),
|
||||||
memorySize: memoryReturn,
|
memorySize: memoryReturn,
|
||||||
halts: true,
|
|
||||||
},
|
},
|
||||||
SELFDESTRUCT: {
|
SELFDESTRUCT: {
|
||||||
execute: opSuicide,
|
execute: opSelfdestruct,
|
||||||
dynamicGas: gasSelfdestruct,
|
dynamicGas: gasSelfdestruct,
|
||||||
minStack: minStack(1, 0),
|
minStack: minStack(1, 0),
|
||||||
maxStack: maxStack(1, 0),
|
maxStack: maxStack(1, 0),
|
||||||
halts: true,
|
|
||||||
writes: true,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Fill all unassigned slots with opUndefined.
|
||||||
|
for i, entry := range tbl {
|
||||||
|
if entry == nil {
|
||||||
|
tbl[i] = &operation{execute: opUndefined, maxStack: maxStack(0, 0)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return validate(tbl)
|
||||||
}
|
}
|
||||||
|
@@ -16,7 +16,7 @@
|
|||||||
|
|
||||||
package vm
|
package vm
|
||||||
|
|
||||||
func memorySha3(stack *Stack) (uint64, bool) {
|
func memoryKeccak256(stack *Stack) (uint64, bool) {
|
||||||
return calcMemSize64(stack.Back(0), stack.Back(1))
|
return calcMemSize64(stack.Back(0), stack.Back(1))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user