consensus/ethash: improve cache/dataset handling (#15864)

* consensus/ethash: add maxEpoch constant

* consensus/ethash: improve cache/dataset handling

There are two fixes in this commit:

Unmap the memory through a finalizer, like the libethash wrapper did. The
old release logic was incorrect and could free the memory while it was
still in use, leading to crashes such as those in #14495 or #14943.
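
A minimal sketch of the finalizer approach, assuming the github.com/edsrzf/mmap-go
package that geth uses for its cache files; the cache type and newMappedCache
function below are illustrative stand-ins, not the exact geth code:

package ethash

import (
	"os"
	"runtime"

	mmap "github.com/edsrzf/mmap-go"
)

// cache is a simplified stand-in for the ethash verification cache: a
// file-backed memory mapping used during verification.
type cache struct {
	dump *os.File  // file backing the memory-mapped data
	mmap mmap.MMap // memory mapping of the cache file
}

// newMappedCache maps an existing cache file and arranges for the mapping to
// be torn down by the garbage collector instead of by explicit release calls.
func newMappedCache(path string) (*cache, error) {
	file, err := os.OpenFile(path, os.O_RDONLY, 0644)
	if err != nil {
		return nil, err
	}
	mem, err := mmap.Map(file, mmap.RDONLY, 0)
	if err != nil {
		file.Close()
		return nil, err
	}
	c := &cache{dump: file, mmap: mem}
	// The mapping is only unmapped once the GC proves the cache unreachable,
	// so an in-flight verification can never observe freed memory.
	runtime.SetFinalizer(c, (*cache).finalize)
	return c, nil
}

func (c *cache) finalize() {
	if c.mmap != nil {
		c.mmap.Unmap()
		c.dump.Close()
		c.mmap, c.dump = nil, nil
	}
}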

Track caches and datasets using simplelru instead of reinventing LRU
logic. This should make it easier to verify that the caching logic is
correct.
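
A minimal sketch of this tracking, using hashicorp's
github.com/hashicorp/golang-lru/simplelru as the commit does; the wrapper
below is illustrative and elides logging:

package ethash

import (
	"sync"

	"github.com/hashicorp/golang-lru/simplelru"
)

// lru tracks caches or datasets by recency of use, delegating the actual
// eviction bookkeeping to simplelru instead of hand-rolled logic.
type lru struct {
	what string                         // "cache" or "dataset", for log messages
	new  func(epoch uint64) interface{} // constructor for items not yet cached

	mu         sync.Mutex
	cache      *simplelru.LRU
	future     uint64      // highest epoch for which an item was pre-generated
	futureItem interface{} // the pre-generated item for that epoch
}

// newlru creates an LRU holding at most maxItems caches or datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
	cache, _ := simplelru.NewLRU(maxItems, nil) // only errors on maxItems <= 0
	return &lru{what: what, new: new, cache: cache}
}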

* consensus/ethash: restore 'future item' logic in lru
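
A sketch of the restored behavior, continuing the lru wrapper above: get
returns the item for the requested epoch and, when the epoch horizon moves
forward, speculatively builds the next epoch's item so it is ready before the
transition (maxEpoch is the constant added earlier in this commit):

// get retrieves or creates the item for epoch, plus a pre-generated item for
// epoch+1 when the requested epoch advances past the previous horizon.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
	lru.mu.Lock()
	defer lru.mu.Unlock()

	// Fetch or create the item for the requested epoch.
	item, ok := lru.cache.Get(epoch)
	if !ok {
		if lru.future > 0 && lru.future == epoch {
			item = lru.futureItem // promote the previously pre-generated item
		} else {
			item = lru.new(epoch)
		}
		lru.cache.Add(epoch, item)
	}
	// Pre-generate the next epoch's item if the horizon moved forward.
	if epoch < maxEpoch-1 && lru.future < epoch+1 {
		future = lru.new(epoch + 1)
		lru.future = epoch + 1
		lru.futureItem = future
	}
	return item, future
}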

* consensus/ethash: use mmap even in test mode

This makes it possible to shorten the time taken for TestCacheFileEvict.

* consensus/ethash: shuffle func calc*Size comments around

* consensus/ethash: ensure future cache/dataset is in the lru cache

* consensus/ethash: add issue link to the new test

* consensus/ethash: fix vet

* consensus/ethash: fix test

* consensus: tiny issue + nitpick fixes

Author:    Felix Lange
Date:      2018-01-23 11:05:30 +01:00
Committer: Péter Szilágyi
Parent:    5d4267911a
Commit:    924065e19d

8 changed files with 208 additions and 213 deletions


@@ -20,17 +20,20 @@ package ethash
import "math/big"
// cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
// cacheSize returns the size of the ethash verification cache that belongs to a certain
// block number.
func cacheSize(block uint64) uint64 {
// If we have a pre-generated value, use that
epoch := int(block / epochLength)
if epoch < len(cacheSizes) {
if epoch < maxEpoch {
return cacheSizes[epoch]
}
// No known cache size, calculate manually (sanity branch only)
return calcCacheSize(epoch)
}
// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcCacheSize(epoch int) uint64 {
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * hashBytes
@@ -38,17 +41,20 @@ func cacheSize(block uint64) uint64 {
 	return size
 }
 
-// datasetSize calculates and returns the size of the ethash mining dataset that
-// belongs to a certain block number. The dataset size grows linearly, however, we
-// always take the highest prime below the linearly growing threshold in order to
-// reduce the risk of accidental regularities leading to cyclic behavior.
+// datasetSize returns the size of the ethash mining dataset that belongs to a certain
+// block number.
 func datasetSize(block uint64) uint64 {
-	// If we have a pre-generated value, use that
 	epoch := int(block / epochLength)
-	if epoch < len(datasetSizes) {
+	if epoch < maxEpoch {
 		return datasetSizes[epoch]
 	}
-	// No known dataset size, calculate manually (sanity branch only)
+	return calcDatasetSize(epoch)
+}
+
+// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
+// however, we always take the highest prime below the linearly growing threshold in order
+// to reduce the risk of accidental regularities leading to cyclic behavior.
+func calcDatasetSize(epoch int) uint64 {
 	size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
 	for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
 		size -= 2 * mixBytes
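
For concreteness, a self-contained check of the prime-capping loop for
epoch 0, using the ethash constants from the spec (cacheInitBytes = 1<<24,
cacheGrowthBytes = 1<<17, hashBytes = 64) and the calcCacheSize body from
the diff above. Starting from 2^24 - 64 = 16777152 bytes (262143 hashes),
262143 and 262141 are composite, so the loop settles on 262139 hashes,
i.e. 16776896 bytes, which matches the first entry of the pre-generated
cacheSizes table:

package main

import (
	"fmt"
	"math/big"
)

const (
	cacheInitBytes   = 1 << 24 // bytes in cache at genesis
	cacheGrowthBytes = 1 << 17 // cache growth per epoch
	hashBytes        = 64      // hash length in bytes
)

// calcCacheSize is copied from the diff: step down from the linear threshold
// until the number of hashes in the cache is prime.
func calcCacheSize(epoch int) uint64 {
	size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
	for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
		size -= 2 * hashBytes
	}
	return size
}

func main() {
	// 262143 (2^18-1) and 262141 (11*23831) are composite; 262139 is prime.
	fmt.Println(calcCacheSize(0)) // 16776896, matching cacheSizes[0]
}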