Merge pull request #14523 from karalabe/txpool-cli-flags
cmd, core, eth: configurable txpool parameters
This commit is contained in:
		| @@ -66,6 +66,13 @@ var ( | |||||||
| 		utils.EthashDatasetDirFlag, | 		utils.EthashDatasetDirFlag, | ||||||
| 		utils.EthashDatasetsInMemoryFlag, | 		utils.EthashDatasetsInMemoryFlag, | ||||||
| 		utils.EthashDatasetsOnDiskFlag, | 		utils.EthashDatasetsOnDiskFlag, | ||||||
|  | 		utils.TxPoolPriceLimitFlag, | ||||||
|  | 		utils.TxPoolPriceBumpFlag, | ||||||
|  | 		utils.TxPoolAccountSlotsFlag, | ||||||
|  | 		utils.TxPoolGlobalSlotsFlag, | ||||||
|  | 		utils.TxPoolAccountQueueFlag, | ||||||
|  | 		utils.TxPoolGlobalQueueFlag, | ||||||
|  | 		utils.TxPoolLifetimeFlag, | ||||||
| 		utils.FastSyncFlag, | 		utils.FastSyncFlag, | ||||||
| 		utils.LightModeFlag, | 		utils.LightModeFlag, | ||||||
| 		utils.SyncModeFlag, | 		utils.SyncModeFlag, | ||||||
|   | |||||||
| @@ -92,6 +92,18 @@ var AppHelpFlagGroups = []flagGroup{ | |||||||
| 			utils.EthashDatasetsOnDiskFlag, | 			utils.EthashDatasetsOnDiskFlag, | ||||||
| 		}, | 		}, | ||||||
| 	}, | 	}, | ||||||
|  | 	{ | ||||||
|  | 		Name: "TRANSACTION POOL", | ||||||
|  | 		Flags: []cli.Flag{ | ||||||
|  | 			utils.TxPoolPriceLimitFlag, | ||||||
|  | 			utils.TxPoolPriceBumpFlag, | ||||||
|  | 			utils.TxPoolAccountSlotsFlag, | ||||||
|  | 			utils.TxPoolGlobalSlotsFlag, | ||||||
|  | 			utils.TxPoolAccountQueueFlag, | ||||||
|  | 			utils.TxPoolGlobalQueueFlag, | ||||||
|  | 			utils.TxPoolLifetimeFlag, | ||||||
|  | 		}, | ||||||
|  | 	}, | ||||||
| 	{ | 	{ | ||||||
| 		Name: "PERFORMANCE TUNING", | 		Name: "PERFORMANCE TUNING", | ||||||
| 		Flags: []cli.Flag{ | 		Flags: []cli.Flag{ | ||||||
|   | |||||||
| @@ -123,35 +123,6 @@ var ( | |||||||
| 		Name:  "nousb", | 		Name:  "nousb", | ||||||
| 		Usage: "Disables monitoring for and managing USB hardware wallets", | 		Usage: "Disables monitoring for and managing USB hardware wallets", | ||||||
| 	} | 	} | ||||||
| 	EthashCacheDirFlag = DirectoryFlag{ |  | ||||||
| 		Name:  "ethash.cachedir", |  | ||||||
| 		Usage: "Directory to store the ethash verification caches (default = inside the datadir)", |  | ||||||
| 	} |  | ||||||
| 	EthashCachesInMemoryFlag = cli.IntFlag{ |  | ||||||
| 		Name:  "ethash.cachesinmem", |  | ||||||
| 		Usage: "Number of recent ethash caches to keep in memory (16MB each)", |  | ||||||
| 		Value: eth.DefaultConfig.EthashCachesInMem, |  | ||||||
| 	} |  | ||||||
| 	EthashCachesOnDiskFlag = cli.IntFlag{ |  | ||||||
| 		Name:  "ethash.cachesondisk", |  | ||||||
| 		Usage: "Number of recent ethash caches to keep on disk (16MB each)", |  | ||||||
| 		Value: eth.DefaultConfig.EthashCachesOnDisk, |  | ||||||
| 	} |  | ||||||
| 	EthashDatasetDirFlag = DirectoryFlag{ |  | ||||||
| 		Name:  "ethash.dagdir", |  | ||||||
| 		Usage: "Directory to store the ethash mining DAGs (default = inside home folder)", |  | ||||||
| 		Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir}, |  | ||||||
| 	} |  | ||||||
| 	EthashDatasetsInMemoryFlag = cli.IntFlag{ |  | ||||||
| 		Name:  "ethash.dagsinmem", |  | ||||||
| 		Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)", |  | ||||||
| 		Value: eth.DefaultConfig.EthashDatasetsInMem, |  | ||||||
| 	} |  | ||||||
| 	EthashDatasetsOnDiskFlag = cli.IntFlag{ |  | ||||||
| 		Name:  "ethash.dagsondisk", |  | ||||||
| 		Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)", |  | ||||||
| 		Value: eth.DefaultConfig.EthashDatasetsOnDisk, |  | ||||||
| 	} |  | ||||||
| 	NetworkIdFlag = cli.Uint64Flag{ | 	NetworkIdFlag = cli.Uint64Flag{ | ||||||
| 		Name:  "networkid", | 		Name:  "networkid", | ||||||
| 		Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)", | 		Usage: "Network identifier (integer, 1=Frontier, 2=Morden (disused), 3=Ropsten, 4=Rinkeby)", | ||||||
| @@ -207,6 +178,72 @@ var ( | |||||||
| 		Name:  "lightkdf", | 		Name:  "lightkdf", | ||||||
| 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", | 		Usage: "Reduce key-derivation RAM & CPU usage at some expense of KDF strength", | ||||||
| 	} | 	} | ||||||
|  | 	// Ethash settings | ||||||
|  | 	EthashCacheDirFlag = DirectoryFlag{ | ||||||
|  | 		Name:  "ethash.cachedir", | ||||||
|  | 		Usage: "Directory to store the ethash verification caches (default = inside the datadir)", | ||||||
|  | 	} | ||||||
|  | 	EthashCachesInMemoryFlag = cli.IntFlag{ | ||||||
|  | 		Name:  "ethash.cachesinmem", | ||||||
|  | 		Usage: "Number of recent ethash caches to keep in memory (16MB each)", | ||||||
|  | 		Value: eth.DefaultConfig.EthashCachesInMem, | ||||||
|  | 	} | ||||||
|  | 	EthashCachesOnDiskFlag = cli.IntFlag{ | ||||||
|  | 		Name:  "ethash.cachesondisk", | ||||||
|  | 		Usage: "Number of recent ethash caches to keep on disk (16MB each)", | ||||||
|  | 		Value: eth.DefaultConfig.EthashCachesOnDisk, | ||||||
|  | 	} | ||||||
|  | 	EthashDatasetDirFlag = DirectoryFlag{ | ||||||
|  | 		Name:  "ethash.dagdir", | ||||||
|  | 		Usage: "Directory to store the ethash mining DAGs (default = inside home folder)", | ||||||
|  | 		Value: DirectoryString{eth.DefaultConfig.EthashDatasetDir}, | ||||||
|  | 	} | ||||||
|  | 	EthashDatasetsInMemoryFlag = cli.IntFlag{ | ||||||
|  | 		Name:  "ethash.dagsinmem", | ||||||
|  | 		Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)", | ||||||
|  | 		Value: eth.DefaultConfig.EthashDatasetsInMem, | ||||||
|  | 	} | ||||||
|  | 	EthashDatasetsOnDiskFlag = cli.IntFlag{ | ||||||
|  | 		Name:  "ethash.dagsondisk", | ||||||
|  | 		Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)", | ||||||
|  | 		Value: eth.DefaultConfig.EthashDatasetsOnDisk, | ||||||
|  | 	} | ||||||
|  | 	// Transaction pool settings | ||||||
|  | 	TxPoolPriceLimitFlag = cli.Uint64Flag{ | ||||||
|  | 		Name:  "txpool.pricelimit", | ||||||
|  | 		Usage: "Minimum gas price limit to enforce for acceptance into the pool", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.PriceLimit, | ||||||
|  | 	} | ||||||
|  | 	TxPoolPriceBumpFlag = cli.Uint64Flag{ | ||||||
|  | 		Name:  "txpool.pricebump", | ||||||
|  | 		Usage: "Price bump percentage to replace an already existing transaction", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.PriceBump, | ||||||
|  | 	} | ||||||
|  | 	TxPoolAccountSlotsFlag = cli.Uint64Flag{ | ||||||
|  | 		Name:  "txpool.accountslots", | ||||||
|  | 		Usage: "Minimum number of executable transaction slots guaranteed per account", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.AccountSlots, | ||||||
|  | 	} | ||||||
|  | 	TxPoolGlobalSlotsFlag = cli.Uint64Flag{ | ||||||
|  | 		Name:  "txpool.globalslots", | ||||||
|  | 		Usage: "Maximum number of executable transaction slots for all accounts", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.GlobalSlots, | ||||||
|  | 	} | ||||||
|  | 	TxPoolAccountQueueFlag = cli.Uint64Flag{ | ||||||
|  | 		Name:  "txpool.accountqueue", | ||||||
|  | 		Usage: "Maximum number of non-executable transaction slots permitted per account", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.AccountQueue, | ||||||
|  | 	} | ||||||
|  | 	TxPoolGlobalQueueFlag = cli.Uint64Flag{ | ||||||
|  | 		Name:  "txpool.globalqueue", | ||||||
|  | 		Usage: "Maximum number of non-executable transaction slots for all accounts", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.GlobalQueue, | ||||||
|  | 	} | ||||||
|  | 	TxPoolLifetimeFlag = cli.DurationFlag{ | ||||||
|  | 		Name:  "txpool.lifetime", | ||||||
|  | 		Usage: "Maximum amount of time non-executable transactions are queued", | ||||||
|  | 		Value: eth.DefaultConfig.TxPool.Lifetime, | ||||||
|  | 	} | ||||||
| 	// Performance tuning settings | 	// Performance tuning settings | ||||||
| 	CacheFlag = cli.IntFlag{ | 	CacheFlag = cli.IntFlag{ | ||||||
| 		Name:  "cache", | 		Name:  "cache", | ||||||
| @@ -784,6 +821,30 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config) { | |||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
|  | func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolPriceLimitFlag.Name) { | ||||||
|  | 		cfg.PriceLimit = ctx.GlobalUint64(TxPoolPriceLimitFlag.Name) | ||||||
|  | 	} | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolPriceBumpFlag.Name) { | ||||||
|  | 		cfg.PriceBump = ctx.GlobalUint64(TxPoolPriceBumpFlag.Name) | ||||||
|  | 	} | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolAccountSlotsFlag.Name) { | ||||||
|  | 		cfg.AccountSlots = ctx.GlobalUint64(TxPoolAccountSlotsFlag.Name) | ||||||
|  | 	} | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolGlobalSlotsFlag.Name) { | ||||||
|  | 		cfg.GlobalSlots = ctx.GlobalUint64(TxPoolGlobalSlotsFlag.Name) | ||||||
|  | 	} | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolAccountQueueFlag.Name) { | ||||||
|  | 		cfg.AccountQueue = ctx.GlobalUint64(TxPoolAccountQueueFlag.Name) | ||||||
|  | 	} | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolGlobalQueueFlag.Name) { | ||||||
|  | 		cfg.GlobalQueue = ctx.GlobalUint64(TxPoolGlobalQueueFlag.Name) | ||||||
|  | 	} | ||||||
|  | 	if ctx.GlobalIsSet(TxPoolLifetimeFlag.Name) { | ||||||
|  | 		cfg.Lifetime = ctx.GlobalDuration(TxPoolLifetimeFlag.Name) | ||||||
|  | 	} | ||||||
|  | } | ||||||
|  |  | ||||||
| func setEthash(ctx *cli.Context, cfg *eth.Config) { | func setEthash(ctx *cli.Context, cfg *eth.Config) { | ||||||
| 	if ctx.GlobalIsSet(EthashCacheDirFlag.Name) { | 	if ctx.GlobalIsSet(EthashCacheDirFlag.Name) { | ||||||
| 		cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name) | 		cfg.EthashCacheDir = ctx.GlobalString(EthashCacheDirFlag.Name) | ||||||
| @@ -826,6 +887,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { | |||||||
| 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) | 	ks := stack.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore) | ||||||
| 	setEtherbase(ctx, ks, cfg) | 	setEtherbase(ctx, ks, cfg) | ||||||
| 	setGPO(ctx, &cfg.GPO) | 	setGPO(ctx, &cfg.GPO) | ||||||
|  | 	setTxPool(ctx, &cfg.TxPool) | ||||||
| 	setEthash(ctx, cfg) | 	setEthash(ctx, cfg) | ||||||
|  |  | ||||||
| 	switch { | 	switch { | ||||||
|   | |||||||
| @@ -246,11 +246,11 @@ func (l *txList) Overlaps(tx *types.Transaction) bool { | |||||||
| // | // | ||||||
| // If the new transaction is accepted into the list, the lists' cost threshold | // If the new transaction is accepted into the list, the lists' cost threshold | ||||||
| // is also potentially updated. | // is also potentially updated. | ||||||
| func (l *txList) Add(tx *types.Transaction) (bool, *types.Transaction) { | func (l *txList) Add(tx *types.Transaction, priceBump uint64) (bool, *types.Transaction) { | ||||||
| 	// If there's an older better transaction, abort | 	// If there's an older better transaction, abort | ||||||
| 	old := l.txs.Get(tx.Nonce()) | 	old := l.txs.Get(tx.Nonce()) | ||||||
| 	if old != nil { | 	if old != nil { | ||||||
| 		threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+minPriceBumpPercent)), big.NewInt(100)) | 		threshold := new(big.Int).Div(new(big.Int).Mul(old.GasPrice(), big.NewInt(100+int64(priceBump))), big.NewInt(100)) | ||||||
| 		if threshold.Cmp(tx.GasPrice()) >= 0 { | 		if threshold.Cmp(tx.GasPrice()) >= 0 { | ||||||
| 			return false, nil | 			return false, nil | ||||||
| 		} | 		} | ||||||
|   | |||||||
| @@ -38,7 +38,7 @@ func TestStrictTxListAdd(t *testing.T) { | |||||||
| 	// Insert the transactions in a random order | 	// Insert the transactions in a random order | ||||||
| 	list := newTxList(true) | 	list := newTxList(true) | ||||||
| 	for _, v := range rand.Perm(len(txs)) { | 	for _, v := range rand.Perm(len(txs)) { | ||||||
| 		list.Add(txs[v]) | 		list.Add(txs[v], DefaultTxPoolConfig.PriceBump) | ||||||
| 	} | 	} | ||||||
| 	// Verify internal state | 	// Verify internal state | ||||||
| 	if len(list.txs.items) != len(txs) { | 	if len(list.txs.items) != len(txs) { | ||||||
|   | |||||||
							
								
								
									
										101
									
								
								core/tx_pool.go
									
									
									
									
									
								
							
							
						
						
									
										101
									
								
								core/tx_pool.go
									
									
									
									
									
								
							| @@ -48,14 +48,8 @@ var ( | |||||||
| ) | ) | ||||||
|  |  | ||||||
| var ( | var ( | ||||||
| 	minPendingPerAccount = uint64(16)      // Min number of guaranteed transaction slots per address | 	evictionInterval    = time.Minute     // Time interval to check for evictable transactions | ||||||
| 	maxPendingTotal      = uint64(4096)    // Max limit of pending transactions from all accounts (soft) | 	statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats | ||||||
| 	maxQueuedPerAccount  = uint64(64)      // Max limit of queued transactions per address |  | ||||||
| 	maxQueuedTotal       = uint64(1024)    // Max limit of queued transactions from all accounts |  | ||||||
| 	maxQueuedLifetime    = 3 * time.Hour   // Max amount of time transactions from idle accounts are queued |  | ||||||
| 	minPriceBumpPercent  = int64(10)       // Minimum price bump needed to replace an old transaction |  | ||||||
| 	evictionInterval     = time.Minute     // Time interval to check for evictable transactions |  | ||||||
| 	statsReportInterval  = 8 * time.Second // Time interval to report transaction pool stats |  | ||||||
| ) | ) | ||||||
|  |  | ||||||
| var ( | var ( | ||||||
| @@ -78,6 +72,48 @@ var ( | |||||||
|  |  | ||||||
| type stateFn func() (*state.StateDB, error) | type stateFn func() (*state.StateDB, error) | ||||||
|  |  | ||||||
|  | // TxPoolConfig are the configuration parameters of the transaction pool. | ||||||
|  | type TxPoolConfig struct { | ||||||
|  | 	PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool | ||||||
|  | 	PriceBump  uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) | ||||||
|  |  | ||||||
|  | 	AccountSlots uint64 // Minimum number of executable transaction slots guaranteed per account | ||||||
|  | 	GlobalSlots  uint64 // Maximum number of executable transaction slots for all accounts | ||||||
|  | 	AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account | ||||||
|  | 	GlobalQueue  uint64 // Maximum number of non-executable transaction slots for all accounts | ||||||
|  |  | ||||||
|  | 	Lifetime time.Duration // Maximum amount of time non-executable transactions are queued | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // DefaultTxPoolConfig contains the default configurations for the transaction | ||||||
|  | // pool. | ||||||
|  | var DefaultTxPoolConfig = TxPoolConfig{ | ||||||
|  | 	PriceLimit: 1, | ||||||
|  | 	PriceBump:  10, | ||||||
|  |  | ||||||
|  | 	AccountSlots: 16, | ||||||
|  | 	GlobalSlots:  4096, | ||||||
|  | 	AccountQueue: 64, | ||||||
|  | 	GlobalQueue:  1024, | ||||||
|  |  | ||||||
|  | 	Lifetime: 3 * time.Hour, | ||||||
|  | } | ||||||
|  |  | ||||||
|  | // sanitize checks the provided user configurations and changes anything that's | ||||||
|  | // unreasonable or unworkable. | ||||||
|  | func (config *TxPoolConfig) sanitize() TxPoolConfig { | ||||||
|  | 	conf := *config | ||||||
|  | 	if conf.PriceLimit < 1 { | ||||||
|  | 		log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultTxPoolConfig.PriceLimit) | ||||||
|  | 		conf.PriceLimit = DefaultTxPoolConfig.PriceLimit | ||||||
|  | 	} | ||||||
|  | 	if conf.PriceBump < 1 { | ||||||
|  | 		log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultTxPoolConfig.PriceBump) | ||||||
|  | 		conf.PriceBump = DefaultTxPoolConfig.PriceBump | ||||||
|  | 	} | ||||||
|  | 	return conf | ||||||
|  | } | ||||||
|  |  | ||||||
| // TxPool contains all currently known transactions. Transactions | // TxPool contains all currently known transactions. Transactions | ||||||
| // enter the pool when they are received from the network or submitted | // enter the pool when they are received from the network or submitted | ||||||
| // locally. They exit the pool when they are included in the blockchain. | // locally. They exit the pool when they are included in the blockchain. | ||||||
| @@ -86,7 +122,8 @@ type stateFn func() (*state.StateDB, error) | |||||||
| // current state) and future transactions. Transactions move between those | // current state) and future transactions. Transactions move between those | ||||||
| // two states over time as they are received and processed. | // two states over time as they are received and processed. | ||||||
| type TxPool struct { | type TxPool struct { | ||||||
| 	config       *params.ChainConfig | 	config       TxPoolConfig | ||||||
|  | 	chainconfig  *params.ChainConfig | ||||||
| 	currentState stateFn // The state function which will allow us to do some pre checks | 	currentState stateFn // The state function which will allow us to do some pre checks | ||||||
| 	pendingState *state.ManagedState | 	pendingState *state.ManagedState | ||||||
| 	gasLimit     func() *big.Int // The current gas limit function callback | 	gasLimit     func() *big.Int // The current gas limit function callback | ||||||
| @@ -109,10 +146,17 @@ type TxPool struct { | |||||||
| 	homestead bool | 	homestead bool | ||||||
| } | } | ||||||
|  |  | ||||||
| func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool { | // NewTxPool creates a new transaction pool to gather, sort and filter inbound | ||||||
|  | // transactions from the network. | ||||||
|  | func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, eventMux *event.TypeMux, currentStateFn stateFn, gasLimitFn func() *big.Int) *TxPool { | ||||||
|  | 	// Sanitize the input to ensure no vulnerable gas prices are set | ||||||
|  | 	config = (&config).sanitize() | ||||||
|  |  | ||||||
|  | 	// Create the transaction pool with its initial settings | ||||||
| 	pool := &TxPool{ | 	pool := &TxPool{ | ||||||
| 		config:       config, | 		config:       config, | ||||||
| 		signer:       types.NewEIP155Signer(config.ChainId), | 		chainconfig:  chainconfig, | ||||||
|  | 		signer:       types.NewEIP155Signer(chainconfig.ChainId), | ||||||
| 		pending:      make(map[common.Address]*txList), | 		pending:      make(map[common.Address]*txList), | ||||||
| 		queue:        make(map[common.Address]*txList), | 		queue:        make(map[common.Address]*txList), | ||||||
| 		beats:        make(map[common.Address]time.Time), | 		beats:        make(map[common.Address]time.Time), | ||||||
| @@ -120,7 +164,7 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState | |||||||
| 		eventMux:     eventMux, | 		eventMux:     eventMux, | ||||||
| 		currentState: currentStateFn, | 		currentState: currentStateFn, | ||||||
| 		gasLimit:     gasLimitFn, | 		gasLimit:     gasLimitFn, | ||||||
| 		gasPrice:     big.NewInt(1), | 		gasPrice:     new(big.Int).SetUint64(config.PriceLimit), | ||||||
| 		pendingState: nil, | 		pendingState: nil, | ||||||
| 		locals:       newTxSet(), | 		locals:       newTxSet(), | ||||||
| 		events:       eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}), | 		events:       eventMux.Subscribe(ChainHeadEvent{}, RemovedTransactionEvent{}), | ||||||
| @@ -129,6 +173,7 @@ func NewTxPool(config *params.ChainConfig, eventMux *event.TypeMux, currentState | |||||||
| 	pool.priced = newTxPricedList(&pool.all) | 	pool.priced = newTxPricedList(&pool.all) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
|  | 	// Start the various events loops and return | ||||||
| 	pool.wg.Add(2) | 	pool.wg.Add(2) | ||||||
| 	go pool.eventLoop() | 	go pool.eventLoop() | ||||||
| 	go pool.expirationLoop() | 	go pool.expirationLoop() | ||||||
| @@ -159,7 +204,7 @@ func (pool *TxPool) eventLoop() { | |||||||
| 			case ChainHeadEvent: | 			case ChainHeadEvent: | ||||||
| 				pool.mu.Lock() | 				pool.mu.Lock() | ||||||
| 				if ev.Block != nil { | 				if ev.Block != nil { | ||||||
| 					if pool.config.IsHomestead(ev.Block.Number()) { | 					if pool.chainconfig.IsHomestead(ev.Block.Number()) { | ||||||
| 						pool.homestead = true | 						pool.homestead = true | ||||||
| 					} | 					} | ||||||
| 				} | 				} | ||||||
| @@ -388,7 +433,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) { | |||||||
| 		return false, err | 		return false, err | ||||||
| 	} | 	} | ||||||
| 	// If the transaction pool is full, discard underpriced transactions | 	// If the transaction pool is full, discard underpriced transactions | ||||||
| 	if uint64(len(pool.all)) >= maxPendingTotal+maxQueuedTotal { | 	if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue { | ||||||
| 		// If the new transaction is underpriced, don't accept it | 		// If the new transaction is underpriced, don't accept it | ||||||
| 		if pool.priced.Underpriced(tx, pool.locals) { | 		if pool.priced.Underpriced(tx, pool.locals) { | ||||||
| 			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) | 			log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) | ||||||
| @@ -396,7 +441,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) { | |||||||
| 			return false, ErrUnderpriced | 			return false, ErrUnderpriced | ||||||
| 		} | 		} | ||||||
| 		// New transaction is better than our worse ones, make room for it | 		// New transaction is better than our worse ones, make room for it | ||||||
| 		drop := pool.priced.Discard(len(pool.all)-int(maxPendingTotal+maxQueuedTotal-1), pool.locals) | 		drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) | ||||||
| 		for _, tx := range drop { | 		for _, tx := range drop { | ||||||
| 			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) | 			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) | ||||||
| 			underpricedTxCounter.Inc(1) | 			underpricedTxCounter.Inc(1) | ||||||
| @@ -407,7 +452,7 @@ func (pool *TxPool) add(tx *types.Transaction) (bool, error) { | |||||||
| 	from, _ := types.Sender(pool.signer, tx) // already validated | 	from, _ := types.Sender(pool.signer, tx) // already validated | ||||||
| 	if list := pool.pending[from]; list != nil && list.Overlaps(tx) { | 	if list := pool.pending[from]; list != nil && list.Overlaps(tx) { | ||||||
| 		// Nonce already pending, check if required price bump is met | 		// Nonce already pending, check if required price bump is met | ||||||
| 		inserted, old := list.Add(tx) | 		inserted, old := list.Add(tx, pool.config.PriceBump) | ||||||
| 		if !inserted { | 		if !inserted { | ||||||
| 			pendingDiscardCounter.Inc(1) | 			pendingDiscardCounter.Inc(1) | ||||||
| 			return false, ErrReplaceUnderpriced | 			return false, ErrReplaceUnderpriced | ||||||
| @@ -442,7 +487,7 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er | |||||||
| 	if pool.queue[from] == nil { | 	if pool.queue[from] == nil { | ||||||
| 		pool.queue[from] = newTxList(false) | 		pool.queue[from] = newTxList(false) | ||||||
| 	} | 	} | ||||||
| 	inserted, old := pool.queue[from].Add(tx) | 	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump) | ||||||
| 	if !inserted { | 	if !inserted { | ||||||
| 		// An older transaction was better, discard this | 		// An older transaction was better, discard this | ||||||
| 		queuedDiscardCounter.Inc(1) | 		queuedDiscardCounter.Inc(1) | ||||||
| @@ -469,7 +514,7 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T | |||||||
| 	} | 	} | ||||||
| 	list := pool.pending[addr] | 	list := pool.pending[addr] | ||||||
|  |  | ||||||
| 	inserted, old := list.Add(tx) | 	inserted, old := list.Add(tx, pool.config.PriceBump) | ||||||
| 	if !inserted { | 	if !inserted { | ||||||
| 		// An older transaction was better, discard this | 		// An older transaction was better, discard this | ||||||
| 		delete(pool.all, hash) | 		delete(pool.all, hash) | ||||||
| @@ -644,7 +689,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 			pool.promoteTx(addr, hash, tx) | 			pool.promoteTx(addr, hash, tx) | ||||||
| 		} | 		} | ||||||
| 		// Drop all transactions over the allowed limit | 		// Drop all transactions over the allowed limit | ||||||
| 		for _, tx := range list.Cap(int(maxQueuedPerAccount)) { | 		for _, tx := range list.Cap(int(pool.config.AccountQueue)) { | ||||||
| 			hash := tx.Hash() | 			hash := tx.Hash() | ||||||
| 			log.Trace("Removed cap-exceeding queued transaction", "hash", hash) | 			log.Trace("Removed cap-exceeding queued transaction", "hash", hash) | ||||||
| 			delete(pool.all, hash) | 			delete(pool.all, hash) | ||||||
| @@ -663,13 +708,13 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 	for _, list := range pool.pending { | 	for _, list := range pool.pending { | ||||||
| 		pending += uint64(list.Len()) | 		pending += uint64(list.Len()) | ||||||
| 	} | 	} | ||||||
| 	if pending > maxPendingTotal { | 	if pending > pool.config.GlobalSlots { | ||||||
| 		pendingBeforeCap := pending | 		pendingBeforeCap := pending | ||||||
| 		// Assemble a spam order to penalize large transactors first | 		// Assemble a spam order to penalize large transactors first | ||||||
| 		spammers := prque.New() | 		spammers := prque.New() | ||||||
| 		for addr, list := range pool.pending { | 		for addr, list := range pool.pending { | ||||||
| 			// Only evict transactions from high rollers | 			// Only evict transactions from high rollers | ||||||
| 			if uint64(list.Len()) > minPendingPerAccount { | 			if uint64(list.Len()) > pool.config.AccountSlots { | ||||||
| 				// Skip local accounts as pools should maintain backlogs for themselves | 				// Skip local accounts as pools should maintain backlogs for themselves | ||||||
| 				for _, tx := range list.txs.items { | 				for _, tx := range list.txs.items { | ||||||
| 					if !pool.locals.contains(tx.Hash()) { | 					if !pool.locals.contains(tx.Hash()) { | ||||||
| @@ -681,7 +726,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 		} | 		} | ||||||
| 		// Gradually drop transactions from offenders | 		// Gradually drop transactions from offenders | ||||||
| 		offenders := []common.Address{} | 		offenders := []common.Address{} | ||||||
| 		for pending > maxPendingTotal && !spammers.Empty() { | 		for pending > pool.config.GlobalSlots && !spammers.Empty() { | ||||||
| 			// Retrieve the next offender if not local address | 			// Retrieve the next offender if not local address | ||||||
| 			offender, _ := spammers.Pop() | 			offender, _ := spammers.Pop() | ||||||
| 			offenders = append(offenders, offender.(common.Address)) | 			offenders = append(offenders, offender.(common.Address)) | ||||||
| @@ -692,7 +737,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 				threshold := pool.pending[offender.(common.Address)].Len() | 				threshold := pool.pending[offender.(common.Address)].Len() | ||||||
|  |  | ||||||
| 				// Iteratively reduce all offenders until below limit or threshold reached | 				// Iteratively reduce all offenders until below limit or threshold reached | ||||||
| 				for pending > maxPendingTotal && pool.pending[offenders[len(offenders)-2]].Len() > threshold { | 				for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { | ||||||
| 					for i := 0; i < len(offenders)-1; i++ { | 					for i := 0; i < len(offenders)-1; i++ { | ||||||
| 						list := pool.pending[offenders[i]] | 						list := pool.pending[offenders[i]] | ||||||
| 						list.Cap(list.Len() - 1) | 						list.Cap(list.Len() - 1) | ||||||
| @@ -702,8 +747,8 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 		// If still above threshold, reduce to limit or min allowance | 		// If still above threshold, reduce to limit or min allowance | ||||||
| 		if pending > maxPendingTotal && len(offenders) > 0 { | 		if pending > pool.config.GlobalSlots && len(offenders) > 0 { | ||||||
| 			for pending > maxPendingTotal && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > minPendingPerAccount { | 			for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { | ||||||
| 				for _, addr := range offenders { | 				for _, addr := range offenders { | ||||||
| 					list := pool.pending[addr] | 					list := pool.pending[addr] | ||||||
| 					list.Cap(list.Len() - 1) | 					list.Cap(list.Len() - 1) | ||||||
| @@ -714,7 +759,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 		pendingRLCounter.Inc(int64(pendingBeforeCap - pending)) | 		pendingRLCounter.Inc(int64(pendingBeforeCap - pending)) | ||||||
| 	} | 	} | ||||||
| 	// If we've queued more transactions than the hard limit, drop oldest ones | 	// If we've queued more transactions than the hard limit, drop oldest ones | ||||||
| 	if queued > maxQueuedTotal { | 	if queued > pool.config.GlobalQueue { | ||||||
| 		// Sort all accounts with queued transactions by heartbeat | 		// Sort all accounts with queued transactions by heartbeat | ||||||
| 		addresses := make(addresssByHeartbeat, 0, len(pool.queue)) | 		addresses := make(addresssByHeartbeat, 0, len(pool.queue)) | ||||||
| 		for addr := range pool.queue { | 		for addr := range pool.queue { | ||||||
| @@ -723,7 +768,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { | |||||||
| 		sort.Sort(addresses) | 		sort.Sort(addresses) | ||||||
|  |  | ||||||
| 		// Drop transactions until the total is below the limit | 		// Drop transactions until the total is below the limit | ||||||
| 		for drop := queued - maxQueuedTotal; drop > 0; { | 		for drop := queued - pool.config.GlobalQueue; drop > 0; { | ||||||
| 			addr := addresses[len(addresses)-1] | 			addr := addresses[len(addresses)-1] | ||||||
| 			list := pool.queue[addr.address] | 			list := pool.queue[addr.address] | ||||||
|  |  | ||||||
| @@ -800,7 +845,7 @@ func (pool *TxPool) expirationLoop() { | |||||||
| 		case <-evict.C: | 		case <-evict.C: | ||||||
| 			pool.mu.Lock() | 			pool.mu.Lock() | ||||||
| 			for addr := range pool.queue { | 			for addr := range pool.queue { | ||||||
| 				if time.Since(pool.beats[addr]) > maxQueuedLifetime { | 				if time.Since(pool.beats[addr]) > pool.config.Lifetime { | ||||||
| 					for _, tx := range pool.queue[addr].Flatten() { | 					for _, tx := range pool.queue[addr].Flatten() { | ||||||
| 						pool.removeTx(tx.Hash()) | 						pool.removeTx(tx.Hash()) | ||||||
| 					} | 					} | ||||||
|   | |||||||
| @@ -46,7 +46,7 @@ func setupTxPool() (*TxPool, *ecdsa.PrivateKey) { | |||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	key, _ := crypto.GenerateKey() | 	key, _ := crypto.GenerateKey() | ||||||
| 	newPool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	newPool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	newPool.resetState() | 	newPool.resetState() | ||||||
|  |  | ||||||
| 	return newPool, key | 	return newPool, key | ||||||
| @@ -95,7 +95,7 @@ func TestStateChangeDuringPoolReset(t *testing.T) { | |||||||
|  |  | ||||||
| 	gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) } | 	gasLimitFunc := func() *big.Int { return big.NewInt(1000000000) } | ||||||
|  |  | ||||||
| 	txpool := NewTxPool(params.TestChainConfig, mux, stateFunc, gasLimitFunc) | 	txpool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, mux, stateFunc, gasLimitFunc) | ||||||
| 	txpool.resetState() | 	txpool.resetState() | ||||||
|  |  | ||||||
| 	nonce := txpool.State().GetNonce(address) | 	nonce := txpool.State().GetNonce(address) | ||||||
| @@ -533,25 +533,25 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { | |||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Keep queuing up transactions and make sure all above a limit are dropped | 	// Keep queuing up transactions and make sure all above a limit are dropped | ||||||
| 	for i := uint64(1); i <= maxQueuedPerAccount+5; i++ { | 	for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue+5; i++ { | ||||||
| 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil { | 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil { | ||||||
| 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | ||||||
| 		} | 		} | ||||||
| 		if len(pool.pending) != 0 { | 		if len(pool.pending) != 0 { | ||||||
| 			t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) | 			t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) | ||||||
| 		} | 		} | ||||||
| 		if i <= maxQueuedPerAccount { | 		if i <= DefaultTxPoolConfig.AccountQueue { | ||||||
| 			if pool.queue[account].Len() != int(i) { | 			if pool.queue[account].Len() != int(i) { | ||||||
| 				t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) | 				t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) | ||||||
| 			} | 			} | ||||||
| 		} else { | 		} else { | ||||||
| 			if pool.queue[account].Len() != int(maxQueuedPerAccount) { | 			if pool.queue[account].Len() != int(DefaultTxPoolConfig.AccountQueue) { | ||||||
| 				t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), maxQueuedPerAccount) | 				t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), DefaultTxPoolConfig.AccountQueue) | ||||||
| 			} | 			} | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	if len(pool.all) != int(maxQueuedPerAccount) { | 	if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue) { | ||||||
| 		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount) | 		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -559,14 +559,14 @@ func TestTransactionQueueAccountLimiting(t *testing.T) { | |||||||
| // some threshold, the higher transactions are dropped to prevent DOS attacks. | // some threshold, the higher transactions are dropped to prevent DOS attacks. | ||||||
| func TestTransactionQueueGlobalLimiting(t *testing.T) { | func TestTransactionQueueGlobalLimiting(t *testing.T) { | ||||||
| 	// Reduce the queue limits to shorten test time | 	// Reduce the queue limits to shorten test time | ||||||
| 	defer func(old uint64) { maxQueuedTotal = old }(maxQueuedTotal) | 	defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue) | ||||||
| 	maxQueuedTotal = maxQueuedPerAccount * 3 | 	DefaultTxPoolConfig.GlobalQueue = DefaultTxPoolConfig.AccountQueue * 3 | ||||||
|  |  | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	db, _ := ethdb.NewMemDatabase() | 	db, _ := ethdb.NewMemDatabase() | ||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Create a number of test accounts and fund them | 	// Create a number of test accounts and fund them | ||||||
| @@ -580,7 +580,7 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) { | |||||||
| 	// Generate and queue a batch of transactions | 	// Generate and queue a batch of transactions | ||||||
| 	nonces := make(map[common.Address]uint64) | 	nonces := make(map[common.Address]uint64) | ||||||
|  |  | ||||||
| 	txs := make(types.Transactions, 0, 3*maxQueuedTotal) | 	txs := make(types.Transactions, 0, 3*DefaultTxPoolConfig.GlobalQueue) | ||||||
| 	for len(txs) < cap(txs) { | 	for len(txs) < cap(txs) { | ||||||
| 		key := keys[rand.Intn(len(keys))] | 		key := keys[rand.Intn(len(keys))] | ||||||
| 		addr := crypto.PubkeyToAddress(key.PublicKey) | 		addr := crypto.PubkeyToAddress(key.PublicKey) | ||||||
| @@ -593,13 +593,13 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) { | |||||||
|  |  | ||||||
| 	queued := 0 | 	queued := 0 | ||||||
| 	for addr, list := range pool.queue { | 	for addr, list := range pool.queue { | ||||||
| 		if list.Len() > int(maxQueuedPerAccount) { | 		if list.Len() > int(DefaultTxPoolConfig.AccountQueue) { | ||||||
| 			t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), maxQueuedPerAccount) | 			t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), DefaultTxPoolConfig.AccountQueue) | ||||||
| 		} | 		} | ||||||
| 		queued += list.Len() | 		queued += list.Len() | ||||||
| 	} | 	} | ||||||
| 	if queued > int(maxQueuedTotal) { | 	if queued > int(DefaultTxPoolConfig.GlobalQueue) { | ||||||
| 		t.Fatalf("total transactions overflow allowance: %d > %d", queued, maxQueuedTotal) | 		t.Fatalf("total transactions overflow allowance: %d > %d", queued, DefaultTxPoolConfig.GlobalQueue) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -608,9 +608,9 @@ func TestTransactionQueueGlobalLimiting(t *testing.T) { | |||||||
| // on shuffling them around. | // on shuffling them around. | ||||||
| func TestTransactionQueueTimeLimiting(t *testing.T) { | func TestTransactionQueueTimeLimiting(t *testing.T) { | ||||||
| 	// Reduce the queue limits to shorten test time | 	// Reduce the queue limits to shorten test time | ||||||
| 	defer func(old time.Duration) { maxQueuedLifetime = old }(maxQueuedLifetime) | 	defer func(old time.Duration) { DefaultTxPoolConfig.Lifetime = old }(DefaultTxPoolConfig.Lifetime) | ||||||
| 	defer func(old time.Duration) { evictionInterval = old }(evictionInterval) | 	defer func(old time.Duration) { evictionInterval = old }(evictionInterval) | ||||||
| 	maxQueuedLifetime = time.Second | 	DefaultTxPoolConfig.Lifetime = time.Second | ||||||
| 	evictionInterval = time.Second | 	evictionInterval = time.Second | ||||||
|  |  | ||||||
| 	// Create a test account and fund it | 	// Create a test account and fund it | ||||||
| @@ -621,7 +621,7 @@ func TestTransactionQueueTimeLimiting(t *testing.T) { | |||||||
| 	state.AddBalance(account, big.NewInt(1000000)) | 	state.AddBalance(account, big.NewInt(1000000)) | ||||||
|  |  | ||||||
| 	// Queue up a batch of transactions | 	// Queue up a batch of transactions | ||||||
| 	for i := uint64(1); i <= maxQueuedPerAccount; i++ { | 	for i := uint64(1); i <= DefaultTxPoolConfig.AccountQueue; i++ { | ||||||
| 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil { | 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil { | ||||||
| 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | ||||||
| 		} | 		} | ||||||
| @@ -646,7 +646,7 @@ func TestTransactionPendingLimiting(t *testing.T) { | |||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Keep queuing up transactions and make sure all above a limit are dropped | 	// Keep queuing up transactions and make sure all above a limit are dropped | ||||||
| 	for i := uint64(0); i < maxQueuedPerAccount+5; i++ { | 	for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ { | ||||||
| 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil { | 		if err := pool.Add(transaction(i, big.NewInt(100000), key)); err != nil { | ||||||
| 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | ||||||
| 		} | 		} | ||||||
| @@ -657,8 +657,8 @@ func TestTransactionPendingLimiting(t *testing.T) { | |||||||
| 			t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) | 			t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| 	if len(pool.all) != int(maxQueuedPerAccount+5) { | 	if len(pool.all) != int(DefaultTxPoolConfig.AccountQueue+5) { | ||||||
| 		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), maxQueuedPerAccount+5) | 		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), DefaultTxPoolConfig.AccountQueue+5) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -674,7 +674,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) { | |||||||
| 	state1, _ := pool1.currentState() | 	state1, _ := pool1.currentState() | ||||||
| 	state1.AddBalance(account1, big.NewInt(1000000)) | 	state1.AddBalance(account1, big.NewInt(1000000)) | ||||||
|  |  | ||||||
| 	for i := uint64(0); i < maxQueuedPerAccount+5; i++ { | 	for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ { | ||||||
| 		if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil { | 		if err := pool1.Add(transaction(origin+i, big.NewInt(100000), key1)); err != nil { | ||||||
| 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | 			t.Fatalf("tx %d: failed to add transaction: %v", i, err) | ||||||
| 		} | 		} | ||||||
| @@ -686,7 +686,7 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) { | |||||||
| 	state2.AddBalance(account2, big.NewInt(1000000)) | 	state2.AddBalance(account2, big.NewInt(1000000)) | ||||||
|  |  | ||||||
| 	txns := []*types.Transaction{} | 	txns := []*types.Transaction{} | ||||||
| 	for i := uint64(0); i < maxQueuedPerAccount+5; i++ { | 	for i := uint64(0); i < DefaultTxPoolConfig.AccountQueue+5; i++ { | ||||||
| 		txns = append(txns, transaction(origin+i, big.NewInt(100000), key2)) | 		txns = append(txns, transaction(origin+i, big.NewInt(100000), key2)) | ||||||
| 	} | 	} | ||||||
| 	pool2.AddBatch(txns) | 	pool2.AddBatch(txns) | ||||||
| @@ -708,14 +708,14 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) { | |||||||
| // attacks. | // attacks. | ||||||
| func TestTransactionPendingGlobalLimiting(t *testing.T) { | func TestTransactionPendingGlobalLimiting(t *testing.T) { | ||||||
| 	// Reduce the queue limits to shorten test time | 	// Reduce the queue limits to shorten test time | ||||||
| 	defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal) | 	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots) | ||||||
| 	maxPendingTotal = minPendingPerAccount * 10 | 	DefaultTxPoolConfig.GlobalSlots = DefaultTxPoolConfig.AccountSlots * 10 | ||||||
|  |  | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	db, _ := ethdb.NewMemDatabase() | 	db, _ := ethdb.NewMemDatabase() | ||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Create a number of test accounts and fund them | 	// Create a number of test accounts and fund them | ||||||
| @@ -732,7 +732,7 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { | |||||||
| 	txs := types.Transactions{} | 	txs := types.Transactions{} | ||||||
| 	for _, key := range keys { | 	for _, key := range keys { | ||||||
| 		addr := crypto.PubkeyToAddress(key.PublicKey) | 		addr := crypto.PubkeyToAddress(key.PublicKey) | ||||||
| 		for j := 0; j < int(maxPendingTotal)/len(keys)*2; j++ { | 		for j := 0; j < int(DefaultTxPoolConfig.GlobalSlots)/len(keys)*2; j++ { | ||||||
| 			txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key)) | 			txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key)) | ||||||
| 			nonces[addr]++ | 			nonces[addr]++ | ||||||
| 		} | 		} | ||||||
| @@ -744,8 +744,8 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { | |||||||
| 	for _, list := range pool.pending { | 	for _, list := range pool.pending { | ||||||
| 		pending += list.Len() | 		pending += list.Len() | ||||||
| 	} | 	} | ||||||
| 	if pending > int(maxPendingTotal) { | 	if pending > int(DefaultTxPoolConfig.GlobalSlots) { | ||||||
| 		t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, maxPendingTotal) | 		t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, DefaultTxPoolConfig.GlobalSlots) | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
|  |  | ||||||
| @@ -754,14 +754,14 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) { | |||||||
| // the transactions are still kept. | // the transactions are still kept. | ||||||
| func TestTransactionPendingMinimumAllowance(t *testing.T) { | func TestTransactionPendingMinimumAllowance(t *testing.T) { | ||||||
| 	// Reduce the queue limits to shorten test time | 	// Reduce the queue limits to shorten test time | ||||||
| 	defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal) | 	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots) | ||||||
| 	maxPendingTotal = 0 | 	DefaultTxPoolConfig.GlobalSlots = 0 | ||||||
|  |  | ||||||
| 	// Create the pool to test the limit enforcement with | 	// Create the pool to test the limit enforcement with | ||||||
| 	db, _ := ethdb.NewMemDatabase() | 	db, _ := ethdb.NewMemDatabase() | ||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Create a number of test accounts and fund them | 	// Create a number of test accounts and fund them | ||||||
| @@ -778,7 +778,7 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { | |||||||
| 	txs := types.Transactions{} | 	txs := types.Transactions{} | ||||||
| 	for _, key := range keys { | 	for _, key := range keys { | ||||||
| 		addr := crypto.PubkeyToAddress(key.PublicKey) | 		addr := crypto.PubkeyToAddress(key.PublicKey) | ||||||
| 		for j := 0; j < int(minPendingPerAccount)*2; j++ { | 		for j := 0; j < int(DefaultTxPoolConfig.AccountSlots)*2; j++ { | ||||||
| 			txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key)) | 			txs = append(txs, transaction(nonces[addr], big.NewInt(100000), key)) | ||||||
| 			nonces[addr]++ | 			nonces[addr]++ | ||||||
| 		} | 		} | ||||||
| @@ -787,8 +787,8 @@ func TestTransactionPendingMinimumAllowance(t *testing.T) { | |||||||
| 	pool.AddBatch(txs) | 	pool.AddBatch(txs) | ||||||
|  |  | ||||||
| 	for addr, list := range pool.pending { | 	for addr, list := range pool.pending { | ||||||
| 		if list.Len() != int(minPendingPerAccount) { | 		if list.Len() != int(DefaultTxPoolConfig.AccountSlots) { | ||||||
| 			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), minPendingPerAccount) | 			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), DefaultTxPoolConfig.AccountSlots) | ||||||
| 		} | 		} | ||||||
| 	} | 	} | ||||||
| } | } | ||||||
| @@ -803,7 +803,7 @@ func TestTransactionPoolRepricing(t *testing.T) { | |||||||
| 	db, _ := ethdb.NewMemDatabase() | 	db, _ := ethdb.NewMemDatabase() | ||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Create a number of test accounts and fund them | 	// Create a number of test accounts and fund them | ||||||
| @@ -874,17 +874,17 @@ func TestTransactionPoolRepricing(t *testing.T) { | |||||||
| // Note, local transactions are never allowed to be dropped. | // Note, local transactions are never allowed to be dropped. | ||||||
| func TestTransactionPoolUnderpricing(t *testing.T) { | func TestTransactionPoolUnderpricing(t *testing.T) { | ||||||
| 	// Reduce the queue limits to shorten test time | 	// Reduce the queue limits to shorten test time | ||||||
| 	defer func(old uint64) { maxPendingTotal = old }(maxPendingTotal) | 	defer func(old uint64) { DefaultTxPoolConfig.GlobalSlots = old }(DefaultTxPoolConfig.GlobalSlots) | ||||||
| 	maxPendingTotal = 2 | 	DefaultTxPoolConfig.GlobalSlots = 2 | ||||||
|  |  | ||||||
| 	defer func(old uint64) { maxQueuedTotal = old }(maxQueuedTotal) | 	defer func(old uint64) { DefaultTxPoolConfig.GlobalQueue = old }(DefaultTxPoolConfig.GlobalQueue) | ||||||
| 	maxQueuedTotal = 2 | 	DefaultTxPoolConfig.GlobalQueue = 2 | ||||||
|  |  | ||||||
| 	// Create the pool to test the pricing enforcement with | 	// Create the pool to test the pricing enforcement with | ||||||
| 	db, _ := ethdb.NewMemDatabase() | 	db, _ := ethdb.NewMemDatabase() | ||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Create a number of test accounts and fund them | 	// Create a number of test accounts and fund them | ||||||
| @@ -960,7 +960,7 @@ func TestTransactionReplacement(t *testing.T) { | |||||||
| 	db, _ := ethdb.NewMemDatabase() | 	db, _ := ethdb.NewMemDatabase() | ||||||
| 	statedb, _ := state.New(common.Hash{}, db) | 	statedb, _ := state.New(common.Hash{}, db) | ||||||
|  |  | ||||||
| 	pool := NewTxPool(params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | 	pool := NewTxPool(DefaultTxPoolConfig, params.TestChainConfig, new(event.TypeMux), func() (*state.StateDB, error) { return statedb, nil }, func() *big.Int { return big.NewInt(1000000) }) | ||||||
| 	pool.resetState() | 	pool.resetState() | ||||||
|  |  | ||||||
| 	// Create a a test account to add transactions with | 	// Create a a test account to add transactions with | ||||||
| @@ -971,7 +971,7 @@ func TestTransactionReplacement(t *testing.T) { | |||||||
|  |  | ||||||
| 	// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) | 	// Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) | ||||||
| 	price := int64(100) | 	price := int64(100) | ||||||
| 	threshold := (price * (100 + minPriceBumpPercent)) / 100 | 	threshold := (price * (100 + int64(DefaultTxPoolConfig.PriceBump))) / 100 | ||||||
|  |  | ||||||
| 	if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil { | 	if err := pool.Add(pricedTransaction(0, big.NewInt(100000), big.NewInt(1), key)); err != nil { | ||||||
| 		t.Fatalf("failed to add original cheap pending transaction: %v", err) | 		t.Fatalf("failed to add original cheap pending transaction: %v", err) | ||||||
|   | |||||||
| @@ -150,7 +150,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) { | |||||||
| 		core.WriteChainConfig(chainDb, genesisHash, chainConfig) | 		core.WriteChainConfig(chainDb, genesisHash, chainConfig) | ||||||
| 	} | 	} | ||||||
|  |  | ||||||
| 	newPool := core.NewTxPool(eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) | 	newPool := core.NewTxPool(config.TxPool, eth.chainConfig, eth.EventMux(), eth.blockchain.State, eth.blockchain.GasLimit) | ||||||
| 	eth.txPool = newPool | 	eth.txPool = newPool | ||||||
|  |  | ||||||
| 	maxPeers := config.MaxPeers | 	maxPeers := config.MaxPeers | ||||||
|   | |||||||
| @@ -44,6 +44,7 @@ var DefaultConfig = Config{ | |||||||
| 	DatabaseCache:        128, | 	DatabaseCache:        128, | ||||||
| 	GasPrice:             big.NewInt(18 * params.Shannon), | 	GasPrice:             big.NewInt(18 * params.Shannon), | ||||||
|  |  | ||||||
|  | 	TxPool: core.DefaultTxPoolConfig, | ||||||
| 	GPO: gasprice.Config{ | 	GPO: gasprice.Config{ | ||||||
| 		Blocks:     10, | 		Blocks:     10, | ||||||
| 		Percentile: 50, | 		Percentile: 50, | ||||||
| @@ -99,6 +100,9 @@ type Config struct { | |||||||
| 	EthashDatasetsInMem  int | 	EthashDatasetsInMem  int | ||||||
| 	EthashDatasetsOnDisk int | 	EthashDatasetsOnDisk int | ||||||
|  |  | ||||||
|  | 	// Transaction pool options | ||||||
|  | 	TxPool core.TxPoolConfig | ||||||
|  |  | ||||||
| 	// Gas Price Oracle options | 	// Gas Price Oracle options | ||||||
| 	GPO gasprice.Config | 	GPO gasprice.Config | ||||||
|  |  | ||||||
|   | |||||||
| @@ -33,6 +33,7 @@ func (c Config) MarshalTOML() (interface{}, error) { | |||||||
| 		EthashDatasetDir        string | 		EthashDatasetDir        string | ||||||
| 		EthashDatasetsInMem     int | 		EthashDatasetsInMem     int | ||||||
| 		EthashDatasetsOnDisk    int | 		EthashDatasetsOnDisk    int | ||||||
|  | 		TxPool                  core.TxPoolConfig | ||||||
| 		GPO                     gasprice.Config | 		GPO                     gasprice.Config | ||||||
| 		EnablePreimageRecording bool | 		EnablePreimageRecording bool | ||||||
| 		DocRoot                 string `toml:"-"` | 		DocRoot                 string `toml:"-"` | ||||||
| @@ -60,6 +61,7 @@ func (c Config) MarshalTOML() (interface{}, error) { | |||||||
| 	enc.EthashDatasetDir = c.EthashDatasetDir | 	enc.EthashDatasetDir = c.EthashDatasetDir | ||||||
| 	enc.EthashDatasetsInMem = c.EthashDatasetsInMem | 	enc.EthashDatasetsInMem = c.EthashDatasetsInMem | ||||||
| 	enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk | 	enc.EthashDatasetsOnDisk = c.EthashDatasetsOnDisk | ||||||
|  | 	enc.TxPool = c.TxPool | ||||||
| 	enc.GPO = c.GPO | 	enc.GPO = c.GPO | ||||||
| 	enc.EnablePreimageRecording = c.EnablePreimageRecording | 	enc.EnablePreimageRecording = c.EnablePreimageRecording | ||||||
| 	enc.DocRoot = c.DocRoot | 	enc.DocRoot = c.DocRoot | ||||||
| @@ -90,6 +92,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { | |||||||
| 		EthashDatasetDir        *string | 		EthashDatasetDir        *string | ||||||
| 		EthashDatasetsInMem     *int | 		EthashDatasetsInMem     *int | ||||||
| 		EthashDatasetsOnDisk    *int | 		EthashDatasetsOnDisk    *int | ||||||
|  | 		TxPool                  *core.TxPoolConfig | ||||||
| 		GPO                     *gasprice.Config | 		GPO                     *gasprice.Config | ||||||
| 		EnablePreimageRecording *bool | 		EnablePreimageRecording *bool | ||||||
| 		DocRoot                 *string `toml:"-"` | 		DocRoot                 *string `toml:"-"` | ||||||
| @@ -158,6 +161,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { | |||||||
| 	if dec.EthashDatasetsOnDisk != nil { | 	if dec.EthashDatasetsOnDisk != nil { | ||||||
| 		c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk | 		c.EthashDatasetsOnDisk = *dec.EthashDatasetsOnDisk | ||||||
| 	} | 	} | ||||||
|  | 	if dec.TxPool != nil { | ||||||
|  | 		c.TxPool = *dec.TxPool | ||||||
|  | 	} | ||||||
| 	if dec.GPO != nil { | 	if dec.GPO != nil { | ||||||
| 		c.GPO = *dec.GPO | 		c.GPO = *dec.GPO | ||||||
| 	} | 	} | ||||||
|   | |||||||
		Reference in New Issue
	
	Block a user