les: implement new client pool (#19745)
@@ -42,6 +42,12 @@ type Clock interface {
 	Now() AbsTime
 	Sleep(time.Duration)
 	After(time.Duration) <-chan time.Time
+	AfterFunc(d time.Duration, f func()) Event
 }
 
+// Event represents a cancellable event returned by AfterFunc
+type Event interface {
+	Cancel() bool
+}
+
 // System implements Clock using the system clock.
@@ -61,3 +67,16 @@ func (System) Sleep(d time.Duration) {
 func (System) After(d time.Duration) <-chan time.Time {
 	return time.After(d)
 }
+
+// AfterFunc implements Clock.
+func (System) AfterFunc(d time.Duration, f func()) Event {
+	return (*SystemEvent)(time.AfterFunc(d, f))
+}
+
+// SystemEvent implements Event using time.Timer.
+type SystemEvent time.Timer
+
+// Cancel implements Event.
+func (e *SystemEvent) Cancel() bool {
+	return (*time.Timer)(e).Stop()
+}
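
For orientation (illustration only, not part of the diff): the AfterFunc/Event pair added above mirrors time.AfterFunc and time.Timer.Stop behind the mclock.Clock interface, so callers can schedule cancellable callbacks without caring whether the clock is real or simulated. A minimal sketch, assuming only the API shown in the hunks above:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	// System satisfies the extended Clock interface with the new AfterFunc method.
	var clock mclock.Clock = mclock.System{}

	// Schedule a callback one second in the future...
	ev := clock.AfterFunc(time.Second, func() {
		fmt.Println("fired")
	})

	// ...and cancel it before it fires. Cancel reports whether the event was
	// stopped before running, like time.Timer.Stop.
	fmt.Println("cancelled:", ev.Cancel())
}
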
@@ -35,30 +35,44 @@ type Simulated struct {
 	scheduled []event
 	mu        sync.RWMutex
 	cond      *sync.Cond
+	lastId    uint64
 }
 
 type event struct {
 	do func()
 	at AbsTime
+	id uint64
 }
 
+// SimulatedEvent implements Event for a virtual clock.
+type SimulatedEvent struct {
+	at AbsTime
+	id uint64
+	s  *Simulated
+}
+
 // Run moves the clock by the given duration, executing all timers before that duration.
 func (s *Simulated) Run(d time.Duration) {
 	s.mu.Lock()
-	defer s.mu.Unlock()
 	s.init()
 
 	end := s.now + AbsTime(d)
+	var do []func()
 	for len(s.scheduled) > 0 {
 		ev := s.scheduled[0]
 		if ev.at > end {
 			break
 		}
 		s.now = ev.at
-		ev.do()
+		do = append(do, ev.do)
 		s.scheduled = s.scheduled[1:]
 	}
 	s.now = end
+	s.mu.Unlock()
+
+	for _, fn := range do {
+		fn()
+	}
 }
 
 func (s *Simulated) ActiveTimers() int {
@@ -94,23 +108,26 @@ func (s *Simulated) Sleep(d time.Duration) {
 // After implements Clock.
 func (s *Simulated) After(d time.Duration) <-chan time.Time {
 	after := make(chan time.Time, 1)
-	s.insert(d, func() {
+	s.AfterFunc(d, func() {
 		after <- (time.Time{}).Add(time.Duration(s.now))
 	})
 	return after
 }
 
-func (s *Simulated) insert(d time.Duration, do func()) {
+// AfterFunc implements Clock.
+func (s *Simulated) AfterFunc(d time.Duration, do func()) Event {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	s.init()
 
 	at := s.now + AbsTime(d)
+	s.lastId++
+	id := s.lastId
 	l, h := 0, len(s.scheduled)
 	ll := h
 	for l != h {
 		m := (l + h) / 2
-		if at < s.scheduled[m].at {
+		if (at < s.scheduled[m].at) || ((at == s.scheduled[m].at) && (id < s.scheduled[m].id)) {
 			h = m
 		} else {
 			l = m + 1
@@ -118,8 +135,10 @@ func (s *Simulated) insert(d time.Duration, do func()) {
 	}
 	s.scheduled = append(s.scheduled, event{})
 	copy(s.scheduled[l+1:], s.scheduled[l:ll])
-	s.scheduled[l] = event{do: do, at: at}
+	e := event{do: do, at: at, id: id}
+	s.scheduled[l] = e
 	s.cond.Broadcast()
+	return &SimulatedEvent{at: at, id: id, s: s}
 }
 
 func (s *Simulated) init() {
@@ -127,3 +146,31 @@ func (s *Simulated) init() {
 		s.cond = sync.NewCond(&s.mu)
 	}
 }
+
+// Cancel implements Event.
+func (e *SimulatedEvent) Cancel() bool {
+	s := e.s
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	l, h := 0, len(s.scheduled)
+	ll := h
+	for l != h {
+		m := (l + h) / 2
+		if e.id == s.scheduled[m].id {
+			l = m
+			break
+		}
+		if (e.at < s.scheduled[m].at) || ((e.at == s.scheduled[m].at) && (e.id < s.scheduled[m].id)) {
+			h = m
+		} else {
+			l = m + 1
+		}
+	}
+	if l >= ll || s.scheduled[l].id != e.id {
+		return false
+	}
+	copy(s.scheduled[l:ll-1], s.scheduled[l+1:])
+	s.scheduled = s.scheduled[:ll-1]
+	return true
+}
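
Illustration only (not part of the diff): with the id-ordered schedule and SimulatedEvent added above, timers on the virtual clock fire deterministically when Run advances past their deadlines and can be cancelled beforehand. A sketch assuming only the Simulated API shown in these hunks:

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	var clock mclock.Simulated

	fired := 0
	clock.AfterFunc(10*time.Second, func() { fired++ })
	later := clock.AfterFunc(30*time.Second, func() { fired++ })

	// Nothing runs until the virtual clock moves past a deadline.
	clock.Run(5 * time.Second)
	fmt.Println(fired) // 0

	// Cancel the second timer, then advance beyond both deadlines:
	// only the still-scheduled 10s callback executes.
	later.Cancel()
	clock.Run(time.Minute)
	fmt.Println(fired) // 1
}

Note that the reshuffled Run above collects the due callbacks and invokes them only after releasing the mutex, which presumably lets a callback schedule or cancel further timers without deadlocking on the clock's lock.
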
common/prque/lazyqueue.go (new file, 182 lines)

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package prque

import (
	"container/heap"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

// LazyQueue is a priority queue data structure where priorities can change over
// time and are only evaluated on demand.
// Two callbacks are required:
// - priority evaluates the actual priority of an item
// - maxPriority gives an upper estimate for the priority in any moment between
//   now and the given absolute time
// If the upper estimate is exceeded then Update should be called for that item.
// A global Refresh function should also be called periodically.
type LazyQueue struct {
	clock mclock.Clock
	// Items are stored in one of two internal queues ordered by estimated max
	// priority until the next and the next-after-next refresh. Update and Refresh
	// always places items in queue[1].
	queue       [2]*sstack
	popQueue    *sstack
	period      time.Duration
	maxUntil    mclock.AbsTime
	indexOffset int
	setIndex    SetIndexCallback
	priority    PriorityCallback
	maxPriority MaxPriorityCallback
}

type (
	PriorityCallback    func(data interface{}, now mclock.AbsTime) int64   // actual priority callback
	MaxPriorityCallback func(data interface{}, until mclock.AbsTime) int64 // estimated maximum priority callback
)

// NewLazyQueue creates a new lazy queue
func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue {
	q := &LazyQueue{
		popQueue:    newSstack(nil),
		setIndex:    setIndex,
		priority:    priority,
		maxPriority: maxPriority,
		clock:       clock,
		period:      refreshPeriod}
	q.Reset()
	q.Refresh()
	return q
}

// Reset clears the contents of the queue
func (q *LazyQueue) Reset() {
	q.queue[0] = newSstack(q.setIndex0)
	q.queue[1] = newSstack(q.setIndex1)
}

// Refresh should be called at least with the frequency specified by the refreshPeriod parameter
func (q *LazyQueue) Refresh() {
	q.maxUntil = q.clock.Now() + mclock.AbsTime(q.period)
	for q.queue[0].Len() != 0 {
		q.Push(heap.Pop(q.queue[0]).(*item).value)
	}
	q.queue[0], q.queue[1] = q.queue[1], q.queue[0]
	q.indexOffset = 1 - q.indexOffset
	q.maxUntil += mclock.AbsTime(q.period)
}

// Push adds an item to the queue
func (q *LazyQueue) Push(data interface{}) {
	heap.Push(q.queue[1], &item{data, q.maxPriority(data, q.maxUntil)})
}

// Update updates the upper priority estimate for the item with the given queue index
func (q *LazyQueue) Update(index int) {
	q.Push(q.Remove(index))
}

// Pop removes and returns the item with the greatest actual priority
func (q *LazyQueue) Pop() (interface{}, int64) {
	var (
		resData interface{}
		resPri  int64
	)
	q.MultiPop(func(data interface{}, priority int64) bool {
		resData = data
		resPri = priority
		return false
	})
	return resData, resPri
}

// peekIndex returns the index of the internal queue where the item with the
// highest estimated priority is or -1 if both are empty
func (q *LazyQueue) peekIndex() int {
	if q.queue[0].Len() != 0 {
		if q.queue[1].Len() != 0 && q.queue[1].blocks[0][0].priority > q.queue[0].blocks[0][0].priority {
			return 1
		}
		return 0
	}
	if q.queue[1].Len() != 0 {
		return 1
	}
	return -1
}

// MultiPop pops multiple items from the queue and is more efficient than calling
// Pop multiple times. Popped items are passed to the callback. MultiPop returns
// when the callback returns false or there are no more items to pop.
func (q *LazyQueue) MultiPop(callback func(data interface{}, priority int64) bool) {
	now := q.clock.Now()
	nextIndex := q.peekIndex()
	for nextIndex != -1 {
		data := heap.Pop(q.queue[nextIndex]).(*item).value
		heap.Push(q.popQueue, &item{data, q.priority(data, now)})
		nextIndex = q.peekIndex()
		for q.popQueue.Len() != 0 && (nextIndex == -1 || q.queue[nextIndex].blocks[0][0].priority < q.popQueue.blocks[0][0].priority) {
			i := heap.Pop(q.popQueue).(*item)
			if !callback(i.value, i.priority) {
				for q.popQueue.Len() != 0 {
					q.Push(heap.Pop(q.popQueue).(*item).value)
				}
				return
			}
		}
	}
}

// PopItem pops the item from the queue only, dropping the associated priority value.
func (q *LazyQueue) PopItem() interface{} {
	i, _ := q.Pop()
	return i
}

// Remove removes the item with the given index.
func (q *LazyQueue) Remove(index int) interface{} {
	if index < 0 {
		return nil
	}
	return heap.Remove(q.queue[index&1^q.indexOffset], index>>1).(*item).value
}

// Empty checks whether the priority queue is empty.
func (q *LazyQueue) Empty() bool {
	return q.queue[0].Len() == 0 && q.queue[1].Len() == 0
}

// Size returns the number of items in the priority queue.
func (q *LazyQueue) Size() int {
	return q.queue[0].Len() + q.queue[1].Len()
}

// setIndex0 translates internal queue item index to the virtual index space of LazyQueue
func (q *LazyQueue) setIndex0(data interface{}, index int) {
	if index == -1 {
		q.setIndex(data, -1)
	} else {
		q.setIndex(data, index+index)
	}
}

// setIndex1 translates internal queue item index to the virtual index space of LazyQueue
func (q *LazyQueue) setIndex1(data interface{}, index int) {
	q.setIndex(data, index+index+1)
}
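
A note on the virtual index space (illustration only, not part of the file): setIndex0 and setIndex1 above report 2*i and 2*i+1 respectively, and Remove undoes this with index&1^indexOffset (queue selection, XORed because the two queues swap roles on every Refresh) and index>>1 (position inside the selected queue). A small sketch of that arithmetic, using hypothetical helper names:

package main

import "fmt"

// decode mirrors the arithmetic in LazyQueue.Remove: the low bit of the virtual
// index selects the internal queue (adjusted by indexOffset, which flips on each
// Refresh when the queues swap), and the remaining bits are the position inside it.
func decode(virtualIndex, indexOffset int) (queue, pos int) {
	return virtualIndex&1 ^ indexOffset, virtualIndex >> 1
}

func main() {
	// An item stored via setIndex1 at internal position 3 is reported as 2*3+1 = 7.
	// Before the next Refresh (indexOffset == 0) that decodes to queue 1, position 3.
	fmt.Println(decode(7, 0)) // 1 3
	// After one Refresh the queues have swapped, so the same virtual index now
	// refers to queue 0.
	fmt.Println(decode(7, 1)) // 0 3
}
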
common/prque/lazyqueue_test.go (new file, 119 lines)

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package prque

import (
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

const (
	testItems        = 1000
	testPriorityStep = 100
	testSteps        = 1000000
	testStepPeriod   = time.Millisecond
	testQueueRefresh = time.Second
	testAvgRate      = float64(testPriorityStep) / float64(testItems) / float64(testStepPeriod)
)

type lazyItem struct {
	p, maxp int64
	last    mclock.AbsTime
	index   int
}

func testPriority(a interface{}, now mclock.AbsTime) int64 {
	return a.(*lazyItem).p
}

func testMaxPriority(a interface{}, until mclock.AbsTime) int64 {
	i := a.(*lazyItem)
	dt := until - i.last
	i.maxp = i.p + int64(float64(dt)*testAvgRate)
	return i.maxp
}

func testSetIndex(a interface{}, i int) {
	a.(*lazyItem).index = i
}

func TestLazyQueue(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	clock := &mclock.Simulated{}
	q := NewLazyQueue(testSetIndex, testPriority, testMaxPriority, clock, testQueueRefresh)

	var (
		items  [testItems]lazyItem
		maxPri int64
	)

	for i := range items[:] {
		items[i].p = rand.Int63n(testPriorityStep * 10)
		if items[i].p > maxPri {
			maxPri = items[i].p
		}
		items[i].index = -1
		q.Push(&items[i])
	}

	var lock sync.Mutex
	stopCh := make(chan chan struct{})
	go func() {
		for {
			select {
			case <-clock.After(testQueueRefresh):
				lock.Lock()
				q.Refresh()
				lock.Unlock()
			case stop := <-stopCh:
				close(stop)
				return
			}
		}
	}()

	for c := 0; c < testSteps; c++ {
		i := rand.Intn(testItems)
		lock.Lock()
		items[i].p += rand.Int63n(testPriorityStep*2-1) + 1
		if items[i].p > maxPri {
			maxPri = items[i].p
		}
		items[i].last = clock.Now()
		if items[i].p > items[i].maxp {
			q.Update(items[i].index)
		}
		if rand.Intn(100) == 0 {
			p := q.PopItem().(*lazyItem)
			if p.p != maxPri {
				t.Fatalf("incorrect item (best known priority %d, popped %d)", maxPri, p.p)
			}
			q.Push(p)
		}
		lock.Unlock()
		clock.Run(testStepPeriod)
		clock.WaitForTimers(1)
	}

	stop := make(chan struct{})
	stopCh <- stop
	<-stop
}
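
Beyond the randomized stress test above, a compact deterministic sketch of the LazyQueue API (illustration only; the task type and its static priorities are hypothetical, chosen to keep both callbacks trivial):

package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/common/prque"
)

// task is a hypothetical item type; its priority never changes, whereas the real
// point of LazyQueue is priorities that drift over time and are re-estimated lazily.
type task struct {
	name  string
	prio  int64
	index int
}

func main() {
	clock := &mclock.Simulated{}
	q := prque.NewLazyQueue(
		func(data interface{}, index int) { data.(*task).index = index },                // setIndex
		func(data interface{}, now mclock.AbsTime) int64 { return data.(*task).prio },   // priority
		func(data interface{}, until mclock.AbsTime) int64 { return data.(*task).prio }, // maxPriority
		clock,
		time.Second, // refresh period; Refresh should be called at least this often
	)

	for _, item := range []*task{{name: "low", prio: 1}, {name: "high", prio: 10}, {name: "mid", prio: 5}} {
		q.Push(item)
	}

	// Pop returns the single best item; MultiPop then drains the rest in priority
	// order until the callback returns false (here it never does, so it empties the queue).
	best, pri := q.Pop()
	fmt.Println(best.(*task).name, pri) // high 10
	q.MultiPop(func(data interface{}, priority int64) bool {
		fmt.Println(data.(*task).name, priority) // mid 5, then low 1
		return true
	})
}
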