Skip to content

Commit

Permalink
Implement eventual map realloc with threshold
Browse files Browse the repository at this point in the history
  • Loading branch information
mgnsk committed Oct 5, 2023
1 parent 0a7d376 commit 684df4f
Show file tree
Hide file tree
Showing 2 changed files with 157 additions and 22 deletions.
74 changes: 52 additions & 22 deletions internal/backend/backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,11 @@ type Record[V any] struct {
// Element is the cache element.
type Element[V any] *list.Element[Record[V]]

// NewElement wraps the given value into a fresh cache element.
func NewElement[V any](value V) Element[V] {
	record := Record[V]{Value: value}
	return list.NewElement(record)
}

// RecordMap is the cache's record map.
type RecordMap[K comparable, V any] map[K]Element[V]

Expand All @@ -33,7 +38,9 @@ type Backend[K comparable, V any] struct {
list list.List[Record[V]]
earliestExpireAt int64
cap int
reallocThreshold int // if map hits this size and then shrinks by half, it is reallocated
largestLen int // the map has at least this capacity
needRealloc bool
once sync.Once
sync.RWMutex
}
Expand All @@ -44,10 +51,11 @@ func NewBackend[K comparable, V any](capacity int) *Backend[K, V] {
<-t.C

return &Backend[K, V]{
timer: t,
done: make(chan struct{}),
xmap: make(RecordMap[K, V], capacity),
cap: capacity,
timer: t,
done: make(chan struct{}),
xmap: make(RecordMap[K, V], capacity),
cap: capacity,
reallocThreshold: 100000, // 100000 * pointer size
}
}

Expand Down Expand Up @@ -120,18 +128,20 @@ func (b *Backend[K, V]) Evict(key K) (Element[V], bool) {
}

// Delete a record from the backend map.
// When half of the records are deleted, the map is reallocated.
// When capacity is 0 and half of the records are deleted, the map will be eventually reallocated.
func (b *Backend[K, V]) Delete(key K) {
if b.cap == 0 {
if n := len(b.xmap); n >= b.reallocThreshold || b.largestLen > 0 && n > b.largestLen {
b.largestLen = n
}
}

delete(b.xmap, key)

if n := len(b.xmap); n > b.largestLen {
b.largestLen = n
} else if n <= b.largestLen/2 {
m := make(RecordMap[K, V], n)
for k, v := range b.xmap {
m[k] = v
}
b.xmap = m
if b.largestLen > 0 && len(b.xmap) <= b.largestLen/2 {
b.largestLen = 0
b.needRealloc = true
b.timer.Reset(0)
}
}

Expand Down Expand Up @@ -161,19 +171,19 @@ func (b *Backend[K, V]) startGCOnce() {
b.timer.Stop()
return
case now := <-b.timer.C:
b.runGC(now.UnixNano())
b.RunGC(now.UnixNano())
}
}
}()
})
}

func (b *Backend[K, V]) runGC(now int64) {
// RunGC runs map cleanup.
func (b *Backend[K, V]) RunGC(now int64) {
b.Lock()
defer b.Unlock()

var overflowed map[Element[V]]bool

if n := b.overflow(); n > 0 {
overflowed = make(map[Element[V]]bool, n)

Expand All @@ -186,26 +196,46 @@ func (b *Backend[K, V]) runGC(now int64) {
}
}

var newMap RecordMap[K, V]
if b.needRealloc {
b.needRealloc = false
newMap = make(RecordMap[K, V], len(b.xmap)-len(overflowed))
defer func() {
b.xmap = newMap
}()
}

var earliest int64
defer func() {
b.earliestExpireAt = earliest
if earliest > 0 {
b.timer.Reset(time.Duration(earliest - now))
}
}()

for key, elem := range b.xmap {
if elem.Value.Initialized.Load() {
if len(overflowed) > 0 && overflowed[elem] {
delete(overflowed, elem)
b.Delete(key)
b.list.Remove(elem)
} else if elem.Value.Deadline > 0 && elem.Value.Deadline < now {
continue
}

if elem.Value.Deadline > 0 && elem.Value.Deadline < now {
b.Delete(key)
b.list.Remove(elem)
} else if elem.Value.Deadline > 0 && (earliest == 0 || elem.Value.Deadline < earliest) {
continue
}

if elem.Value.Deadline > 0 && (earliest == 0 || elem.Value.Deadline < earliest) {
earliest = elem.Value.Deadline
}
}
}

b.earliestExpireAt = earliest
if earliest > 0 {
b.timer.Reset(time.Duration(earliest - now))
if newMap != nil {
newMap[key] = elem
}
}
}

Expand Down
105 changes: 105 additions & 0 deletions internal/backend/backend_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
package backend_test

import (
"testing"
"time"

"github.com/mgnsk/evcache/v3/internal/backend"
. "github.com/onsi/gomega"
)

// Note: hardcoded realloc threshold value in the backend.
const size = 100000

// newBackend creates an unbounded (capacity 0) backend pre-populated with
// `size` initialized records keyed 0..size-1.
func newBackend(size int) *backend.Backend[int, int] {
	be := backend.NewBackend[int, int](0)

	for key := 0; key < size; key++ {
		element := backend.NewElement(key)
		be.LoadOrStore(key, element)

		be.Lock()
		be.PushBack(element, 0)
		be.Unlock()
	}

	return be
}

// TestUnlimitedCapacityMapShrink exercises the eventual map reallocation:
// deleting fewer than half of the records must not trigger a realloc, while
// deleting exactly half must.
func TestUnlimitedCapacityMapShrink(t *testing.T) {
	t.Run("no realloc", func(t *testing.T) {
		g := NewWithT(t)

		be := newBackend(size)
		g.Expect(be.Len()).To(Equal(size))
		g.Expect(getMapLen(be)).To(Equal(size))

		// Evict one fewer than half of the records so the shrink
		// threshold is not reached.
		for key := 0; key < size/2-1; key++ {
			be.Evict(key)
		}

		be.RunGC(time.Now().UnixNano())

		remaining := size/2 + 1
		g.Expect(be.Len()).To(Equal(remaining))
		g.Expect(getMapLen(be)).To(Equal(remaining))

		// TODO: assert that map is the same
	})

	t.Run("realloc", func(t *testing.T) {
		g := NewWithT(t)

		be := newBackend(size)
		g.Expect(be.Len()).To(Equal(size))
		g.Expect(getMapLen(be)).To(Equal(size))

		// Evict exactly half of the records to cross the shrink threshold.
		for key := 0; key < size/2; key++ {
			be.Evict(key)
		}

		be.RunGC(time.Now().UnixNano())

		remaining := size / 2
		g.Expect(be.Len()).To(Equal(remaining))
		g.Expect(getMapLen(be)).To(Equal(remaining))

		// TODO: assert that map is new
	})
}

// TestUnlimitedCapacityMapShrinkUninitializedRecords verifies that an
// uninitialized record (stored in the map but never pushed to the list)
// is counted by the map length but not by the list length, and that it
// survives a shrink-triggered reallocation.
func TestUnlimitedCapacityMapShrinkUninitializedRecords(t *testing.T) {
	t.Run("realloc", func(t *testing.T) {
		g := NewWithT(t)

		be := newBackend(size - 1)
		g.Expect(be.Len()).To(Equal(size - 1))
		g.Expect(getMapLen(be)).To(Equal(size - 1))

		// Store uninitialized record.
		element := backend.NewElement(size - 1)
		be.LoadOrStore(size-1, element)

		g.Expect(be.Len()).To(Equal(size-1), "list len only initialized records")
		g.Expect(getMapLen(be)).To(Equal(size), "map len also uninitialized")

		// Evict half of records.
		for key := 0; key < size/2; key++ {
			be.Evict(key)
		}

		be.RunGC(time.Now().UnixNano())

		g.Expect(be.Len()).To(Equal((size/2)-1), "list len only initialized records")
		g.Expect(getMapLen(be)).To(Equal(size/2), "map len also uninitialized")
	})
}

// getMapLen counts the entries visible through Range, i.e. the backend
// map's length (the sibling tests assert this also covers records that
// were never pushed to the list).
func getMapLen(b *backend.Backend[int, int]) int {
	var count int

	b.Range(func(_ int, _ backend.Element[int]) bool {
		count++
		return true
	})

	return count
}

0 comments on commit 684df4f

Please sign in to comment.