diff --git a/cache.go b/cache.go
index ad833f9..e59f600 100644
--- a/cache.go
+++ b/cache.go
@@ -75,8 +75,8 @@ func (c *Cache[K, V]) Get(key K) (value V, exists bool) {
 //
 // Range is allowed to modify the cache.
 func (c *Cache[K, V]) Range(f func(key K, value V) bool) {
-	c.backend.Range(func(key K, elem *ringlist.Element[backend.Record[V]]) bool {
-		return f(key, elem.Value.Value)
+	c.backend.Range(func(elem *ringlist.Element[backend.Record[K, V]]) bool {
+		return f(elem.Value.Key, elem.Value.Value)
 	})
 }
 
@@ -129,16 +129,16 @@ func (c *Cache[K, V]) Fetch(key K, ttl time.Duration, f func() (V, error)) (valu
 
 // TryFetch is like Fetch but allows the TTL to be returned alongside the value from callback.
 func (c *Cache[K, V]) TryFetch(key K, f func() (V, time.Duration, error)) (value V, err error) {
-	newElem := c.backend.Reserve()
+	newElem := c.backend.Reserve(key)
 
-	if elem, loaded := c.backend.LoadOrStore(key, newElem); loaded {
+	if elem, loaded := c.backend.LoadOrStore(newElem); loaded {
 		c.backend.Release(newElem)
 		return elem.Value.Value, nil
 	}
 
 	defer func() {
 		if r := recover(); r != nil {
-			c.backend.Discard(key, newElem)
+			c.backend.Discard(newElem)
 
 			panic(r)
 		}
@@ -146,7 +146,7 @@ func (c *Cache[K, V]) TryFetch(key K, f func() (V, time.Duration, error)) (value
 
 	value, ttl, err := f()
 	if err != nil {
-		c.backend.Discard(key, newElem)
+		c.backend.Discard(newElem)
 
 		var zero V
 		return zero, err
diff --git a/cache_test.go b/cache_test.go
index 9278959..24c9b92 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -181,9 +181,7 @@ func TestOverflow(t *testing.T) {
 		c.LoadOrStore(i, 0, 0)
 	}
 
-	EventuallyTrue(t, func() bool {
-		return c.Len() == capacity
-	})
+	Equal(t, c.Len(), capacity)
 }
 
 func TestExpire(t *testing.T) {
diff --git a/internal/backend/backend.go b/internal/backend/backend.go
index 80e08a0..df5ad5a 100644
--- a/internal/backend/backend.go
+++ b/internal/backend/backend.go
@@ -16,15 +16,14 @@ type Backend[K comparable, V any] struct {
 	Policy           Policy
 	timer            *time.Timer
 	done             chan struct{}
-	xmap             map[K]*ringlist.Element[Record[V]] // map of uninitialized and initialized elements
-	list             ringlist.List[Record[V]]           // list of initialized elements
-	pool             sync.Pool                          // pool of elements
+	xmap             map[K]*ringlist.Element[Record[K, V]] // map of uninitialized and initialized elements
+	list             ringlist.List[Record[K, V]]           // list of initialized elements
+	pool             sync.Pool                             // pool of elements
 	earliestExpireAt int64
 	cap              int
 	lastLen          int
 	numDeleted       uint64
-	needRealloc      bool
-	once             sync.Once
+	gcStarted        bool
 	mu               sync.Mutex
 }
 
@@ -36,7 +35,7 @@ func NewBackend[K comparable, V any](capacity int) *Backend[K, V] {
 	return &Backend[K, V]{
 		timer: t,
 		done:  make(chan struct{}),
-		xmap:  make(map[K]*ringlist.Element[Record[V]], capacity),
+		xmap:  make(map[K]*ringlist.Element[Record[K, V]], capacity),
 		cap:   capacity,
 	}
 }
@@ -56,35 +55,35 @@ func (b *Backend[K, V]) Len() int {
 }
 
 // Reserve a new uninitialized element.
-func (b *Backend[K, V]) Reserve() *ringlist.Element[Record[V]] {
-	elem, ok := b.pool.Get().(*ringlist.Element[Record[V]])
+func (b *Backend[K, V]) Reserve(key K) *ringlist.Element[Record[K, V]] {
+	elem, ok := b.pool.Get().(*ringlist.Element[Record[K, V]])
 	if !ok {
-		elem = ringlist.NewElement(Record[V]{})
+		elem = ringlist.NewElement(Record[K, V]{})
 	}
 
+	elem.Value.Key = key
 	elem.Value.wg.Add(1)
 
 	return elem
 }
 
 // Release a reserved uninitialized element.
-func (b *Backend[K, V]) Release(elem *ringlist.Element[Record[V]]) {
-	defer elem.Value.wg.Done()
-
+func (b *Backend[K, V]) Release(elem *ringlist.Element[Record[K, V]]) {
+	elem.Value.Key = *new(K)
+	elem.Value.wg.Done()
 	b.pool.Put(elem)
 }
 
 // Discard a reserved uninitialized element.
-func (b *Backend[K, V]) Discard(key K, elem *ringlist.Element[Record[V]]) {
-	defer elem.Value.wg.Done()
-
+func (b *Backend[K, V]) Discard(elem *ringlist.Element[Record[K, V]]) {
 	b.mu.Lock()
-	delete(b.xmap, key)
+	delete(b.xmap, elem.Value.Key)
+	elem.Value.wg.Done()
 	b.mu.Unlock()
 }
 
 // Initialize a previously stored uninitialized element.
-func (b *Backend[K, V]) Initialize(elem *ringlist.Element[Record[V]], value V, ttl time.Duration) {
+func (b *Backend[K, V]) Initialize(elem *ringlist.Element[Record[K, V]], value V, ttl time.Duration) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
@@ -99,9 +98,10 @@ func (b *Backend[K, V]) Initialize(elem *ringlist.Element[Record[V]], value V, t
 	b.list.PushBack(elem)
 
 	if n := b.overflow(); n > 0 {
-		b.startGCOnce()
-		b.timer.Reset(0)
-	} else if elem.Value.deadline > 0 {
+		b.delete(b.list.Front())
+	}
+
+	if elem.Value.deadline > 0 {
 		b.startGCOnce()
 		if b.earliestExpireAt == 0 || elem.Value.deadline < b.earliestExpireAt {
 			b.earliestExpireAt = elem.Value.deadline
@@ -110,7 +110,7 @@
 	}
 }
 
-func (b *Backend[K, V]) hit(elem *ringlist.Element[Record[V]]) {
+func (b *Backend[K, V]) hit(elem *ringlist.Element[Record[K, V]]) {
 	switch b.Policy {
 	case Default:
 	case LFU:
@@ -121,7 +121,7 @@
 }
 
 // Load an initialized element.
-func (b *Backend[K, V]) Load(key K) (value *ringlist.Element[Record[V]], ok bool) {
+func (b *Backend[K, V]) Load(key K) (value *ringlist.Element[Record[K, V]], ok bool) {
 	b.mu.Lock()
 	defer b.mu.Unlock()
 
@@ -134,10 +134,10 @@
 }
 
 // LoadOrStore loads or stores an element.
-func (b *Backend[K, V]) LoadOrStore(key K, new *ringlist.Element[Record[V]]) (old *ringlist.Element[Record[V]], loaded bool) {
+func (b *Backend[K, V]) LoadOrStore(new *ringlist.Element[Record[K, V]]) (old *ringlist.Element[Record[K, V]], loaded bool) {
 tryLoadStore:
 	b.mu.Lock()
-	if elem, ok := b.xmap[key]; ok {
+	if elem, ok := b.xmap[new.Value.Key]; ok {
 		if elem.Value.initialized {
 			b.hit(elem)
 			b.mu.Unlock()
@@ -149,7 +149,7 @@ tryLoadStore:
 		goto tryLoadStore
 	}
 
-	b.xmap[key] = new
+	b.xmap[new.Value.Key] = new
 
 	b.mu.Unlock()
 
@@ -157,7 +157,7 @@ tryLoadStore:
 }
 
 // Range iterates over initialized cache elements in no particular order or consistency.
-func (b *Backend[K, V]) Range(f func(key K, r *ringlist.Element[Record[V]]) bool) {
+func (b *Backend[K, V]) Range(f func(r *ringlist.Element[Record[K, V]]) bool) {
 	b.mu.Lock()
 	keys := make([]K, 0, len(b.xmap))
 	for key := range b.xmap {
@@ -170,7 +170,7 @@ func (b *Backend[K, V]) Range(f func(key K, r *ringlist.Element[Record[V]]) bool
 		elem, ok := b.xmap[key]
 		initialized := ok && elem.Value.initialized
 		b.mu.Unlock()
-		if initialized && !f(key, elem) {
+		if initialized && !f(elem) {
 			return
 		}
 	}
@@ -184,7 +184,7 @@ func (b *Backend[K, V]) Evict(key K) (V, bool) {
 	var zero V
 
 	if elem, ok := b.xmap[key]; ok && elem.Value.initialized {
-		b.delete(key, elem)
+		b.delete(elem)
 		return elem.Value.Value, true
 	}
 
@@ -199,8 +199,8 @@ func (b *Backend[K, V]) overflow() int {
 	return 0
 }
 
-func (b *Backend[K, V]) delete(key K, elem *ringlist.Element[Record[V]]) {
-	delete(b.xmap, key)
+func (b *Backend[K, V]) delete(elem *ringlist.Element[Record[K, V]]) {
+	delete(b.xmap, elem.Value.Key)
 	b.list.Remove(elem)
 
 	b.numDeleted++
@@ -211,25 +211,28 @@ func (b *Backend[K, V]) delete(key K, elem *ringlist.Element[Record[V]]) {
 	if b.numDeleted > uint64(b.lastLen)/2 {
 		b.numDeleted = 0
 		b.lastLen = b.list.Len()
-		b.needRealloc = true
-		b.timer.Reset(0)
+		b.xmap = maps.Clone(b.xmap)
 	}
 }
 
 func (b *Backend[K, V]) startGCOnce() {
-	b.once.Do(func() {
-		go func() {
-			for {
-				select {
-				case <-b.done:
-					b.timer.Stop()
-					return
-				case now := <-b.timer.C:
-					b.RunGC(now.UnixNano())
-				}
+	if b.gcStarted {
+		return
+	}
+
+	b.gcStarted = true
+
+	go func() {
+		for {
+			select {
+			case <-b.done:
+				b.timer.Stop()
+				return
+			case now := <-b.timer.C:
+				b.RunGC(now.UnixNano())
 			}
-		}()
-	})
+		}
+	}()
 }
 
 func (b *Backend[K, V]) RunGC(now int64) {
@@ -237,47 +240,27 @@
 	defer b.mu.Unlock()
 
 	var (
-		overflowed    map[*ringlist.Element[Record[V]]]bool
-		numOverflowed = b.overflow()
+		earliest int64
+		deleted  []*ringlist.Element[Record[K, V]]
 	)
 
-	if numOverflowed > 0 {
-		overflowed = make(map[*ringlist.Element[Record[V]]]bool, numOverflowed)
-		b.list.Do(func(e *ringlist.Element[Record[V]]) bool {
-			if len(overflowed) == numOverflowed {
-				return false
-			}
-
-			overflowed[e] = true
-
-			return true
-		})
-	}
-
-	var earliest int64
-
-	for key, elem := range b.xmap {
-		if len(overflowed) > 0 && overflowed[elem] {
-			delete(overflowed, elem)
-			b.delete(key, elem)
-			continue
-		}
-
+	b.list.Do(func(elem *ringlist.Element[Record[K, V]]) bool {
 		deadline := elem.Value.deadline
 		if deadline > 0 && deadline < now {
-			b.delete(key, elem)
-			continue
+			deleted = append(deleted, elem)
+			return true
 		}
 
 		if deadline > 0 && (earliest == 0 || deadline < earliest) {
 			earliest = deadline
 		}
-	}
 
-	if b.needRealloc {
-		b.needRealloc = false
-		b.xmap = maps.Clone(b.xmap)
+		return true
+	})
+
+	for _, elem := range deleted {
+		b.delete(elem)
 	}
 
 	b.earliestExpireAt = earliest
 
diff --git a/internal/backend/backend_test.go b/internal/backend/backend_test.go
index 06842c8..5980f44 100644
--- a/internal/backend/backend_test.go
+++ b/internal/backend/backend_test.go
@@ -4,7 +4,6 @@ import (
 	"maps"
 	"runtime"
 	"testing"
-	"time"
 
 	"github.com/mgnsk/evcache/v3/internal/backend"
"github.com/mgnsk/evcache/v3/internal/testing" @@ -19,18 +18,18 @@ func TestReallocUninitializedRecords(t *testing.T) { t.Log("filling the cache") for i := 0; i < size; i++ { - elem, _ := b.LoadOrStore(i, b.Reserve()) + elem, _ := b.LoadOrStore(b.Reserve(i)) b.Initialize(elem, i, 0) } t.Log("asserting number of initialized elements is correct") assertCacheLen(t, b, size) - var elem *ringlist.Element[backend.Record[int]] + var elem *ringlist.Element[backend.Record[int, int]] t.Log("storing a new uninitialized element") - elem = b.Reserve() - _, loaded := b.LoadOrStore(size, elem) + elem = b.Reserve(size) + _, loaded := b.LoadOrStore(elem) Equal(t, loaded, false) t.Log("asserting number of initialized has not changed") @@ -42,9 +41,6 @@ func TestReallocUninitializedRecords(t *testing.T) { Equal(t, ok, true) } - t.Log("by running GC to force realloc") - b.RunGC(time.Now().UnixNano()) - t.Log("asserting number of initialized elements is correct") assertCacheLen(t, b, size/2) @@ -60,8 +56,8 @@ func TestDeleteUninitializedElement(t *testing.T) { b := backend.NewBackend[int, int](0) t.Log("storing a new uninitialized element") - elem := b.Reserve() - _, loaded := b.LoadOrStore(0, elem) + elem := b.Reserve(0) + _, loaded := b.LoadOrStore(elem) Equal(t, loaded, false) assertCacheLen(t, b, 0) @@ -69,7 +65,7 @@ func TestDeleteUninitializedElement(t *testing.T) { Equal(t, evicted, false) assertCacheLen(t, b, 0) - b.Discard(0, elem) + b.Discard(elem) assertCacheLen(t, b, 0) } @@ -188,7 +184,7 @@ func getMemStats() uint64 { func getMapRangeCount[K comparable, V any](b *backend.Backend[K, V]) int { n := 0 - b.Range(func(K, *ringlist.Element[backend.Record[V]]) bool { + b.Range(func(*ringlist.Element[backend.Record[K, V]]) bool { n++ return true }) @@ -205,8 +201,8 @@ func assertCacheLen[K comparable, V any](t *testing.T, be *backend.Backend[K, V] func fillCache(t *testing.T, b *backend.Backend[int, int], capacity int) { for i := 0; i < capacity; i++ { - elem := b.Reserve() - _, loaded := b.LoadOrStore(i, elem) + elem := b.Reserve(i) + _, loaded := b.LoadOrStore(elem) Equal(t, loaded, false) b.Initialize(elem, 0, 0) } @@ -251,27 +247,25 @@ func overflowAndCollectKeys(t *testing.T, b *backend.Backend[int, int], capacity // Collect all cache keys, then overflow the cache and observe which key disappears. 
t.Log("collecting current cache state") oldKeys := map[int]struct{}{} - b.Range(func(key int, _ *ringlist.Element[backend.Record[int]]) bool { - oldKeys[key] = struct{}{} + b.Range(func(elem *ringlist.Element[backend.Record[int, int]]) bool { + oldKeys[elem.Value.Key] = struct{}{} return true }) Equal(t, len(oldKeys), capacity) t.Logf("store: %v", i) - elem := b.Reserve() - _, loaded := b.LoadOrStore(i, elem) + elem := b.Reserve(i) + _, loaded := b.LoadOrStore(elem) Equal(t, loaded, false) b.Initialize(elem, 0, 0) - t.Log("expecting GC to run") - EventuallyTrue(t, func() bool { - return b.Len() == capacity - }) + t.Logf("expected overflowed element was evicted") + assertCacheLen(t, b, capacity) t.Log("collecting new cache state") newKeys := map[int]struct{}{} - b.Range(func(key int, _ *ringlist.Element[backend.Record[int]]) bool { - newKeys[key] = struct{}{} + b.Range(func(elem *ringlist.Element[backend.Record[int, int]]) bool { + newKeys[elem.Value.Key] = struct{}{} return true }) diff --git a/internal/backend/benchmark_test.go b/internal/backend/benchmark_test.go index 1f10df2..87c7473 100644 --- a/internal/backend/benchmark_test.go +++ b/internal/backend/benchmark_test.go @@ -22,11 +22,11 @@ func BenchmarkSliceLoop(b *testing.B) { 1e6, } { b.Run(fmt.Sprint(n), newTimePerElementBench( - func() ([]*backend.Record[int], int) { - items := createSlice[int](n, nil) + func() ([]*backend.Record[int, int], int) { + items := createSlice[int, int](n, nil) return items, len(items) }, - func(items []*backend.Record[int]) { + func(items []*backend.Record[int, int]) { for _, elem := range items { if elem.Value > 0 { panic("expected zero") @@ -45,11 +45,11 @@ func BenchmarkSliceLoop(b *testing.B) { 1e6, } { b.Run(fmt.Sprint(n), newTimePerElementBench( - func() ([]*backend.Record[atomic.Uint64], int) { - items := createSlice[atomic.Uint64](n, nil) + func() ([]*backend.Record[int, atomic.Uint64], int) { + items := createSlice[int, atomic.Uint64](n, nil) return items, len(items) }, - func(items []*backend.Record[atomic.Uint64]) { + func(items []*backend.Record[int, atomic.Uint64]) { for _, elem := range items { if elem.Value.Load() > 0 { panic("expected zero") @@ -69,11 +69,11 @@ func BenchmarkMapIter(b *testing.B) { 1e6, } { b.Run(fmt.Sprint(n), newTimePerElementBench( - func() (map[int]*backend.Record[int], int) { + func() (map[int]*backend.Record[int, int], int) { m := createMap[int, int](n, nil) return m, n }, - func(m map[int]*backend.Record[int]) { + func(m map[int]*backend.Record[int, int]) { for _, elem := range m { if elem.Value > 0 { panic("expected zero") @@ -93,14 +93,14 @@ func BenchmarkSliceSort(b *testing.B) { 1e6, } { b.Run(fmt.Sprint(n), newTimePerElementBench( - func() ([]*backend.Record[int], int) { - items := createSlice(n, func(v *int) { + func() ([]*backend.Record[int, int], int) { + items := createSlice[int, int](n, func(_ *int, v *int) { *v = rand.Int() }) return items, len(items) }, - func(items []*backend.Record[int]) { - slices.SortFunc(items, func(a, b *backend.Record[int]) int { + func(items []*backend.Record[int, int]) { + slices.SortFunc(items, func(a, b *backend.Record[int, int]) int { return cmp.Compare(a.Value, b.Value) }) }, @@ -116,14 +116,14 @@ func BenchmarkSliceSort(b *testing.B) { 1e6, } { b.Run(fmt.Sprint(n), newTimePerElementBench( - func() ([]*backend.Record[atomic.Uint64], int) { - items := createSlice(n, func(u *atomic.Uint64) { + func() ([]*backend.Record[int, atomic.Uint64], int) { + items := createSlice(n, func(_ *int, u *atomic.Uint64) { 
 					u.Store(rand.Uint64())
 				})
 				return items, len(items)
 			},
-			func(items []*backend.Record[atomic.Uint64]) {
-				slices.SortFunc(items, func(a, b *backend.Record[atomic.Uint64]) int {
+			func(items []*backend.Record[int, atomic.Uint64]) {
+				slices.SortFunc(items, func(a, b *backend.Record[int, atomic.Uint64]) int {
 					return cmp.Compare(a.Value.Load(), b.Value.Load())
 				})
 			},
@@ -143,8 +143,8 @@ func BenchmarkBackendGC(b *testing.B) {
 			func() (*backend.Backend[int, int], int) {
 				be := backend.NewBackend[int, int](n)
 				for i := 0; i < n; i++ {
-					elem := be.Reserve()
-					_, loaded := be.LoadOrStore(i, elem)
+					elem := be.Reserve(i)
+					_, loaded := be.LoadOrStore(elem)
 					Equal(b, loaded, false)
 					be.Initialize(elem, 0, 0)
 				}
@@ -171,8 +171,8 @@
 				be.Policy = backend.LFU
 
 				for i := 0; i < n; i++ {
-					elem := be.Reserve()
-					_, loaded := be.LoadOrStore(i, elem)
+					elem := be.Reserve(i)
+					_, loaded := be.LoadOrStore(elem)
 					Equal(b, loaded, false)
 					be.Initialize(elem, 0, 0)
 				}
@@ -199,8 +199,8 @@
 				be.Policy = backend.LRU
 
 				for i := 0; i < n; i++ {
-					elem := be.Reserve()
-					_, loaded := be.LoadOrStore(i, elem)
+					elem := be.Reserve(i)
+					_, loaded := be.LoadOrStore(elem)
 					Equal(b, loaded, false)
 					be.Initialize(elem, 0, 0)
 				}
@@ -214,20 +214,20 @@
 	}
 }
 
-func createSlice[V any](n int, valueFn func(*V)) []*backend.Record[V] {
-	items := make([]*backend.Record[V], n)
+func createSlice[K comparable, V any](n int, valueFn func(*K, *V)) []*backend.Record[K, V] {
+	items := make([]*backend.Record[K, V], n)
 	for i := 0; i < len(items); i++ {
-		items[i] = &backend.Record[V]{}
+		items[i] = &backend.Record[K, V]{}
 		if valueFn != nil {
-			valueFn(&items[i].Value)
+			valueFn(&items[i].Key, &items[i].Value)
 		}
 	}
 
 	return items
 }
 
-func createMap[K comparable, V any](n int, valueFn func(*K, *V)) map[K]*backend.Record[V] {
-	m := make(map[K]*backend.Record[V], n)
+func createMap[K comparable, V any](n int, valueFn func(*K, *V)) map[K]*backend.Record[K, V] {
+	m := make(map[K]*backend.Record[K, V], n)
 	for i := 0; i < n; i++ {
 		key := *new(K)
 		value := *new(V)
@@ -236,7 +236,7 @@ func createMap[K comparable, V any](n int, valueFn func(*K, *V)) map[K]*backend.
 			valueFn(&key, &value)
 		}
 
-		m[key] = &backend.Record[V]{Value: value}
+		m[key] = &backend.Record[K, V]{Value: value}
 	}
 
 	return m
diff --git a/internal/backend/element.go b/internal/backend/element.go
index 015c1a6..ada5383 100644
--- a/internal/backend/element.go
+++ b/internal/backend/element.go
@@ -5,7 +5,8 @@ import (
 )
 
 // Record is a cache record.
-type Record[V any] struct {
+type Record[K comparable, V any] struct {
+	Key         K
 	Value       V
 	deadline    int64
 	initialized bool
diff --git a/internal/backend/element_test.go b/internal/backend/element_test.go
new file mode 100644
index 0000000..a950660
--- /dev/null
+++ b/internal/backend/element_test.go
@@ -0,0 +1,15 @@
+package backend_test
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/mgnsk/evcache/v3/internal/backend"
+	. "github.com/mgnsk/evcache/v3/internal/testing"
+)
+
+func TestRecordSize(t *testing.T) {
+	rt := reflect.TypeOf(backend.Record[int, int]{})
+
+	Equal(t, int(rt.Size()), 48)
+}
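
Reviewer note, not part of the patch: below is a minimal sketch of how the reworked backend API fits together after this change. It is written against the signatures shown in the diff above; the ringlist import path is inferred from the module layout (it is not shown in this diff), and loadValue is a hypothetical stand-in for a caller's fetch callback.

package backend_test

import (
	"fmt"
	"time"

	"github.com/mgnsk/evcache/v3/internal/backend"
	"github.com/mgnsk/evcache/v3/internal/ringlist" // assumed path, not shown in this diff
)

// usageSketch walks the reserve/store/initialize flow. The key now travels
// on the record itself, so LoadOrStore, Discard and delete derive it from
// elem.Value.Key instead of taking it as a separate argument.
func usageSketch() (int, error) {
	b := backend.NewBackend[string, int](2)

	// Reserve stamps the key onto the uninitialized record.
	elem := b.Reserve("a")

	if old, loaded := b.LoadOrStore(elem); loaded {
		// Lost the race: another caller owns "a"; recycle the reservation.
		b.Release(elem)
		return old.Value.Value, nil
	}

	value, err := loadValue() // hypothetical fetch callback
	if err != nil {
		// Discard drops the reservation; it no longer needs the key passed in.
		b.Discard(elem)
		return 0, err
	}

	// Initialize publishes the value. Overflow is now evicted synchronously
	// here, so Len() is already back within capacity when it returns.
	b.Initialize(elem, value, time.Minute)

	// Range callbacks receive only the element; the key is read off the record.
	b.Range(func(e *ringlist.Element[backend.Record[string, int]]) bool {
		fmt.Println(e.Value.Key, e.Value.Value)
		return true
	})

	return value, nil
}

func loadValue() (int, error) { return 42, nil } // stand-in for a real loader

The sketch mirrors what TryFetch in cache.go now does: reserve with the key, race via LoadOrStore, then Release, Discard, or Initialize the element exactly once.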