From f56b2dcdb5d1e2790de4526caf4b0277d762c77f Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Mon, 30 Dec 2024 19:46:11 +0100 Subject: [PATCH 01/17] wip --- src/Compiler/FSharp.Compiler.Service.fsproj | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Compiler/FSharp.Compiler.Service.fsproj b/src/Compiler/FSharp.Compiler.Service.fsproj index b345d6ed1ca..f39ee5fe9fb 100644 --- a/src/Compiler/FSharp.Compiler.Service.fsproj +++ b/src/Compiler/FSharp.Compiler.Service.fsproj @@ -149,6 +149,7 @@ + From 992c3418d440a2584d7ec20b0dc748add6ae3e97 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Mon, 30 Dec 2024 19:51:48 +0100 Subject: [PATCH 02/17] wip --- src/Compiler/Utilities/Caches.fs | 143 +++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) create mode 100644 src/Compiler/Utilities/Caches.fs diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs new file mode 100644 index 00000000000..1ccb3d309b1 --- /dev/null +++ b/src/Compiler/Utilities/Caches.fs @@ -0,0 +1,143 @@ +namespace FSharp.Compiler + +open System +open System.Collections.Concurrent +open System.Threading + +[] +type CachingStrategy = + /// Least Recently Used - replaces/evicts the item not requested for the longest time. + | LRU + /// Most Recently Used - replaces/evicts the item requested most recently. + | MRU + /// Least Frequently Used - replaces/evicts the item with the least number of requests. 
+ | LFU + +[] +type EvictionMethod = Blocking | ThreadPool + +[] +type CacheOptions = + { Capacity: int + PercentageToEvict: int + Strategy: CachingStrategy + EvictionMethod: EvictionMethod + LevelOfConcurrency: int } with + static member Default = { Capacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking } + +[] +type CachedEntity<'Key, 'Value> = + val Key: 'Key + val Value: 'Value + val mutable LastAccessed: DateTimeOffset + val mutable AccessCount: uint64 + + new(key: 'Key, value: 'Value) = { Key = key; Value = value; LastAccessed = DateTimeOffset.Now; AccessCount = 0UL } + + +// TODO: This has a very naive and straightforward implementation for managing lifetimes, when evicting, will have to traverse the dictionary. +[] +type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) = + + // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. + let capacity = options.Capacity + (options.Capacity * options.PercentageToEvict / 100) + let store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) + // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. 
+ + member _.GetStats() = {| + Capacity = options.Capacity + PercentageToEvict = options.PercentageToEvict + Strategy = options.Strategy + LevelOfConcurrency = options.LevelOfConcurrency + Count = store.Count + LeastRecentlyAccesssed = store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed + MostRecentlyAccesssed = store.Values |> Seq.maxBy _.LastAccessed |> _.LastAccessed + LeastFrequentlyAccessed = store.Values |> Seq.minBy _.AccessCount |> _.AccessCount + MostFrequentlyAccessed = store.Values |> Seq.maxBy _.AccessCount |> _.AccessCount + |} + + + member private _.GetEvictCount() = + let count = store.Count + count * options.PercentageToEvict / 100 + + // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies. + member private this.TryEvictLRU () = + + printfn $"Evicting {this.GetEvictCount()} items using LRU strategy." + + let evictKeys = store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + + printfn $""" Evicting keys: {{{String.Join(", ", evictKeys)}}}""" + + if this.GetEvictCount() > 0 then + for key in evictKeys do + let _ = store.TryRemove(key) in () + + member private this.TryEvictMRU () = + printfn $"Evicting {this.GetEvictCount()} items using MRU strategy." + + let evictKeys = store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + + printfn $""" Evicting keys: {{{String.Join(", ", evictKeys)}}}""" + + if this.GetEvictCount() > 0 then + for key in evictKeys do + let _ = store.TryRemove(key) in () + + member private this.TryEvictLFU () = + printfn $"Evicting {this.GetEvictCount()} items using MRU strategy." 
+ + let evictKeys = store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + + printfn $""" Evicting keys: {{{String.Join(", ", evictKeys)}}}""" + + if this.GetEvictCount() > 0 then + for key in evictKeys do + let _ = store.TryRemove(key) in () + + member this.TryEvict() = + + let evictCount = this.GetEvictCount() + + if evictCount <= 0 then + () + else + + printfn $"Need to evict {evictCount} items." + + let evictionJob = + match options.Strategy with + | CachingStrategy.LRU -> this.TryEvictLRU + | CachingStrategy.MRU -> this.TryEvictMRU + | CachingStrategy.LFU -> fun () -> () + + + if store.Count <= options.Capacity then + () + else + // TODO: Handle any already running eviction jobs (?) + + match options.EvictionMethod with + | EvictionMethod.Blocking -> evictionJob () + | EvictionMethod.ThreadPool -> ThreadPool.QueueUserWorkItem (fun _ -> evictionJob ()) |> ignore + + + member _.TryGet(key: 'Key) = + match store.TryGetValue(key) with + | true, value -> + // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. 
+ value.LastAccessed <- DateTimeOffset.Now + value.AccessCount <- Interlocked.Increment(&value.AccessCount) + ValueSome value + | _ -> + ValueNone + + member this.Add(key: 'Key, value: 'Value) = let _ = this.TryAdd(key, value) in () + + member this.TryAdd(key: 'Key, value: 'Value) = + + if store.Count >= options.Capacity then + let _ = this.TryEvict() in () + + store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) \ No newline at end of file From 400335ba99a7723bffa185fc74d08c532db9199e Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 14:13:39 +0100 Subject: [PATCH 03/17] wip --- src/Compiler/Utilities/Caches.fs | 104 +++++++++++++++++-------------- 1 file changed, 56 insertions(+), 48 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 1ccb3d309b1..a8d6c30b027 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -3,6 +3,7 @@ namespace FSharp.Compiler open System open System.Collections.Concurrent open System.Threading +open System.Threading.Tasks [] type CachingStrategy = @@ -14,7 +15,7 @@ type CachingStrategy = | LFU [] -type EvictionMethod = Blocking | ThreadPool +type EvictionMethod = Blocking | Background [] type CacheOptions = @@ -29,19 +30,40 @@ type CacheOptions = type CachedEntity<'Key, 'Value> = val Key: 'Key val Value: 'Value - val mutable LastAccessed: DateTimeOffset + val mutable LastAccessed: int64 val mutable AccessCount: uint64 - new(key: 'Key, value: 'Value) = { Key = key; Value = value; LastAccessed = DateTimeOffset.Now; AccessCount = 0UL } + new(key: 'Key, value: 'Value) = { Key = key; Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } // TODO: This has a very naive and straightforward implementation for managing lifetimes, when evicting, will have to traverse the dictionary. 
[] -type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) = +type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. let capacity = options.Capacity + (options.Capacity * options.PercentageToEvict / 100) let store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) + let cts = new CancellationTokenSource() + + do + Task.Run(fun () -> this.TryEvictTask(), cts.Token) |> ignore + + + let cacheHit = Event<'Key * 'Value>() + let cacheMiss = Event<'Key>() + let eviction = Event<'Key * 'Value>() + + [] + member val CacheHit = cacheHit.Publish + + [] + member val CacheMiss = cacheMiss.Publish + + [] + + member val Eviction = eviction.Publish + + // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. member _.GetStats() = {| @@ -62,39 +84,30 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) = count * options.PercentageToEvict / 100 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies. - member private this.TryEvictLRU () = - - printfn $"Evicting {this.GetEvictCount()} items using LRU strategy." - - let evictKeys = store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - - printfn $""" Evicting keys: {{{String.Join(", ", evictKeys)}}}""" - - if this.GetEvictCount() > 0 then - for key in evictKeys do - let _ = store.TryRemove(key) in () - - member private this.TryEvictMRU () = - printfn $"Evicting {this.GetEvictCount()} items using MRU strategy." 
- - let evictKeys = store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - - printfn $""" Evicting keys: {{{String.Join(", ", evictKeys)}}}""" - - if this.GetEvictCount() > 0 then - for key in evictKeys do - let _ = store.TryRemove(key) in () + member private this.TryGetItemsToEvict () = + match options.Strategy with + | CachingStrategy.LRU -> store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.MRU -> store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.LFU -> store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - member private this.TryEvictLFU () = - printfn $"Evicting {this.GetEvictCount()} items using MRU strategy." - - let evictKeys = store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - - printfn $""" Evicting keys: {{{String.Join(", ", evictKeys)}}}""" + member private this.TryEvictItems () = + let evictKeys = this.TryGetItemsToEvict () if this.GetEvictCount() > 0 then for key in evictKeys do - let _ = store.TryRemove(key) in () + let (removed, value) = store.TryRemove(key) + if removed then + eviction.Trigger(key, value.Value) + + member private this.TryEvictTask () = + task { + while not cts.Token.IsCancellationRequested do + let evictCount = this.GetEvictCount() + if evictCount > 0 then + // printfn $"Evicting {evictCount} items using {options.EvictionMethod} strategy." + this.TryEvictItems () + do! Task.Delay(1000, cts.Token) + } member this.TryEvict() = @@ -103,34 +116,26 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) = if evictCount <= 0 then () else - - printfn $"Need to evict {evictCount} items." 
- - let evictionJob = - match options.Strategy with - | CachingStrategy.LRU -> this.TryEvictLRU - | CachingStrategy.MRU -> this.TryEvictMRU - | CachingStrategy.LFU -> fun () -> () - + // printfn $"Need to evict {evictCount} items." if store.Count <= options.Capacity then () else - // TODO: Handle any already running eviction jobs (?) - match options.EvictionMethod with - | EvictionMethod.Blocking -> evictionJob () - | EvictionMethod.ThreadPool -> ThreadPool.QueueUserWorkItem (fun _ -> evictionJob ()) |> ignore + | EvictionMethod.Blocking -> this.TryEvictItems () + | EvictionMethod.Background -> () member _.TryGet(key: 'Key) = match store.TryGetValue(key) with | true, value -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. - value.LastAccessed <- DateTimeOffset.Now + value.LastAccessed <- DateTimeOffset.Now.Ticks value.AccessCount <- Interlocked.Increment(&value.AccessCount) + cacheHit.Trigger(key, value.Value) ValueSome value | _ -> + cacheMiss.Trigger(key) ValueNone member this.Add(key: 'Key, value: 'Value) = let _ = this.TryAdd(key, value) in () @@ -140,4 +145,7 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) = if store.Count >= options.Capacity then let _ = this.TryEvict() in () - store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) \ No newline at end of file + store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) + + interface IDisposable with + member _.Dispose() = cts.Cancel() \ No newline at end of file From 018ec6366ee8bad9d44b9de65d78917c96fc4e9a Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 15:12:21 +0100 Subject: [PATCH 04/17] wip --- src/Compiler/Utilities/Caches.fs | 53 ++++++++++++-------------------- 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index a8d6c30b027..0f93c463d7f 100644 --- a/src/Compiler/Utilities/Caches.fs +++ 
b/src/Compiler/Utilities/Caches.fs @@ -19,12 +19,12 @@ type EvictionMethod = Blocking | Background [] type CacheOptions = - { Capacity: int + { MaximumCapacity: int PercentageToEvict: int Strategy: CachingStrategy EvictionMethod: EvictionMethod LevelOfConcurrency: int } with - static member Default = { Capacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking } + static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking } [] type CachedEntity<'Key, 'Value> = @@ -41,13 +41,12 @@ type CachedEntity<'Key, 'Value> = type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. - let capacity = options.Capacity + (options.Capacity * options.PercentageToEvict / 100) + let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) let store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) let cts = new CancellationTokenSource() do - Task.Run(fun () -> this.TryEvictTask(), cts.Token) |> ignore - + Task.Run(this.TryEvictTask, cts.Token) |> ignore let cacheHit = Event<'Key * 'Value>() let cacheMiss = Event<'Key>() @@ -67,7 +66,7 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. 
member _.GetStats() = {| - Capacity = options.Capacity + Capacity = options.MaximumCapacity PercentageToEvict = options.PercentageToEvict Strategy = options.Strategy LevelOfConcurrency = options.LevelOfConcurrency @@ -80,8 +79,10 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = member private _.GetEvictCount() = - let count = store.Count - count * options.PercentageToEvict / 100 + if store.Count >= options.MaximumCapacity then + store.Count * options.PercentageToEvict / 100 + else + 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies. member private this.TryGetItemsToEvict () = @@ -91,39 +92,26 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = | CachingStrategy.LFU -> store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) member private this.TryEvictItems () = - let evictKeys = this.TryGetItemsToEvict () - if this.GetEvictCount() > 0 then - for key in evictKeys do + for key in this.TryGetItemsToEvict () do let (removed, value) = store.TryRemove(key) if removed then eviction.Trigger(key, value.Value) member private this.TryEvictTask () = - task { + backgroundTask { while not cts.Token.IsCancellationRequested do let evictCount = this.GetEvictCount() if evictCount > 0 then - // printfn $"Evicting {evictCount} items using {options.EvictionMethod} strategy." this.TryEvictItems () - do! Task.Delay(1000, cts.Token) + //do! Task.Delay(500, cts.Token) } member this.TryEvict() = - - let evictCount = this.GetEvictCount() - - if evictCount <= 0 then - () - else - // printfn $"Need to evict {evictCount} items." 
- - if store.Count <= options.Capacity then - () - else - match options.EvictionMethod with - | EvictionMethod.Blocking -> this.TryEvictItems () - | EvictionMethod.Background -> () + if this.GetEvictCount() > 0 then + match options.EvictionMethod with + | EvictionMethod.Blocking -> this.TryEvictItems () + | EvictionMethod.Background -> () member _.TryGet(key: 'Key) = @@ -141,11 +129,10 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = member this.Add(key: 'Key, value: 'Value) = let _ = this.TryAdd(key, value) in () member this.TryAdd(key: 'Key, value: 'Value) = - - if store.Count >= options.Capacity then - let _ = this.TryEvict() in () - + this.TryEvict() store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) interface IDisposable with - member _.Dispose() = cts.Cancel() \ No newline at end of file + member _.Dispose() = cts.Cancel() + + member this.Dispose() = (this :> IDisposable).Dispose() \ No newline at end of file From 4f4ca13ccacbad5d7739a1239f13a424616ecb70 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 15:46:35 +0100 Subject: [PATCH 05/17] wip --- src/Compiler/Utilities/Caches.fs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 0f93c463d7f..8afc8d1378e 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -63,8 +63,6 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = member val Eviction = eviction.Publish - // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. - member _.GetStats() = {| Capacity = options.MaximumCapacity PercentageToEvict = options.PercentageToEvict @@ -84,7 +82,7 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = else 0 - // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies. 
+ // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. member private this.TryGetItemsToEvict () = match options.Strategy with | CachingStrategy.LRU -> store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) @@ -99,14 +97,17 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = eviction.Trigger(key, value.Value) member private this.TryEvictTask () = + // This will spin in the background trying to evict items. + // One of the issues is that if the delay is high (>100ms), it will not be able to evict items in time, and the cache will grow beyond the maximum capacity. backgroundTask { while not cts.Token.IsCancellationRequested do let evictCount = this.GetEvictCount() if evictCount > 0 then this.TryEvictItems () - //do! Task.Delay(500, cts.Token) + // do! Task.Delay(100, cts.Token) } + // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. member this.TryEvict() = if this.GetEvictCount() > 0 then match options.EvictionMethod with From 7ce92e655b1e3bee9ad9d09ddaec9be628097987 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 17:08:36 +0100 Subject: [PATCH 06/17] wip --- src/Compiler/Utilities/Caches.fs | 65 ++++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 19 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 8afc8d1378e..dad3c58e4d0 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -4,6 +4,7 @@ open System open System.Collections.Concurrent open System.Threading open System.Threading.Tasks +open System.Runtime.CompilerServices [] type CachingStrategy = @@ -14,17 +15,21 @@ type CachingStrategy = /// Least Frequently Used - replaces/evicts the item with the least number of requests. 
| LFU -[] +[] type EvictionMethod = Blocking | Background +[] +type EvictionReason = Evicted | Collected + [] type CacheOptions = { MaximumCapacity: int PercentageToEvict: int Strategy: CachingStrategy EvictionMethod: EvictionMethod - LevelOfConcurrency: int } with - static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking } + LevelOfConcurrency: int + Weak: bool } with + static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking; Weak = false } [] type CachedEntity<'Key, 'Value> = @@ -35,22 +40,33 @@ type CachedEntity<'Key, 'Value> = new(key: 'Key, value: 'Value) = { Key = key; Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } +[] +type Weak<'K>(key) = + let collected = new Event<'K>() + [] + member val Collected = collected.Publish + + override _.Finalize() = collected.Trigger key // TODO: This has a very naive and straightforward implementation for managing lifetimes, when evicting, will have to traverse the dictionary. [] -type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = +type Cache<'Key, 'Value> (options: CacheOptions) as this = // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. 
let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) let store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) + + let conditionalWeakTable = new ConditionalWeakTable<_, Weak<_>>(); + let cts = new CancellationTokenSource() do - Task.Run(this.TryEvictTask, cts.Token) |> ignore + if options.EvictionMethod = EvictionMethod.Background then + Task.Run(this.TryEvictTask, cts.Token) |> ignore let cacheHit = Event<'Key * 'Value>() let cacheMiss = Event<'Key>() - let eviction = Event<'Key * 'Value>() + let eviction = Event<'Key * EvictionReason>() [] member val CacheHit = cacheHit.Publish @@ -63,17 +79,18 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = member val Eviction = eviction.Publish - member _.GetStats() = {| - Capacity = options.MaximumCapacity - PercentageToEvict = options.PercentageToEvict - Strategy = options.Strategy - LevelOfConcurrency = options.LevelOfConcurrency - Count = store.Count - LeastRecentlyAccesssed = store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed - MostRecentlyAccesssed = store.Values |> Seq.maxBy _.LastAccessed |> _.LastAccessed - LeastFrequentlyAccessed = store.Values |> Seq.minBy _.AccessCount |> _.AccessCount - MostFrequentlyAccessed = store.Values |> Seq.maxBy _.AccessCount |> _.AccessCount - |} + member _.GetStats() = + {| + Capacity = options.MaximumCapacity + PercentageToEvict = options.PercentageToEvict + Strategy = options.Strategy + LevelOfConcurrency = options.LevelOfConcurrency + Count = store.Count + LeastRecentlyAccesssed = store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed + MostRecentlyAccesssed = store.Values |> Seq.maxBy _.LastAccessed |> _.LastAccessed + LeastFrequentlyAccessed = store.Values |> Seq.minBy _.AccessCount |> _.AccessCount + MostFrequentlyAccessed = store.Values |> Seq.maxBy _.AccessCount |> _.AccessCount + |} member private _.GetEvictCount() = @@ -94,7 +111,7 @@ type 
Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = for key in this.TryGetItemsToEvict () do let (removed, value) = store.TryRemove(key) if removed then - eviction.Trigger(key, value.Value) + eviction.Trigger(key, EvictionReason.Evicted) member private this.TryEvictTask () = // This will spin in the background trying to evict items. @@ -128,11 +145,21 @@ type Cache<'Key, 'Value when 'Key: struct> (options: CacheOptions) as this = ValueNone member this.Add(key: 'Key, value: 'Value) = let _ = this.TryAdd(key, value) in () + member this.TryAdd<'Key>(key: 'Key, value: 'Value) = + + if options.Weak then + let weak = new Weak<'Key>(key) + conditionalWeakTable.TryAdd(key :> obj, weak) |> ignore + weak.Collected.Add(this.RemoveCollected) - member this.TryAdd(key: 'Key, value: 'Value) = this.TryEvict() store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) + // TODO: This needs heavy testing to ensure we aren't leaking anything. + member private _.RemoveCollected(key: 'Key) = + store.TryRemove(key) |> ignore + eviction.Trigger(key, EvictionReason.Collected); + interface IDisposable with member _.Dispose() = cts.Cancel() From a44dbe5711ad19a73e2bda4b88dfa673bff16dca Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 17:11:18 +0100 Subject: [PATCH 07/17] wip --- src/Compiler/Utilities/Caches.fs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index dad3c58e4d0..94a05858b04 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -102,8 +102,8 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. 
member private this.TryGetItemsToEvict () = match options.Strategy with - | CachingStrategy.LRU -> store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - | CachingStrategy.MRU -> store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.LRU -> store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.MRU -> store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) | CachingStrategy.LFU -> store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) member private this.TryEvictItems () = From 2c044425046d1c79119c9393e465f5931dd936bb Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 17:12:38 +0100 Subject: [PATCH 08/17] wip --- src/Compiler/Utilities/Caches.fs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 94a05858b04..eb8e2156a03 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -46,6 +46,7 @@ type Weak<'K>(key) = [] member val Collected = collected.Publish + // TODO: Do we want to store it as WeakReference here? override _.Finalize() = collected.Trigger key // TODO: This has a very naive and straightforward implementation for managing lifetimes, when evicting, will have to traverse the dictionary. 
From 8865d2e45ed700fa5279b5cb469b19f61581b130 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 19:47:53 +0100 Subject: [PATCH 09/17] wip --- src/Compiler/Utilities/Caches.fs | 79 +++++++++++++++----------------- 1 file changed, 36 insertions(+), 43 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index eb8e2156a03..da4399d4102 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -7,13 +7,7 @@ open System.Threading.Tasks open System.Runtime.CompilerServices [] -type CachingStrategy = - /// Least Recently Used - replaces/evicts the item not requested for the longest time. - | LRU - /// Most Recently Used - replaces/evicts the item requested most recently. - | MRU - /// Least Frequently Used - replaces/evicts the item with the least number of requests. - | LFU +type CachingStrategy = LRU | MRU | LFU [] type EvictionMethod = Blocking | Background @@ -31,7 +25,7 @@ type CacheOptions = Weak: bool } with static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking; Weak = false } -[] +[] type CachedEntity<'Key, 'Value> = val Key: 'Key val Value: 'Value @@ -40,7 +34,7 @@ type CachedEntity<'Key, 'Value> = new(key: 'Key, value: 'Value) = { Key = key; Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } -[] +[] type Weak<'K>(key) = let collected = new Event<'K>() [] @@ -50,15 +44,10 @@ type Weak<'K>(key) = override _.Finalize() = collected.Trigger key // TODO: This has a very naive and straightforward implementation for managing lifetimes, when evicting, will have to traverse the dictionary. -[] +[] type Cache<'Key, 'Value> (options: CacheOptions) as this = - // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. 
let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) - let store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) - - let conditionalWeakTable = new ConditionalWeakTable<_, Weak<_>>(); - let cts = new CancellationTokenSource() do @@ -69,16 +58,13 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = let cacheMiss = Event<'Key>() let eviction = Event<'Key * EvictionReason>() - [] - member val CacheHit = cacheHit.Publish - - [] - member val CacheMiss = cacheMiss.Publish - - [] - - member val Eviction = eviction.Publish + [] member val CacheHit = cacheHit.Publish + [] member val CacheMiss = cacheMiss.Publish + [] member val Eviction = eviction.Publish + // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. + member val Store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) + member val ConditionalWeakTable = new ConditionalWeakTable<_, Weak<_>>(); member _.GetStats() = {| @@ -86,31 +72,34 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = PercentageToEvict = options.PercentageToEvict Strategy = options.Strategy LevelOfConcurrency = options.LevelOfConcurrency - Count = store.Count - LeastRecentlyAccesssed = store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed - MostRecentlyAccesssed = store.Values |> Seq.maxBy _.LastAccessed |> _.LastAccessed - LeastFrequentlyAccessed = store.Values |> Seq.minBy _.AccessCount |> _.AccessCount - MostFrequentlyAccessed = store.Values |> Seq.maxBy _.AccessCount |> _.AccessCount + Count = this.Store.Count + LeastRecentlyAccesssed = this.Store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed + MostRecentlyAccesssed = this.Store.Values |> Seq.maxBy _.LastAccessed |> _.LastAccessed + LeastFrequentlyAccessed = this.Store.Values |> Seq.minBy _.AccessCount |> _.AccessCount + MostFrequentlyAccessed = this.Store.Values |> Seq.maxBy 
_.AccessCount |> _.AccessCount |} member private _.GetEvictCount() = - if store.Count >= options.MaximumCapacity then - store.Count * options.PercentageToEvict / 100 + if this.Store.Count >= options.MaximumCapacity then + this.Store.Count * options.PercentageToEvict / 100 else 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. member private this.TryGetItemsToEvict () = match options.Strategy with - | CachingStrategy.LRU -> store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - | CachingStrategy.MRU -> store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - | CachingStrategy.LFU -> store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.LRU -> + this.Store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.MRU -> + this.Store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + | CachingStrategy.LFU -> + this.Store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) member private this.TryEvictItems () = if this.GetEvictCount() > 0 then for key in this.TryGetItemsToEvict () do - let (removed, value) = store.TryRemove(key) + let (removed, _) = this.Store.TryRemove(key) if removed then eviction.Trigger(key, EvictionReason.Evicted) @@ -119,8 +108,7 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = // One of the issues is that if the delay is high (>100ms), it will not be able to evict items in time, and the cache will grow beyond the maximum capacity. 
backgroundTask { while not cts.Token.IsCancellationRequested do - let evictCount = this.GetEvictCount() - if evictCount > 0 then + if this.GetEvictCount() > 0 then this.TryEvictItems () // do! Task.Delay(100, cts.Token) } @@ -132,9 +120,8 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = | EvictionMethod.Blocking -> this.TryEvictItems () | EvictionMethod.Background -> () - member _.TryGet(key: 'Key) = - match store.TryGetValue(key) with + match this.Store.TryGetValue(key) with | true, value -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. value.LastAccessed <- DateTimeOffset.Now.Ticks @@ -145,20 +132,26 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = cacheMiss.Trigger(key) ValueNone + [] member this.Add(key: 'Key, value: 'Value) = let _ = this.TryAdd(key, value) in () + + [] member this.TryAdd<'Key>(key: 'Key, value: 'Value) = + // Weak table/references only make sense if we work with reference types (for obvious reasons). + // So, if this collection is storing value types as keys, it will simply box them. + // GC-based eviction shall not be used with value types as keys. if options.Weak then - let weak = new Weak<'Key>(key) - conditionalWeakTable.TryAdd(key :> obj, weak) |> ignore + let weak = Weak<'Key>(key) + this.ConditionalWeakTable.TryAdd(key :> obj, weak) |> ignore weak.Collected.Add(this.RemoveCollected) this.TryEvict() - store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) + this.Store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) // TODO: This needs heavy testing to ensure we aren't leaking anything. 
member private _.RemoveCollected(key: 'Key) = - store.TryRemove(key) |> ignore + this.Store.TryRemove(key) |> ignore eviction.Trigger(key, EvictionReason.Collected); interface IDisposable with From c16bea8ec0cb40a4787163a3d38d7be216ead56b Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Thu, 2 Jan 2025 19:49:49 +0100 Subject: [PATCH 10/17] wip --- src/Compiler/Utilities/Caches.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index da4399d4102..52989c1de87 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -139,7 +139,7 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = member this.TryAdd<'Key>(key: 'Key, value: 'Value) = // Weak table/references only make sense if we work with reference types (for obvious reasons). - // So, if this collection is storing value types as keys, it will simply box them. + // So, if `Weak` is enabled for value types as keys of the cache, it will simply box them, resulting in additional allocations. // GC-based eviction shall not be used with value types as keys. 
if options.Weak then let weak = Weak<'Key>(key) From ae1c06504073203c4a2e6de351592b18c5542a8f Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Mon, 6 Jan 2025 14:10:14 +0100 Subject: [PATCH 11/17] wip --- src/Compiler/Utilities/Caches.fs | 80 ++++++++++++-------------------- 1 file changed, 30 insertions(+), 50 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 52989c1de87..2616ae0c848 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -4,7 +4,7 @@ open System open System.Collections.Concurrent open System.Threading open System.Threading.Tasks -open System.Runtime.CompilerServices +open System.Diagnostics [] type CachingStrategy = LRU | MRU | LFU @@ -21,30 +21,19 @@ type CacheOptions = PercentageToEvict: int Strategy: CachingStrategy EvictionMethod: EvictionMethod - LevelOfConcurrency: int - Weak: bool } with - static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking; Weak = false } + LevelOfConcurrency: int } with + static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking; } [] -type CachedEntity<'Key, 'Value> = - val Key: 'Key +type CachedEntity<'Value> = val Value: 'Value val mutable LastAccessed: int64 val mutable AccessCount: uint64 - new(key: 'Key, value: 'Value) = { Key = key; Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } + new(value: 'Value) = { Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } [] -type Weak<'K>(key) = - let collected = new Event<'K>() - [] - member val Collected = collected.Publish - - // TODO: Do we want to store it as WeakReference here? 
- override _.Finalize() = collected.Trigger key - -// TODO: This has a very naive and straightforward implementation for managing lifetimes, when evicting, will have to traverse the dictionary. -[] +[] type Cache<'Key, 'Value> (options: CacheOptions) as this = let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) @@ -54,17 +43,16 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = if options.EvictionMethod = EvictionMethod.Background then Task.Run(this.TryEvictTask, cts.Token) |> ignore - let cacheHit = Event<'Key * 'Value>() - let cacheMiss = Event<'Key>() - let eviction = Event<'Key * EvictionReason>() + let cacheHit = Event<_ * _>() + let cacheMiss = Event<_>() + let eviction = Event<_ * EvictionReason>() [] member val CacheHit = cacheHit.Publish [] member val CacheMiss = cacheMiss.Publish [] member val Eviction = eviction.Publish // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. - member val Store = ConcurrentDictionary<'Key, CachedEntity<'Key,'Value>>(options.LevelOfConcurrency, capacity) - member val ConditionalWeakTable = new ConditionalWeakTable<_, Weak<_>>(); + member val Store = ConcurrentDictionary<_, CachedEntity<'Value>>(options.LevelOfConcurrency, capacity) member _.GetStats() = {| @@ -87,23 +75,23 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. 
- member private this.TryGetItemsToEvict () = + member private _.TryGetItemsToEvict () = match options.Strategy with | CachingStrategy.LRU -> - this.Store.Values |> Seq.sortBy _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + this.Store |> Seq.sortBy _.Value.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) | CachingStrategy.MRU -> - this.Store.Values |> Seq.sortByDescending _.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + this.Store |> Seq.sortByDescending _.Value.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) | CachingStrategy.LFU -> - this.Store.Values |> Seq.sortBy _.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + this.Store |> Seq.sortBy _.Value.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - member private this.TryEvictItems () = + member private _.TryEvictItems () = if this.GetEvictCount() > 0 then for key in this.TryGetItemsToEvict () do - let (removed, _) = this.Store.TryRemove(key) - if removed then - eviction.Trigger(key, EvictionReason.Evicted) + match this.Store.TryRemove(key) with + | true, _ -> eviction.Trigger(key, EvictionReason.Evicted) + | _ -> () // TODO: We probably want to count eviction misses as well? - member private this.TryEvictTask () = + member private _.TryEvictTask () = // This will spin in the background trying to evict items. // One of the issues is that if the delay is high (>100ms), it will not be able to evict items in time, and the cache will grow beyond the maximum capacity. backgroundTask { @@ -114,13 +102,13 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = } // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. 
- member this.TryEvict() = + member _.TryEvict() = if this.GetEvictCount() > 0 then match options.EvictionMethod with | EvictionMethod.Blocking -> this.TryEvictItems () | EvictionMethod.Background -> () - member _.TryGet(key: 'Key) = + member _.TryGet(key) = match this.Store.TryGetValue(key) with | true, value -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. @@ -132,27 +120,19 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = cacheMiss.Trigger(key) ValueNone - [] - member this.Add(key: 'Key, value: 'Value) = let _ = this.TryAdd(key, value) in () + member _.TryAdd(key, value: 'Value, ?update: bool) = - [] - member this.TryAdd<'Key>(key: 'Key, value: 'Value) = - - // Weak table/references only make sense if we work with reference types (for obvious reasons). - // So, if `Weak` is enabled for value types as keys of the cache, it will simply box them, resulting in additional allocations. - // GC-based eviction shall not be used with value types as keys. - if options.Weak then - let weak = Weak<'Key>(key) - this.ConditionalWeakTable.TryAdd(key :> obj, weak) |> ignore - weak.Collected.Add(this.RemoveCollected) + let update = defaultArg update false this.TryEvict() - this.Store.TryAdd(key, CachedEntity<'Key, 'Value>(key, value)) - // TODO: This needs heavy testing to ensure we aren't leaking anything. 
- member private _.RemoveCollected(key: 'Key) = - this.Store.TryRemove(key) |> ignore - eviction.Trigger(key, EvictionReason.Collected); + let value = CachedEntity<'Value>(value) + + if update then + this.Store.AddOrUpdate(key, value, (fun _ _ -> value)) |> ignore + true + else + this.Store.TryAdd(key, value) interface IDisposable with member _.Dispose() = cts.Cancel() From b40fd5a47b9500a17f9e1aa28ed9c641420e8686 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Mon, 6 Jan 2025 19:28:42 +0100 Subject: [PATCH 12/17] wip --- src/Compiler/Utilities/Caches.fs | 78 +++++++++++++++++++++--------- 1 file changed, 56 insertions(+), 22 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 2616ae0c848..a59f77019fc 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -6,15 +6,40 @@ open System.Threading open System.Threading.Tasks open System.Diagnostics +[] +// Default Seq.* functions have one issue - when doing `Seq.sortBy`, it will call a `ToArray` on the collection, +// which is *not* calling `ConcurrentDictionary.ToArray`, but uses a custom one instead (treating it as `ICollection`) +// this leads to an exception when trying to evict without locking (The index is equal to or greater than the length of the array, +// or the number of elements in the dictionary is greater than the available space from index to the end of the destination array.) +// this is caused by insertions that happened between reading the `Count` and doing the `CopyTo`. +// This solution introduces a custom `ConcurrentDictionary.sortBy` which will be calling a proper `CopyTo`, the one on the ConcurrentDictionary itself. 
+module ConcurrentDictionary = + + open System.Collections + open System.Collections.Generic + + let inline mkSeq f = + { new IEnumerable<'U> with + member _.GetEnumerator() = f() + + interface IEnumerable with + member _.GetEnumerator() = (f() :> IEnumerator) } + + let inline mkDelayedSeq (f: unit -> IEnumerable<'T>) = + mkSeq (fun () -> f().GetEnumerator()) + + let inline sortBy ([] projection) (source: ConcurrentDictionary<_, _>) = + mkDelayedSeq (fun () -> + let array = source.ToArray() + Array.sortInPlaceBy projection array + array :> seq<_>) + [] -type CachingStrategy = LRU | MRU | LFU +type CachingStrategy = LRU | LFU [] type EvictionMethod = Blocking | Background -[] -type EvictionReason = Evicted | Collected - [] type CacheOptions = { MaximumCapacity: int @@ -32,6 +57,7 @@ type CachedEntity<'Value> = new(value: 'Value) = { Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } + [] [] type Cache<'Key, 'Value> (options: CacheOptions) as this = @@ -45,7 +71,7 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = let cacheHit = Event<_ * _>() let cacheMiss = Event<_>() - let eviction = Event<_ * EvictionReason>() + let eviction = Event<_>() [] member val CacheHit = cacheHit.Publish [] member val CacheMiss = cacheMiss.Publish @@ -61,44 +87,52 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = Strategy = options.Strategy LevelOfConcurrency = options.LevelOfConcurrency Count = this.Store.Count - LeastRecentlyAccesssed = this.Store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed MostRecentlyAccesssed = this.Store.Values |> Seq.maxBy _.LastAccessed |> _.LastAccessed - LeastFrequentlyAccessed = this.Store.Values |> Seq.minBy _.AccessCount |> _.AccessCount + LeastRecentlyAccesssed = this.Store.Values |> Seq.minBy _.LastAccessed |> _.LastAccessed MostFrequentlyAccessed = this.Store.Values |> Seq.maxBy _.AccessCount |> _.AccessCount + LeastFrequentlyAccessed = this.Store.Values |> Seq.minBy _.AccessCount |> _.AccessCount |} 
member private _.GetEvictCount() = if this.Store.Count >= options.MaximumCapacity then - this.Store.Count * options.PercentageToEvict / 100 + (this.Store.Count - options.MaximumCapacity) + (options.MaximumCapacity * options.PercentageToEvict / 100) else 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. member private _.TryGetItemsToEvict () = - match options.Strategy with - | CachingStrategy.LRU -> - this.Store |> Seq.sortBy _.Value.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - | CachingStrategy.MRU -> - this.Store |> Seq.sortByDescending _.Value.LastAccessed |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - | CachingStrategy.LFU -> - this.Store |> Seq.sortBy _.Value.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + this.Store |> + match options.Strategy with + | CachingStrategy.LRU -> + ConcurrentDictionary.sortBy _.Value.LastAccessed + | CachingStrategy.LFU -> + ConcurrentDictionary.sortBy _.Value.AccessCount + |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) member private _.TryEvictItems () = if this.GetEvictCount() > 0 then for key in this.TryGetItemsToEvict () do match this.Store.TryRemove(key) with - | true, _ -> eviction.Trigger(key, EvictionReason.Evicted) + | true, _ -> eviction.Trigger(key) | _ -> () // TODO: We probably want to count eviction misses as well? - member private _.TryEvictTask () = - // This will spin in the background trying to evict items. - // One of the issues is that if the delay is high (>100ms), it will not be able to evict items in time, and the cache will grow beyond the maximum capacity. + // TODO: Shall this be a safer task, wrapping everything in try .. with, so it's not crashing silently? 
+ member private this.TryEvictTask () = backgroundTask { while not cts.Token.IsCancellationRequested do - if this.GetEvictCount() > 0 then - this.TryEvictItems () - // do! Task.Delay(100, cts.Token) + let evictionCount = this.GetEvictCount() + if evictionCount > 0 then + this.TryEvictItems () + let utilization = (this.Store.Count / options.MaximumCapacity) + // So, based on utilization this will scale the delay between 0 and 1 seconds. + // Worst case scenario would be when 1 second delay happens, then cache will grow rapidly, and beyond the maximum capacity. + // In this case underlying dictionary will resize, AND we will have to evict items, which will likely be slow. + // In this case, cache stats should be used to adjust MaximumCapacity and PercentageToEvict. + let delay = 1000 - (1000 * utilization) + if delay > 0 then + do! Task.Delay(delay) + } // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. From c36c119cd35e35154b88184ba411d79eafdcdd75 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Mon, 6 Jan 2025 19:49:01 +0100 Subject: [PATCH 13/17] uint64->int64 --- src/Compiler/Utilities/Caches.fs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index a59f77019fc..ea24e599087 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -53,9 +53,9 @@ type CacheOptions = type CachedEntity<'Value> = val Value: 'Value val mutable LastAccessed: int64 - val mutable AccessCount: uint64 + val mutable AccessCount: int64 - new(value: 'Value) = { Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0UL } + new(value: 'Value) = { Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0L } [] From f05240d9435fe6871461c82bb8185dfb829d880b Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Wed, 8 Jan 2025 19:55:22 +0100 Subject: [PATCH 14/17] wip --- 
src/Compiler/Utilities/Caches.fs | 51 ++++++++++++++++++-------------- 1 file changed, 29 insertions(+), 22 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index ea24e599087..0258eaf73aa 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -46,28 +46,25 @@ type CacheOptions = PercentageToEvict: int Strategy: CachingStrategy EvictionMethod: EvictionMethod - LevelOfConcurrency: int } with - static member Default = { MaximumCapacity = 100; PercentageToEvict = 5; Strategy = CachingStrategy.LRU; LevelOfConcurrency = Environment.ProcessorCount; EvictionMethod = EvictionMethod.Blocking; } + LevelOfConcurrency: int } + static member Default = + { MaximumCapacity = 100 + PercentageToEvict = 5 + Strategy = CachingStrategy.LRU + LevelOfConcurrency = Environment.ProcessorCount + EvictionMethod = EvictionMethod.Blocking } [] type CachedEntity<'Value> = val Value: 'Value val mutable LastAccessed: int64 val mutable AccessCount: int64 - new(value: 'Value) = { Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0L } [] [] -type Cache<'Key, 'Value> (options: CacheOptions) as this = - - let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) - let cts = new CancellationTokenSource() - - do - if options.EvictionMethod = EvictionMethod.Background then - Task.Run(this.TryEvictTask, cts.Token) |> ignore +type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = let cacheHit = Event<_ * _>() let cacheMiss = Event<_>() @@ -80,7 +77,17 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = // Increase expected capacity by the percentage to evict, since we want to not resize the dictionary. 
member val Store = ConcurrentDictionary<_, CachedEntity<'Value>>(options.LevelOfConcurrency, capacity) - member _.GetStats() = + static member Create(options: CacheOptions) = + let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) + let cts = new CancellationTokenSource() + let cache = new Cache<'Key, 'Value>(options, capacity, cts) + + if options.EvictionMethod = EvictionMethod.Background then + Task.Run(cache.TryEvictTask, cts.Token) |> ignore + + cache + + member this.GetStats() = {| Capacity = options.MaximumCapacity PercentageToEvict = options.PercentageToEvict @@ -94,14 +101,14 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = |} - member private _.GetEvictCount() = + member private this.GetEvictCount() = if this.Store.Count >= options.MaximumCapacity then (this.Store.Count - options.MaximumCapacity) + (options.MaximumCapacity * options.PercentageToEvict / 100) else 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. - member private _.TryGetItemsToEvict () = + member private this.TryGetItemsToEvict () = this.Store |> match options.Strategy with | CachingStrategy.LRU -> @@ -110,7 +117,8 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = ConcurrentDictionary.sortBy _.Value.AccessCount |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) - member private _.TryEvictItems () = + // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. + member private this.TryEvictItems () = if this.GetEvictCount() > 0 then for key in this.TryGetItemsToEvict () do match this.Store.TryRemove(key) with @@ -118,7 +126,7 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = | _ -> () // TODO: We probably want to count eviction misses as well? // TODO: Shall this be a safer task, wrapping everything in try .. 
with, so it's not crashing silently? - member private this.TryEvictTask () = + member private this.TryEvictTask() = backgroundTask { while not cts.Token.IsCancellationRequested do let evictionCount = this.GetEvictCount() @@ -126,23 +134,22 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = this.TryEvictItems () let utilization = (this.Store.Count / options.MaximumCapacity) // So, based on utilization this will scale the delay between 0 and 1 seconds. - // Worst case scenario would be when 1 second delay happens, then cache will grow rapidly, and beyond the maximum capacity. + // Worst case scenario would be when 1 second delay happens, + // if the cache will grow rapidly (or in bursts), it will go beyond the maximum capacity. // In this case underlying dictionary will resize, AND we will have to evict items, which will likely be slow. // In this case, cache stats should be used to adjust MaximumCapacity and PercentageToEvict. let delay = 1000 - (1000 * utilization) if delay > 0 then do! Task.Delay(delay) - } - // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. - member _.TryEvict() = + member this.TryEvict() = if this.GetEvictCount() > 0 then match options.EvictionMethod with | EvictionMethod.Blocking -> this.TryEvictItems () | EvictionMethod.Background -> () - member _.TryGet(key) = + member this.TryGet(key) = match this.Store.TryGetValue(key) with | true, value -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. 
@@ -154,7 +161,7 @@ type Cache<'Key, 'Value> (options: CacheOptions) as this = cacheMiss.Trigger(key) ValueNone - member _.TryAdd(key, value: 'Value, ?update: bool) = + member this.TryAdd(key, value: 'Value, ?update: bool) = let update = defaultArg update false From 0663bb91e7882ef3392a1b33e80dd9fbfb9b063f Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Wed, 8 Jan 2025 19:59:12 +0100 Subject: [PATCH 15/17] wip --- src/Compiler/Utilities/Caches.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index 0258eaf73aa..e91c337b3f3 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -154,7 +154,7 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = | true, value -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. value.LastAccessed <- DateTimeOffset.Now.Ticks - value.AccessCount <- Interlocked.Increment(&value.AccessCount) + let _ = Interlocked.Increment(&value.AccessCount) in () cacheHit.Trigger(key, value.Value) ValueSome value | _ -> From b947d179105f916d20873b44c93c22c697892283 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Wed, 8 Jan 2025 20:00:01 +0100 Subject: [PATCH 16/17] wip --- src/Compiler/Utilities/Caches.fs | 121 +++++++++++++++++++------------ 1 file changed, 73 insertions(+), 48 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index e91c337b3f3..e126a75da3f 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -20,13 +20,13 @@ module ConcurrentDictionary = let inline mkSeq f = { new IEnumerable<'U> with - member _.GetEnumerator() = f() + member _.GetEnumerator() = f () interface IEnumerable with - member _.GetEnumerator() = (f() :> IEnumerator) } + member _.GetEnumerator() = (f () :> IEnumerator) + } - let inline mkDelayedSeq (f: unit -> 
IEnumerable<'T>) = - mkSeq (fun () -> f().GetEnumerator()) + let inline mkDelayedSeq (f: unit -> IEnumerable<'T>) = mkSeq (fun () -> f().GetEnumerator()) let inline sortBy ([] projection) (source: ConcurrentDictionary<_, _>) = mkDelayedSeq (fun () -> @@ -35,32 +35,46 @@ module ConcurrentDictionary = array :> seq<_>) [] -type CachingStrategy = LRU | LFU +type CachingStrategy = + | LRU + | LFU [] -type EvictionMethod = Blocking | Background +type EvictionMethod = + | Blocking + | Background [] type CacheOptions = - { MaximumCapacity: int - PercentageToEvict: int - Strategy: CachingStrategy - EvictionMethod: EvictionMethod - LevelOfConcurrency: int } + { + MaximumCapacity: int + PercentageToEvict: int + Strategy: CachingStrategy + EvictionMethod: EvictionMethod + LevelOfConcurrency: int + } + static member Default = - { MaximumCapacity = 100 - PercentageToEvict = 5 - Strategy = CachingStrategy.LRU - LevelOfConcurrency = Environment.ProcessorCount - EvictionMethod = EvictionMethod.Blocking } + { + MaximumCapacity = 100 + PercentageToEvict = 5 + Strategy = CachingStrategy.LRU + LevelOfConcurrency = Environment.ProcessorCount + EvictionMethod = EvictionMethod.Blocking + } [] type CachedEntity<'Value> = val Value: 'Value val mutable LastAccessed: int64 val mutable AccessCount: int64 - new(value: 'Value) = { Value = value; LastAccessed = DateTimeOffset.Now.Ticks; AccessCount = 0L } + new(value: 'Value) = + { + Value = value + LastAccessed = DateTimeOffset.Now.Ticks + AccessCount = 0L + } [] [] @@ -70,15 +84,23 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = let cacheMiss = Event<_>() let eviction = Event<_>() - [] member val CacheHit = cacheHit.Publish - [] member val CacheMiss = cacheMiss.Publish - [] member val Eviction = eviction.Publish + [] + member val CacheHit = cacheHit.Publish + + [] + member val CacheMiss = cacheMiss.Publish + + [] + member val Eviction = eviction.Publish // Increase expected capacity by the percentage to evict, since 
we want to not resize the dictionary. member val Store = ConcurrentDictionary<_, CachedEntity<'Value>>(options.LevelOfConcurrency, capacity) static member Create(options: CacheOptions) = - let capacity = options.MaximumCapacity + (options.MaximumCapacity * options.PercentageToEvict / 100) + let capacity = + options.MaximumCapacity + + (options.MaximumCapacity * options.PercentageToEvict / 100) + let cts = new CancellationTokenSource() let cache = new Cache<'Key, 'Value>(options, capacity, cts) @@ -100,27 +122,26 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = LeastFrequentlyAccessed = this.Store.Values |> Seq.minBy _.AccessCount |> _.AccessCount |} - member private this.GetEvictCount() = if this.Store.Count >= options.MaximumCapacity then - (this.Store.Count - options.MaximumCapacity) + (options.MaximumCapacity * options.PercentageToEvict / 100) + (this.Store.Count - options.MaximumCapacity) + + (options.MaximumCapacity * options.PercentageToEvict / 100) else 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. - member private this.TryGetItemsToEvict () = - this.Store |> - match options.Strategy with - | CachingStrategy.LRU -> - ConcurrentDictionary.sortBy _.Value.LastAccessed - | CachingStrategy.LFU -> - ConcurrentDictionary.sortBy _.Value.AccessCount - |> Seq.take (this.GetEvictCount()) |> Seq.map (fun x -> x.Key) + member private this.TryGetItemsToEvict() = + this.Store + |> match options.Strategy with + | CachingStrategy.LRU -> ConcurrentDictionary.sortBy _.Value.LastAccessed + | CachingStrategy.LFU -> ConcurrentDictionary.sortBy _.Value.AccessCount + |> Seq.take (this.GetEvictCount()) + |> Seq.map (fun x -> x.Key) // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. 
- member private this.TryEvictItems () = + member private this.TryEvictItems() = if this.GetEvictCount() > 0 then - for key in this.TryGetItemsToEvict () do + for key in this.TryGetItemsToEvict() do match this.Store.TryRemove(key) with | true, _ -> eviction.Trigger(key) | _ -> () // TODO: We probably want to count eviction misses as well? @@ -129,24 +150,27 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = member private this.TryEvictTask() = backgroundTask { while not cts.Token.IsCancellationRequested do - let evictionCount = this.GetEvictCount() - if evictionCount > 0 then - this.TryEvictItems () - let utilization = (this.Store.Count / options.MaximumCapacity) - // So, based on utilization this will scale the delay between 0 and 1 seconds. - // Worst case scenario would be when 1 second delay happens, - // if the cache will grow rapidly (or in bursts), it will go beyond the maximum capacity. - // In this case underlying dictionary will resize, AND we will have to evict items, which will likely be slow. - // In this case, cache stats should be used to adjust MaximumCapacity and PercentageToEvict. - let delay = 1000 - (1000 * utilization) - if delay > 0 then - do! Task.Delay(delay) + let evictionCount = this.GetEvictCount() + + if evictionCount > 0 then + this.TryEvictItems() + + let utilization = (this.Store.Count / options.MaximumCapacity) + // So, based on utilization this will scale the delay between 0 and 1 seconds. + // Worst case scenario would be when 1 second delay happens, + // if the cache will grow rapidly (or in bursts), it will go beyond the maximum capacity. + // In this case underlying dictionary will resize, AND we will have to evict items, which will likely be slow. + // In this case, cache stats should be used to adjust MaximumCapacity and PercentageToEvict. + let delay = 1000 - (1000 * utilization) + + if delay > 0 then + do! 
Task.Delay(delay) } member this.TryEvict() = if this.GetEvictCount() > 0 then match options.EvictionMethod with - | EvictionMethod.Blocking -> this.TryEvictItems () + | EvictionMethod.Blocking -> this.TryEvictItems() | EvictionMethod.Background -> () member this.TryGet(key) = @@ -154,7 +178,8 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = | true, value -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. value.LastAccessed <- DateTimeOffset.Now.Ticks - let _ = Interlocked.Increment(&value.AccessCount) in () + let _ = Interlocked.Increment(&value.AccessCount) in + () cacheHit.Trigger(key, value.Value) ValueSome value | _ -> @@ -178,4 +203,4 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = interface IDisposable with member _.Dispose() = cts.Cancel() - member this.Dispose() = (this :> IDisposable).Dispose() \ No newline at end of file + member this.Dispose() = (this :> IDisposable).Dispose() From 5224504a1c17953934b11a916ca3db2091ab7f02 Mon Sep 17 00:00:00 2001 From: Vlad Zarytovskii Date: Wed, 8 Jan 2025 20:23:05 +0100 Subject: [PATCH 17/17] wip --- src/Compiler/Utilities/Caches.fs | 34 ++++++++++++++++---------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/Compiler/Utilities/Caches.fs b/src/Compiler/Utilities/Caches.fs index e126a75da3f..469f56a3b98 100644 --- a/src/Compiler/Utilities/Caches.fs +++ b/src/Compiler/Utilities/Caches.fs @@ -122,7 +122,7 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = LeastFrequentlyAccessed = this.Store.Values |> Seq.minBy _.AccessCount |> _.AccessCount |} - member private this.GetEvictCount() = + member private this.CalculateEvictionCount() = if this.Store.Count >= options.MaximumCapacity then (this.Store.Count - options.MaximumCapacity) + (options.MaximumCapacity * options.PercentageToEvict / 100) @@ -130,18 +130,18 @@ type Cache<'Key, 'Value> 
private (options: CacheOptions, capacity, cts) = 0 // TODO: All of these are proofs of concept, a very naive implementation of eviction strategies, it will always walk the dictionary to find the items to evict, this is not efficient. - member private this.TryGetItemsToEvict() = + member private this.TryGetPickToEvict() = this.Store |> match options.Strategy with | CachingStrategy.LRU -> ConcurrentDictionary.sortBy _.Value.LastAccessed | CachingStrategy.LFU -> ConcurrentDictionary.sortBy _.Value.AccessCount - |> Seq.take (this.GetEvictCount()) + |> Seq.take (this.CalculateEvictionCount()) |> Seq.map (fun x -> x.Key) // TODO: Explore an eviction shortcut, some sort of list of keys to evict first, based on the strategy. member private this.TryEvictItems() = - if this.GetEvictCount() > 0 then - for key in this.TryGetItemsToEvict() do + if this.CalculateEvictionCount() > 0 then + for key in this.TryGetPickToEvict() do match this.Store.TryRemove(key) with | true, _ -> eviction.Trigger(key) | _ -> () // TODO: We probably want to count eviction misses as well? 
@@ -150,7 +150,7 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = member private this.TryEvictTask() = backgroundTask { while not cts.Token.IsCancellationRequested do - let evictionCount = this.GetEvictCount() + let evictionCount = this.CalculateEvictionCount() if evictionCount > 0 then this.TryEvictItems() @@ -168,26 +168,26 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = } member this.TryEvict() = - if this.GetEvictCount() > 0 then + if this.CalculateEvictionCount() > 0 then match options.EvictionMethod with | EvictionMethod.Blocking -> this.TryEvictItems() | EvictionMethod.Background -> () - member this.TryGet(key) = + member this.TryGet(key, value: outref<'Value>) = match this.Store.TryGetValue(key) with - | true, value -> + | true, cachedEntity -> // this is fine to be non-atomic, I guess, we are okay with race if the time is within the time of multiple concurrent calls. - value.LastAccessed <- DateTimeOffset.Now.Ticks - let _ = Interlocked.Increment(&value.AccessCount) in - () - cacheHit.Trigger(key, value.Value) - ValueSome value + cachedEntity.LastAccessed <- DateTimeOffset.Now.Ticks + let _ = Interlocked.Increment(&cachedEntity.AccessCount) + cacheHit.Trigger(key, cachedEntity.Value) + value <- cachedEntity.Value + true | _ -> cacheMiss.Trigger(key) - ValueNone + value <- Unchecked.defaultof<'Value> + false member this.TryAdd(key, value: 'Value, ?update: bool) = - let update = defaultArg update false this.TryEvict() @@ -195,7 +195,7 @@ type Cache<'Key, 'Value> private (options: CacheOptions, capacity, cts) = let value = CachedEntity<'Value>(value) if update then - this.Store.AddOrUpdate(key, value, (fun _ _ -> value)) |> ignore + let _ = this.Store.AddOrUpdate(key, value, (fun _ _ -> value)) true else this.Store.TryAdd(key, value)