Skip to content

Commit

Permalink
Fix/arguments sample() simulate() mismatch (#129)
Browse files Browse the repository at this point in the history
* Fix/arguments sample() simulate() mismatch

* bump version

* format

* Remove unused import
  • Loading branch information
L-M-Sherlock authored Jul 29, 2024
1 parent 884c2ef commit 4b56fd9
Show file tree
Hide file tree
Showing 4 changed files with 13 additions and 109 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "FSRS-Optimizer"
version = "5.0.4"
version = "5.0.5"
readme = "README.md"
dependencies = [
"matplotlib>=3.7.0",
Expand Down
10 changes: 3 additions & 7 deletions src/fsrs_optimizer/fsrs_optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -1272,9 +1272,7 @@ def preview_sequence(self, test_rating_sequence: str, requestRetention: float):
(
f"{ivl}d"
if ivl < 30
else f"{ivl / 30:.1f}m"
if ivl < 365
else f"{ivl / 365:.1f}y"
else f"{ivl / 30:.1f}m" if ivl < 365 else f"{ivl / 365:.1f}y"
)
for ivl in map(int, t_history.split(","))
]
Expand Down Expand Up @@ -1330,7 +1328,7 @@ def find_optimal_retention(
self,
learn_span=365,
max_ivl=36500,
loss_aversion=1,
loss_aversion=2.5,
verbose=True,
):
"""should not be called before predict_memory_states"""
Expand Down Expand Up @@ -1425,9 +1423,7 @@ def moving_average(data, window_size=365 // 20):
ax.legend()
ax.grid(True)

simulate_config["deck_size"] = 20000
simulate_config["max_cost_perday"] = 1200
simulate_config["learn_limit_perday"] = math.inf
simulate_config["loss_aversion"] = 1
fig6 = workload_graph(simulate_config)

return (fig1, fig2, fig3, fig4, fig5, fig6)
Expand Down
99 changes: 6 additions & 93 deletions src/fsrs_optimizer/fsrs_simulator.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import math
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import trange

Expand Down Expand Up @@ -230,7 +229,6 @@ def mean_reversion(init, current):
learn_cnt_per_day[today] = np.sum(true_learn)
memorized_cnt_per_day[today] = card_table[col["retrievability"]].sum()
cost_per_day[today] = card_table[col["cost"]][true_review | true_learn].sum()
card_table = pd.DataFrame(card_table.T, columns=columns)
return (
card_table,
review_cnt_per_day,
Expand Down Expand Up @@ -278,9 +276,9 @@ def sample(
deck_size,
learn_span,
max_cost_perday,
max_ivl,
learn_limit_perday,
review_limit_perday,
max_ivl,
learn_costs,
review_costs,
first_rating_prob,
Expand All @@ -296,106 +294,21 @@ def sample(
return np.mean(memorization)


def bracket(xa=0.75, xb=0.95, maxiter=20, **kwargs):
    """Bracket an extremum of ``sample`` along the retention axis.

    Appears adapted from ``scipy.optimize.bracket`` (golden-ratio downhill
    bracketing with parabolic extrapolation), with every candidate point
    clamped into [l_lim, u_lim] = [0.75, 0.95] — TODO confirm provenance.

    Args:
        xa: first probe point (retention value).
        xb: second probe point.
        maxiter: maximum number of downhill iterations before giving up.
        **kwargs: forwarded verbatim to ``sample`` on every evaluation.

    Returns:
        Tuple ``(xa, xb, xc, fa, fb, fc, funccalls)`` — the three bracketing
        points, their ``sample`` values, and the number of ``sample`` calls.

    NOTE(review): because all points are clamped to [0.75, 0.95], the usual
    bracketing invariant (fb strictly below fa and fc) may not hold when the
    extremum lies at or beyond a clamp boundary.
    """
    # Hard clamp bounds applied to every candidate point below.
    u_lim = 0.95
    l_lim = 0.75

    grow_limit = 100.0      # cap on how far a parabolic step may extrapolate
    gold = 1.6180339        # golden ratio, default step-growth factor
    verysmall_num = 1e-21   # guards the parabolic-fit denominator against 0

    fa = sample(xa, **kwargs)
    fb = sample(xb, **kwargs)
    funccalls = 2

    if fa < fb:  # Switch so fa > fb
        xa, xb = xb, xa
        fa, fb = fb, fa
    # First extrapolated point: golden-ratio step past xb, clamped.
    xc = max(min(xb + gold * (xb - xa), u_lim), l_lim)
    fc = sample(xc, **kwargs)
    funccalls += 1

    iter = 0
    # Keep stepping while the newest point is still "downhill" (fc < fb).
    while fc < fb:
        # Parabolic fit through (xa, fa), (xb, fb), (xc, fc): the vertex w
        # is the next trial point.
        tmp1 = (xb - xa) * (fb - fc)
        tmp2 = (xb - xc) * (fb - fa)
        val = tmp2 - tmp1
        if np.abs(val) < verysmall_num:
            # Near-degenerate fit; substitute a tiny denominator to avoid
            # division by zero (sign of `val` is lost — same as SciPy).
            denom = 2.0 * verysmall_num
        else:
            denom = 2.0 * val
        w = max(min((xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom), u_lim), l_lim)
        # Farthest point the parabolic step is allowed to reach.
        wlim = max(min(xb + grow_limit * (xc - xb), u_lim), l_lim)

        if iter > maxiter:
            print("Failed to converge")
            break

        iter += 1
        if (w - xc) * (xb - w) > 0.0:
            # w lies between xb and xc.
            fw = sample(w, **kwargs)
            funccalls += 1
            if fw < fc:
                # Minimum bracketed between xb and xc: (xb, w, xc).
                xa = max(min(xb, u_lim), l_lim)
                xb = max(min(w, u_lim), l_lim)
                fa = fb
                fb = fw
                break
            elif fw > fb:
                # Minimum bracketed between xa and w: (xa, xb, w).
                xc = max(min(w, u_lim), l_lim)
                fc = fw
                break
            # Parabolic step didn't help; fall back to a golden-ratio step.
            w = max(min(xc + gold * (xc - xb), u_lim), l_lim)
            fw = sample(w, **kwargs)
            funccalls += 1
        elif (w - wlim) * (wlim - xc) >= 0.0:
            # Parabolic step overshot the growth limit; clip to wlim.
            w = wlim
            fw = sample(w, **kwargs)
            funccalls += 1
        elif (w - wlim) * (xc - w) > 0.0:
            # w lies between xc and wlim.
            fw = sample(w, **kwargs)
            funccalls += 1
            if fw < fc:
                # Still downhill at w: shift the triple forward and take
                # another golden-ratio step.
                xb = max(min(xc, u_lim), l_lim)
                xc = max(min(w, u_lim), l_lim)
                w = max(min(xc + gold * (xc - xb), u_lim), l_lim)
                fb = fc
                fc = fw
                fw = sample(w, **kwargs)
                funccalls += 1
        else:
            # Reject the parabolic candidate; plain golden-ratio step.
            w = max(min(xc + gold * (xc - xb), u_lim), l_lim)
            fw = sample(w, **kwargs)
            funccalls += 1
        # Slide the bracketing triple one step forward: (xa,xb,xc) <- (xb,xc,w).
        xa = max(min(xb, u_lim), l_lim)
        xb = max(min(xc, u_lim), l_lim)
        xc = max(min(w, u_lim), l_lim)
        fa = fb
        fb = fc
        fc = fw

    return xa, xb, xc, fa, fb, fc, funccalls


def brent(tol=0.01, maxiter=20, **kwargs):
mintol = 1.0e-11
cg = 0.3819660

xa, xb, xc, fa, fb, fc, funccalls = bracket(
xa=0.75, xb=0.95, maxiter=maxiter, **kwargs
)
funccalls = 0
xb = 0.75
fb = sample(xb, **kwargs)

#################################
# BEGIN
#################################
x = w = v = xb
fw = fv = fx = fb
if xa < xc:
a = xa
b = xc
else:
a = xc
b = xa
a = 0.75
b = 0.95
deltax = 0.0
iter = 0

Expand Down
11 changes: 3 additions & 8 deletions tests/simulator_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,8 @@ def test_simulate(self):
learn_cnt_per_day,
memorized_cnt_per_day,
cost_per_day,
) = simulate(w=DEFAULT_PARAMETER)
card_table.to_csv("card_table.csv", index=False)
print(review_cnt_per_day)
print(learn_cnt_per_day)
print(memorized_cnt_per_day)
print(cost_per_day)
# assert memorized_cnt_per_day[-1] == 3145.3779679589484
) = simulate(w=DEFAULT_PARAMETER, request_retention=0.9)
assert memorized_cnt_per_day[-1] == 5918.574208243532

def test_optimal_retention(self):
default_params = {
Expand All @@ -29,4 +24,4 @@ def test_optimal_retention(self):
"loss_aversion": 2.5,
}
r = optimal_retention(**default_params)
assert r == 0.7791796050312
assert r == 0.8346739534878145

0 comments on commit 4b56fd9

Please sign in to comment.