Feat/output cost per day in simulator (#88)
L-M-Sherlock authored Mar 8, 2024
1 parent 73323af commit 1a3bb12
Showing 3 changed files with 47 additions and 9 deletions.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

[project]
name = "FSRS-Optimizer"
version = "4.25.3"
version = "4.26.0"
readme = "README.md"
dependencies = [
"matplotlib>=3.7.0",
40 changes: 35 additions & 5 deletions src/fsrs_optimizer/fsrs_optimizer.py
@@ -1283,9 +1283,13 @@ def find_optimal_retention(
if not verbose:
return ()

-(_, review_cnt_per_day, learn_cnt_per_day, memorized_cnt_per_day) = simulate(
-**simulate_config
-)
+(
+_,
+review_cnt_per_day,
+learn_cnt_per_day,
+memorized_cnt_per_day,
+cost_per_day,
+) = simulate(**simulate_config)

def moving_average(data, window_size=365 // 20):
weights = np.ones(window_size) / window_size
@@ -1324,9 +1328,16 @@ def moving_average(data, window_size=365 // 20):
ax.legend()
ax.grid(True)

-fig5 = workload_graph(simulate_config)
+fig5 = plt.figure()
+ax = fig5.gca()
+ax.plot(cost_per_day, label=f"R={self.optimal_retention*100:.0f}%")
+ax.set_title("Cost per Day")
+ax.legend()
+ax.grid(True)

+fig6 = workload_graph(simulate_config)

-return (fig1, fig2, fig3, fig4, fig5)
+return (fig1, fig2, fig3, fig4, fig5, fig6)

def evaluate(self, save_to_file=True):
my_collection = Collection(DEFAULT_WEIGHT)
@@ -1870,3 +1881,22 @@ def rmse_matrix(df):
return mean_squared_error(
tmp["y"], tmp["p"], sample_weight=tmp["card_id"], squared=False
)


+if __name__ == "__main__":
+model = FSRS(DEFAULT_WEIGHT)
+stability = torch.tensor([5.0] * 4)
+difficulty = torch.tensor([1.0, 2.0, 3.0, 4.0])
+retention = torch.tensor([0.9, 0.8, 0.7, 0.6])
+rating = torch.tensor([1, 2, 3, 4])
+state = torch.stack([stability, difficulty]).unsqueeze(0)
+s_recall = model.stability_after_success(state, retention, rating)
+print(s_recall)
+s_forget = model.stability_after_failure(state, retention)
+print(s_forget)

+retentions = torch.tensor([0.1, 0.2, 0.3, 0.4])
+labels = torch.tensor([0.0, 1.0, 0.0, 1.0])
+loss_fn = nn.BCELoss()
+loss = loss_fn(retentions, labels)
+print(loss)
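
Note (not part of the commit): with this change, find_optimal_retention returns six figures in verbose mode instead of five; fig5 is now the "Cost per Day" plot and the workload graph moves to fig6. A minimal sketch of how a downstream caller might adapt follows. The Optimizer import path, the no-argument construction, the `verbose` keyword (inferred from the `if not verbose:` guard above), the assumption that its other parameters have defaults, and the output filenames are all illustrative assumptions, not the library's documented API.

from fsrs_optimizer.fsrs_optimizer import Optimizer  # assumed import path

optimizer = Optimizer()
# ... fitting steps on the user's review history omitted here ...
figs = optimizer.find_optimal_retention(verbose=True)  # `verbose` kwarg is an assumption
if figs:  # an empty tuple comes back when verbose is falsy
    fig1, fig2, fig3, fig4, fig5, fig6 = figs  # six figures as of this commit
    fig5.savefig("cost_per_day.png")  # the new "Cost per Day" plot
    fig6.savefig("workload.png")  # the workload graph, previously returned as fig5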
14 changes: 11 additions & 3 deletions src/fsrs_optimizer/fsrs_simulator.py
@@ -61,6 +61,7 @@ def simulate(
review_cnt_per_day = np.zeros(learn_span)
learn_cnt_per_day = np.zeros(learn_span)
memorized_cnt_per_day = np.zeros(learn_span)
+cost_per_day = np.zeros(learn_span)

def stability_after_success(s, r, d, response):
hard_penalty = np.where(response == 2, w[15], 1)
@@ -176,7 +177,14 @@ def stability_after_failure(s, r, d):
review_cnt_per_day[today] = np.sum(true_review)
learn_cnt_per_day[today] = np.sum(true_learn)
memorized_cnt_per_day[today] = card_table[col["retrievability"]].sum()
-return card_table, review_cnt_per_day, learn_cnt_per_day, memorized_cnt_per_day
+cost_per_day[today] = card_table[col["cost"]][true_review | true_learn].sum()
+return (
+card_table,
+review_cnt_per_day,
+learn_cnt_per_day,
+memorized_cnt_per_day,
+cost_per_day,
+)


def optimal_retention(**kwargs):
@@ -200,7 +208,7 @@ def sample(
):
memorization = []
for i in range(SAMPLE_SIZE):
-_, _, _, memorized_cnt_per_day = simulate(
+_, _, _, memorized_cnt_per_day, _ = simulate(
w,
request_retention=r,
deck_size=deck_size,
@@ -613,7 +621,7 @@ def workload_graph(default_params):
"first_rating_prob": np.array([0.15, 0.2, 0.6, 0.05]),
"review_rating_prob": np.array([0.3, 0.6, 0.1]),
}
-(_, review_cnt_per_day, learn_cnt_per_day, memorized_cnt_per_day) = simulate(
+(_, review_cnt_per_day, learn_cnt_per_day, memorized_cnt_per_day, _) = simulate(
w=default_params["w"],
max_cost_perday=math.inf,
learn_limit_perday=10,
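
Note (not part of the commit): a minimal usage sketch of the updated simulate() interface, which now returns cost_per_day as a fifth element. The import paths, the parameter values, and the assumption that unlisted simulate() parameters have usable defaults are illustrative guesses; the keyword names used below (w, request_retention, deck_size, learn_span) appear in this diff, and DEFAULT_WEIGHT is referenced in fsrs_optimizer.py above.

import matplotlib.pyplot as plt

from fsrs_optimizer.fsrs_optimizer import DEFAULT_WEIGHT  # assumed import path
from fsrs_optimizer.fsrs_simulator import simulate  # assumed import path

(
    card_table,
    review_cnt_per_day,
    learn_cnt_per_day,
    memorized_cnt_per_day,
    cost_per_day,  # new fifth element added by this commit
) = simulate(
    w=DEFAULT_WEIGHT,
    request_retention=0.9,  # illustrative values; other parameters assumed to default
    deck_size=10000,
    learn_span=365,
)

plt.plot(cost_per_day)
plt.xlabel("day")
plt.ylabel("cost")
plt.title("Cost per Day")
plt.show()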
