feat: lstm bayesian, without parallel evaluation
freddysongg committed Nov 28, 2024
1 parent 7732cec commit 80747a3
Showing 2 changed files with 27 additions and 26 deletions.
8 changes: 4 additions & 4 deletions params/best_lstm_bayesian_params.json
@@ -1,6 +1,6 @@
 {
-  "batch_size": 62.598222275087714,
-  "epochs": 78.22049407993065,
-  "learning_rate": 0.009939390993585992,
-  "num_units": 135.79288308193682
+  "batch_size": 29.319082252806748,
+  "epochs": 18.493564427131048,
+  "learning_rate": 0.0019000671753502962,
+  "num_units": 75.73788863061976
 }
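The stored values are floats because the Bayesian optimizer searches a continuous space. Below is a minimal sketch, not part of this commit, of how a consumer of this file might load and cast the integer-valued hyperparameters before training; the rounding step is an assumption.

import json

# Load the tuned hyperparameters written by the optimization run.
with open("params/best_lstm_bayesian_params.json") as f:
    raw = json.load(f)

# batch_size, epochs, and num_units must be integers at training time,
# so round the continuous suggestions; learning_rate stays a float.
params = {
    "batch_size": int(round(raw["batch_size"])),   # 29.319... -> 29
    "epochs": int(round(raw["epochs"])),           # 18.493... -> 18
    "learning_rate": raw["learning_rate"],
    "num_units": int(round(raw["num_units"])),     # 75.737... -> 76
}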
45 changes: 23 additions & 22 deletions src/lstm_bayesian_torch.py
@@ -425,31 +425,32 @@ def main():
     init_points = 5
     optimizer.maximize(init_points=init_points, n_iter=0)
     torch.cuda.empty_cache()
-    seen_params = set()
+    # seen_params = set()

     # Run Bayesian Optimization iterations with batch parallelization
-    n_iter = 10
-    for _ in range(n_iter):
-        logger.info("Generating batch of suggestions.")
-        suggested_params = generate_unique_params(bounds, 5, seen_params)
-        logger.info(f"Evaluating {len(suggested_params)} parameter sets in parallel.")
+    n_iter = 5
+    # for _ in range(n_iter):
+    #     logger.info("Generating batch of suggestions.")
+    #     suggested_params = generate_unique_params(bounds, 5, seen_params)
+    #     logger.info(f"Evaluating {len(suggested_params)} parameter sets in parallel.")

-        # Evaluate in parallel
-        results = parallel_evaluate(
-            suggested_params,
-            seq_length,
-            target_indices,
-            product_train,
-            X_train,
-            product_test,
-            X_test,
-            y_train,
-            y_test,
-            num_products,
-            embedding_dim
-        )
-        for params, result in zip(suggested_params, results):
-            optimizer.register(params=params, target=result)
+    # # Evaluate in parallel
+    # results = parallel_evaluate(
+    #     suggested_params,
+    #     seq_length,
+    #     target_indices,
+    #     product_train,
+    #     X_train,
+    #     product_test,
+    #     X_test,
+    #     y_train,
+    #     y_test,
+    #     num_products,
+    #     embedding_dim
+    # )
+    # for params, result in zip(suggested_params, results):
+    #     optimizer.register(params=params, target=result)
+    optimizer.maximize(init_points=init_points, n_iter=n_iter)

     best_params = optimizer.max['params']
     logger.info(f"Best parameters found: {best_params}")