diff --git a/params/best_lstm_bayesian_params.json b/params/best_lstm_bayesian_params.json
index 7dc4b3a..5cfe918 100644
--- a/params/best_lstm_bayesian_params.json
+++ b/params/best_lstm_bayesian_params.json
@@ -1,6 +1,6 @@
 {
-    "batch_size": 62.598222275087714,
-    "epochs": 78.22049407993065,
-    "learning_rate": 0.009939390993585992,
-    "num_units": 135.79288308193682
+    "batch_size": 29.319082252806748,
+    "epochs": 18.493564427131048,
+    "learning_rate": 0.0019000671753502962,
+    "num_units": 75.73788863061976
 }
\ No newline at end of file
diff --git a/src/lstm_bayesian_torch.py b/src/lstm_bayesian_torch.py
index b4033a3..d02f600 100644
--- a/src/lstm_bayesian_torch.py
+++ b/src/lstm_bayesian_torch.py
@@ -425,31 +425,32 @@ def main():
     init_points = 5
     optimizer.maximize(init_points=init_points, n_iter=0)
     torch.cuda.empty_cache()
-    seen_params = set()
+    # seen_params = set()
 
     # Run Bayesian Optimization iterations with batch parallelization
-    n_iter = 10
-    for _ in range(n_iter):
-        logger.info("Generating batch of suggestions.")
-        suggested_params = generate_unique_params(bounds, 5, seen_params)
-        logger.info(f"Evaluating {len(suggested_params)} parameter sets in parallel.")
+    n_iter = 5
+    # for _ in range(n_iter):
+    #     logger.info("Generating batch of suggestions.")
+    #     suggested_params = generate_unique_params(bounds, 5, seen_params)
+    #     logger.info(f"Evaluating {len(suggested_params)} parameter sets in parallel.")
 
-        # Evaluate in parallel
-        results = parallel_evaluate(
-            suggested_params,
-            seq_length,
-            target_indices,
-            product_train,
-            X_train,
-            product_test,
-            X_test,
-            y_train,
-            y_test,
-            num_products,
-            embedding_dim
-        )
-        for params, result in zip(suggested_params, results):
-            optimizer.register(params=params, target=result)
+    #     # Evaluate in parallel
+    #     results = parallel_evaluate(
+    #         suggested_params,
+    #         seq_length,
+    #         target_indices,
+    #         product_train,
+    #         X_train,
+    #         product_test,
+    #         X_test,
+    #         y_train,
+    #         y_test,
+    #         num_products,
+    #         embedding_dim
+    #     )
+    #     for params, result in zip(suggested_params, results):
+    #         optimizer.register(params=params, target=result)
+    optimizer.maximize(init_points=init_points, n_iter=n_iter)
 
     best_params = optimizer.max['params']
     logger.info(f"Best parameters found: {best_params}")
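
Note (not part of the diff): a minimal, self-contained sketch of the path this change enables, letting bayes_opt's built-in maximize() run init_points random probes plus n_iter guided iterations instead of the commented-out manual suggest/evaluate/register loop. The objective function and bounds below are hypothetical stand-ins for the real LSTM training objective in lstm_bayesian_torch.py.

from bayes_opt import BayesianOptimization

def objective(batch_size, epochs, learning_rate, num_units):
    # Hypothetical stand-in for the real training objective: the actual
    # function would cast these floats to ints where needed, train the LSTM,
    # and return a score to maximize (e.g. negative validation loss).
    return -((learning_rate - 0.002) ** 2 + 1e-4 * (num_units - 80) ** 2)

# Assumed search bounds; the real pbounds dict is defined elsewhere in main().
bounds = {
    "batch_size": (16, 128),
    "epochs": (10, 100),
    "learning_rate": (1e-4, 1e-2),
    "num_units": (32, 256),
}

optimizer = BayesianOptimization(f=objective, pbounds=bounds, random_state=42)
# init_points random probes followed by n_iter guided steps, mirroring the
# second maximize() call that replaces the parallel_evaluate loop in the diff.
optimizer.maximize(init_points=5, n_iter=5)
print(optimizer.max["params"])  # same structure as best_params in the diff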