Skip to content

Commit

Permalink
reverting to changes from the main branch. This is to remove code not related to tests passing
Browse files Browse the repository at this point in the history
  • Loading branch information
akataba committed Mar 19, 2024
1 parent 0b80bd5 commit 8371cf2
Show file tree
Hide file tree
Showing 2 changed files with 37 additions and 71 deletions.
30 changes: 10 additions & 20 deletions scripts/deterministic_agent.py
Original file line number Diff line number Diff line change
@@ -1,49 +1,41 @@
from relaqs.save_results import SaveResults
import numpy as np
from relaqs.api.utils import (run,
run_noisy_one_qubit_experiment,
sample_noise_parameters,
get_best_episode_information,
return_env_from_alg
)
from relaqs.api.gates import H, X
from relaqs.plot_data import plot_results, plot_data
from relaqs.api.gates import H


n_training_iterations = 300
n_training_iterations = 250
figure_title ="Inferencing on multiple noisy environments with different detuning noise"
noise_file = "april/ibmq_belem_month_is_4.json"
noise_file_2 = "april/ibmq_quito_month_is_4.json"
path_to_detuning = "qubit_detuning_data.json"

# --------------------------> Training of model <-----------------------------------------------------
alg, list_of_results = run_noisy_one_qubit_experiment(X(),
alg = run(H(),
n_training_iterations,
noise_file=noise_file
)

# ----------------------- Creating the deterministic agent using actions from the best episode -------------------------------
env = return_env_from_alg(alg)
t1_list,t2_list,_ = sample_noise_parameters(noise_file_2, detuning_noise_file=path_to_detuning)
detuning_list = np.random.normal(1e8, 1e8, 9).tolist()
t1_list,t2_list, = sample_noise_parameters(noise_file_2, detuning_noise_file=path_to_detuning)
detuning_list = np.random.normal(1e8, 1e4, 9).tolist()
# t2_list = np.random.normal(1e-9, 1e-5, 135)
env.relaxation_rates_list = [np.reciprocal(t1_list).tolist(), np.reciprocal(t2_list).tolist()]
env.delta = detuning_list

sr = SaveResults(env, alg, results=list_of_results)
sr = SaveResults(env, alg)
save_dir = sr.save_results()
print("Results saved to:", save_dir)
plot_data(save_dir, episode_length=alg._episode_history[0].episode_length, figure_title=figure_title)
plot_results(save_dir, figure_title=figure_title)

# best_episode_information = get_best_episode_information(save_dir + "env_data.csv")
best_episode_information = get_best_episode_information(save_dir + "env_data.pkl")
best_episode_information = get_best_episode_information(save_dir + "env_data.csv")

actions = [np.asarray(eval(best_episode_information.iloc[0,2])), np.asarray(eval(best_episode_information.iloc[1,2]))]

print("best_episode_information actions first row: ", best_episode_information["Actions"].iloc[0])
print("best_episode_information actions second row: ", best_episode_information["Actions"].iloc[1])
# actions = [np.asarray(eval(best_episode_information.iloc[0,2])), np.asarray(eval(best_episode_information.iloc[1,2]))]
actions = [best_episode_information["Actions"].iloc[0], best_episode_information["Actions"].iloc[1]]
print("actions: ", actions)
num_episodes = 0
episode_reward = 0.0

Expand All @@ -60,6 +52,4 @@
print(f"Episode done: Total reward = {episode_reward}")
obs, info = env.reset()
num_episodes += 1
episode_reward = 0.0


episode_reward = 0.0
78 changes: 27 additions & 51 deletions scripts/inferencing.py
Original file line number Diff line number Diff line change
@@ -1,59 +1,35 @@
from relaqs.save_results import SaveResults
from relaqs.plot_data import plot_data, plot_results
from relaqs.plot_data import plot_data
import numpy as np
from relaqs.api.utils import do_inferencing, get_best_episode_information
from relaqs.api.gates import X, H
from relaqs.api.utils import (
run_noisy_one_qubit_experiment,
sample_noise_parameters,
return_env_from_alg
)

best_fidelities_found = []
for _ in range(6):
n_training_iterations = 250
n_episodes_for_inferencing= 400
figure_title ="Inferencing on multiple noisy environments with different detuning noise for X gate"
noise_file = "april/ibmq_belem_month_is_4.json"
noise_file_2 = "april/ibmq_quito_month_is_4.json"
path_to_detuning = "qubit_detuning_data.json"

# -----------------------> Training model <------------------------
alg, list_of_results = run_noisy_one_qubit_experiment(X(),
n_training_iterations,
from relaqs.api.utils import do_inferencing, run
from relaqs.api.gates import H

noise_file = "april/ibmq_belem_month_is_4.json"
inferencing_noise_file = "april/ibmq_manila_month_is_4.json"
n_episodes_for_inferencing = 10
save = True
plot = True
figure_title = "Inferencing with model"
n_training_iterations = 1

# -----------------------> Training model <------------------------
alg = run(gate=H(),
n_training_iterations=n_training_iterations,
noise_file=noise_file
)

# ----------------------- Creating new environment with new detuning -------------------------------
env = return_env_from_alg(alg)
t1_list,t2_list,_ = sample_noise_parameters(noise_file_2, detuning_noise_file=path_to_detuning)
detuning_list = np.random.normal(1e8, 1e12, 9).tolist()
# t2_list = np.random.normal(1e-9, 1e-5, 135)x
env.relaxation_rates_list = [np.reciprocal(t1_list).tolist(), np.reciprocal(t2_list).tolist()]
env.delta = detuning_list

# -----------------------> Inferencing <---------------------------
inferencing_env, inferencing_alg = do_inferencing(alg, n_episodes_for_inferencing,quantum_noise_file_path=noise_file_2)

# -------------------> Save Inferencing Results <---------------------------------------
sr = SaveResults(inferencing_env, inferencing_alg)
save_dir = sr.save_results()
print("Results saved to:", save_dir)
# best_episode_information = get_best_episode_information(save_dir + "env_data.csv")
best_episode_information = get_best_episode_information(save_dir + "env_data.pkl")

print("Fidelities from best epsiode: ", [best_episode_information.iloc[0,0], best_episode_information.iloc[1,0]])
best_fidelities_found.append((best_episode_information.iloc[0,0],best_episode_information.iloc[1,0] ))
best_fidelity_tuple = str((best_episode_information.iloc[0,0],best_episode_information.iloc[1,0]))
best_fidelity_file = "best_fidelities.txt"
with open(save_dir + best_fidelity_file, 'w') as file:
file.write(best_fidelity_tuple)
)

# -----------------------> Inferencing <---------------------------
env, alg = do_inferencing(alg, n_episodes_for_inferencing,quantum_noise_file_path=inferencing_noise_file)

# ---------------------> Plot Data <-------------------------------------------
plot_data(save_dir, episode_length=inferencing_alg._episode_history[0].episode_length, figure_title=figure_title)

# -------------------> Save Inferencing Results <---------------------------------------
sr = SaveResults(env, alg)
save_dir = sr.save_results()
print("Results saved to:", save_dir)

print(best_fidelities_found)
# ---------------------> Plot Data <-------------------------------------------
assert save is True, "If plot=True, then save must also be set to True"

plot_data(save_dir, episode_length=alg._episode_history[0].episode_length, figure_title=figure_title)
print("Plots Created")
# --------------------------------------------------------------

0 comments on commit 8371cf2

Please sign in to comment.