Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Cf/baseline #64

Merged
merged 7 commits into from
Jul 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 9 additions & 5 deletions analysis/sampled_noise_effect.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@
noisy_env = NoisySingleQubitEnv(env_config)

n_targets = 1000
abs_superop_fidelity_differences = []
noisy_superop_fidelities = []
noiseless_superop_fidelities = []
for _ in range(n_targets):
# Set random U_target
U_target = RandomSU2().get_matrix()
Expand All @@ -43,13 +44,16 @@
noisy_fidelity = noisy_env.compute_fidelity()
noisy_env.U = noiseless_superop
noiseless_fidelity = noisy_env.compute_fidelity()

# Compute superoperator fidelity difference
superop_fidelity_difference = noiseless_fidelity - noisy_fidelity
abs_superop_fidelity_differences.append(np.abs(superop_fidelity_difference))
noisy_superop_fidelities.append(noisy_fidelity)
noiseless_superop_fidelities.append(noiseless_fidelity)

# Reset envs
noiseless_env.reset()
noisy_env.reset()

abs_superop_fidelity_differences = [np.abs(noisy_fidelity - noiseless_fidelity) for (noisy_fidelity, noiseless_fidelity)
in zip(noisy_superop_fidelities, noiseless_superop_fidelities)]

print("Mean noisy superoperator fidelity: ", np.mean(noisy_superop_fidelities))
print("Mean noiseless superoperator fidelity: ", np.mean(noiseless_superop_fidelities))
print("Mean fidelity difference: ", np.mean(abs_superop_fidelity_differences))
57 changes: 57 additions & 0 deletions scripts/baseline.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
"""
Steps through the environment taking constant actions per step.
E.g., for a 2-step episode with an action space of size 3, the actions should have a shape of (2 x 3).

The intended use of this script is to take the best actions from the noiseless environment
and apply them to the noisy environment to serve as a baseline to compare against an
agent trained on the noisy environment.
"""
from relaqs.api.utils import sample_noise_parameters
from relaqs.environments import NoisySingleQubitEnv
from relaqs.api import gates
from qutip.operators import sigmaz, sigmam
from relaqs.save_results import SaveResults
from relaqs.plot_data import plot_data
from relaqs.api.utils import get_best_actions

def run_baseline_actions(actions: list,
        target_gate: gates.Gate = gates.X(),
        n_episodes: int = 1,
        steps_per_episode: int = 2,
        save: bool = True,
        plot: bool = True,
        ):
    """Replay a fixed sequence of actions on the noisy single-qubit environment.

    The same `actions` are applied every episode: `actions[step_id]` is passed
    to `env.step` for each of the first `steps_per_episode` steps, then the
    environment is reset.

    Args:
        actions: One action vector per step; must contain at least
            `steps_per_episode` entries (e.g. shape (2 x 3) for a 2-step
            episode with a 3-dimensional action space).
        target_gate: Gate whose matrix is used as the environment's U_target.
        n_episodes: Number of episodes to replay.
        steps_per_episode: Steps taken before each environment reset.
        save: Whether to save results via SaveResults.
        plot: Whether to plot the saved results (requires save=True).

    Raises:
        ValueError: If plot is True while save is False.
    """
    # Fail fast: plotting reads from the save directory, so it needs save=True.
    # (Previously this was an `assert` checked after the episodes ran; asserts
    # are stripped under `python -O`, which would leave `save_dir` undefined.)
    if plot and not save:
        raise ValueError("If plot=True, then save must also be set to True")

    env_config = NoisySingleQubitEnv.get_default_env_config()
    t1_list, t2_list, detuning_list = sample_noise_parameters()
    env_config["relaxation_rates_list"] = [t1_list, t2_list]
    env_config["relaxation_ops"] = [sigmam(),sigmaz()]
    env_config["detuning_list"] = detuning_list
    env_config["U_target"] = target_gate.get_matrix()
    noisy_env = NoisySingleQubitEnv(env_config)

    # Apply the same constant actions in every episode.
    for _ in range(n_episodes):
        for step_id in range(steps_per_episode):
            noisy_env.step(actions[step_id])
        noisy_env.reset()

    # ---------------------> Save Results <-------------------------
    if save is True:
        sr = SaveResults(noisy_env, target_gate_string=str(target_gate))
        save_dir = sr.save_results()
        print("Results saved to:", save_dir)
    # --------------------------------------------------------------

    # ---------------------> Plot Data <-------------------------
    if plot is True:
        plot_data(save_dir, episode_length=steps_per_episode, figure_title=str(target_gate) + " baseline actions, gamma/7")
        print("Plots Created")
    # --------------------------------------------------------------

if __name__ == "__main__":
target_gate = gates.X()
n_episodes = 25000
steps_per_episode = 2
file = "/Users/collinfarquhar/Code/rl-repo/results/paper_results/noiseless/2024-07-11_09-45-35_X/env_data.csv"
actions, fidelity = get_best_actions(file)
run_baseline_actions(actions, target_gate, n_episodes, steps_per_episode)
22 changes: 13 additions & 9 deletions scripts/run_and_save_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,18 @@

import ray
from ray.rllib.algorithms.ddpg import DDPGConfig
import gymnasium as gym
from relaqs.environments.single_qubit_env import SingleQubitEnv
from relaqs.environments.noisy_single_qubit_env import NoisySingleQubitEnv
from relaqs.save_results import SaveResults
from relaqs.plot_data import plot_data
from relaqs.api import gates

def run(env_class=SingleQubitEnv, n_training_iterations=1, save=True, plot=True):
def run(env_class: gym.Env = SingleQubitEnv,
target_gate: gates.Gate = gates.X(),
n_training_iterations: int = 1,
save: bool = True,
plot: bool = True):
ray.init()

# ---------------------> Configure algorithm and Environment <-------------------------
Expand All @@ -17,7 +22,6 @@ def run(env_class=SingleQubitEnv, n_training_iterations=1, save=True, plot=True)
env_config = env_class.get_default_env_config()

# Set target gate
target_gate = gates.H()
env_config["U_target"] = target_gate.get_matrix()

alg_config.environment(env_class, env_config=env_config)
Expand Down Expand Up @@ -52,14 +56,14 @@ def run(env_class=SingleQubitEnv, n_training_iterations=1, save=True, plot=True)
if plot is True:
assert save is True, "If plot=True, then save must also be set to True"
print("epiosde length", alg._episode_history[0].episode_length)
plot_data(save_dir, episode_length=alg._episode_history[0].episode_length)
plot_data(save_dir, episode_length=alg._episode_history[0].episode_length, figure_title=str(target_gate) + " noiselesss, gamma/7")
print("Plots Created")
# --------------------------------------------------------------

if __name__ == "__main__":
env_class = NoisySingleQubitEnv
n_training_iterations = 1
save = True
plot = True
run(env_class, n_training_iterations, save, plot)

env_class = SingleQubitEnv
target_gate = gates.X()
n_training_iterations = 50
save = plot = True
run(env_class, target_gate, n_training_iterations, save, plot)

30 changes: 25 additions & 5 deletions src/relaqs/api/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,13 @@ def dm_fidelity(rho, sigma):
#return np.abs(np.trace(sqrtm(sqrtm(rho) @ sigma @ sqrtm(rho))))**2
return np.trace(sqrtm(sqrtm(rho) @ sigma @ sqrtm(rho))).real**2

def sample_noise_parameters(t1_t2_noise_file, detuning_noise_file = None):
def sample_noise_parameters(t1_t2_noise_file=None, detuning_noise_file=None):
# ---------------------> Get quantum noise data <-------------------------
t1_list, t2_list = get_month_of_all_qubit_data(QUANTUM_NOISE_DATA_DIR + t1_t2_noise_file) #in seconds
if t1_t2_noise_file is None:
t1_list = np.random.uniform(40e-6, 200e-6, 100)
t2_list = np.random.uniform(40e-6, 200e-6, 100)
else:
t1_list, t2_list = get_month_of_all_qubit_data(QUANTUM_NOISE_DATA_DIR + t1_t2_noise_file) # in seconds

if detuning_noise_file is None:
mean = 0
Expand All @@ -43,7 +47,7 @@ def sample_noise_parameters(t1_t2_noise_file, detuning_noise_file = None):
else:
detunings = get_single_qubit_detuning(QUANTUM_NOISE_DATA_DIR + detuning_noise_file)

return t1_list, t2_list, detunings
return list(t1_list), list(t2_list), detunings

def do_inferencing(alg, n_episodes_for_inferencing, quantum_noise_file_path):
"""
Expand Down Expand Up @@ -89,8 +93,24 @@ def get_best_episode_information(filename):
max_fidelity_idx = fidelity.argmax()
fidelity = df.iloc[max_fidelity_idx, 0]
episode = df.iloc[max_fidelity_idx, 4]
best_episodes = df[df["Episode Id"] == episode]
return best_episodes
best_episode = df[df["Episode Id"] == episode]
return best_episode

def get_best_actions(filename):
    """Extract the per-step actions and fidelities of the best episode.

    Args:
        filename: Path to an env_data CSV readable by
            `get_best_episode_information`.

    Returns:
        Tuple of (best_actions, fidelities) where `best_actions` is a list of
        lists of floats (one inner list per step of the best episode) and
        `fidelities` is the episode's 'Fidelity' column as a numpy array.
    """
    best_episode = get_best_episode_information(filename)
    action_str_array = best_episode['Actions'].to_numpy()

    # Each entry is a bracketed, space-separated string such as "[0.1 0.2 0.3]";
    # strip the brackets and parse the values into a list of floats.
    best_actions = [[float(value) for value in actions_str.strip('[]').split()]
                    for actions_str in action_str_array]
    return best_actions, best_episode['Fidelity'].to_numpy()

def run(env_class, gate, n_training_iterations=1, noise_file=""):
"""Args
Expand Down
Loading