Skip to content

Commit

Permalink
Merge pull request #54 from akataba/cf/noise-experiments
Browse files Browse the repository at this point in the history
Cf/noise experiments
  • Loading branch information
Farquhar13 authored Jun 25, 2024
2 parents 5ccb908 + e3d52f0 commit 260fd28
Show file tree
Hide file tree
Showing 5 changed files with 82 additions and 116 deletions.
113 changes: 0 additions & 113 deletions scripts/IBM_T1_T2_H_gate.py

This file was deleted.

72 changes: 72 additions & 0 deletions scripts/IBM_noise.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import ray
import numpy as np
from ray.rllib.algorithms.ddpg import DDPGConfig
from relaqs.environments.noisy_single_qubit_env import NoisySingleQubitEnv
from relaqs.save_results import SaveResults
from relaqs.plot_data import plot_data
from relaqs.api import gates
from relaqs.api.utils import sample_noise_parameters
from qutip.operators import sigmaz, sigmam

def run(n_training_iterations=1, save=True, plot=True):
    """Train a DDPG agent to realize a noisy single-qubit X gate with IBM device noise.

    Noise parameters (T1/T2 relaxation and detuning) are sampled from the
    ibmq_belem April calibration file and injected into NoisySingleQubitEnv.

    Parameters
    ----------
    n_training_iterations : int
        Number of `alg.train()` iterations to run.
    save : bool
        If True, save environment/algorithm results via SaveResults.
    plot : bool
        If True, plot the saved results. Requires save=True, since plotting
        reads from the save directory produced by SaveResults.
    """
    ray.init()

    # ---------------------> Configure algorithm and Environment <-------------------------
    alg_config = DDPGConfig()
    alg_config.framework("torch")
    env_config = NoisySingleQubitEnv.get_default_env_config()

    # Set noise parameters sampled from the IBM calibration data.
    noise_file = "april/ibmq_belem_month_is_4.json"
    t1_list, t2_list, detuning_list = sample_noise_parameters(noise_file)
    # Relaxation rates are the reciprocals of the T1/T2 times.
    env_config["relaxation_rates_list"] = [np.reciprocal(t1_list).tolist(), np.reciprocal(t2_list).tolist()]
    env_config["delta"] = detuning_list
    env_config["relaxation_ops"] = [sigmam(), sigmaz()]
    env_config["observation_space_size"] += 1  # extra observation slot for T2

    # Set target gate
    target_gate = gates.X()
    env_config["U_target"] = target_gate.get_matrix()

    alg_config.environment(NoisySingleQubitEnv, env_config=env_config)

    alg_config.rollouts(batch_mode="complete_episodes")
    alg_config.train_batch_size = env_config["steps_per_Haar"]

    ### working 1-3 sets
    alg_config.actor_lr = 4e-5
    alg_config.critic_lr = 5e-4

    alg_config.actor_hidden_activation = "relu"
    alg_config.critic_hidden_activation = "relu"
    alg_config.num_steps_sampled_before_learning_starts = 1000
    alg_config.actor_hiddens = [30, 30, 30]
    alg_config.exploration_config["scale_timesteps"] = 10000

    alg = alg_config.build()
    # ---------------------------------------------------------------------

    # ---------------------> Train Agent <-------------------------
    results = [alg.train() for _ in range(n_training_iterations)]
    # ---------------------> Save Results <-------------------------
    if save:
        env = alg.workers.local_worker().env
        sr = SaveResults(env, alg, target_gate_string=str(target_gate))
        save_dir = sr.save_results()
        print("Results saved to:", save_dir)
    # --------------------------------------------------------------

    # ---------------------> Plot Data <-------------------------
    if plot:
        # save_dir only exists when save=True, so plotting requires saving.
        assert save, "If plot=True, then save must also be set to True"
        episode_length = alg._episode_history[0].episode_length
        print("episode length", episode_length)  # fixed typo: "epiosde" -> "episode"
        plot_data(save_dir, episode_length=episode_length, figure_title="IBM April Belem Noise, noisy X, gamma/5")
        print("Plots Created")
    # --------------------------------------------------------------

if __name__ == "__main__":
n_training_iterations = 50
save = True
plot = True
run(n_training_iterations, save, plot)

2 changes: 1 addition & 1 deletion scripts/run_and_save_v3.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def run(env_class=SingleQubitEnv, n_training_iterations=1, save=True, plot=True)
alg_config.environment(env_class, env_config=env_config)

alg_config.rollouts(batch_mode="complete_episodes")
alg_config.train_batch_size = env_config["steps_per_Haar"] # TOOD use env_config
alg_config.train_batch_size = env_config["steps_per_Haar"]

### working 1-3 sets
alg_config.actor_lr = 4e-5
Expand Down
7 changes: 5 additions & 2 deletions src/relaqs/environments/noisy_single_qubit_env.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,11 @@ def get_relaxation_rate(self):
return sampled_rate_list

def get_observation(self):
    """Build the agent's observation vector.

    Concatenates:
      - the current gate fidelity,
      - the relaxation rates floor-divided by 6283185 (comment in the
        original code says this assumes a 500 ns maximum relaxation —
        NOTE(review): `//` is integer floor division, so rates below
        6283185 map to 0; confirm truncation is intended rather than `/`),
      - the min-max normalized detuning (1E-15 added to numerator and
        denominator to avoid division by zero when all detunings are equal),
      - the flattened real-vector encoding of the current unitary U.
    """
    # NOTE: the original diff paste contained both the pre- and post-change
    # bodies; only the current (post-change) implementation is kept here.
    normalized_detuning = [(self.detuning - min(self.detuning_list) + 1E-15) / (max(self.detuning_list) - min(self.detuning_list) + 1E-15)]
    return np.append([self.compute_fidelity()] +
                     [x // 6283185 for x in self.relaxation_rate] +
                     normalized_detuning,
                     self.unitary_to_observation(self.U))  # 6283185 assuming 500 nanosecond relaxation is max

def hamiltonian(self, detuning, alpha, gamma_magnitude, gamma_phase):
    """Single-qubit control Hamiltonian.

    Combines a Z-axis drift term, (detuning + alpha)*Z, with a drive of
    magnitude gamma_magnitude applied along an axis in the X-Y plane set
    by gamma_phase.
    """
    drift = (detuning + alpha) * Z
    drive_axis = np.cos(gamma_phase) * X + np.sin(gamma_phase) * Y
    return drift + gamma_magnitude * drive_axis
Expand Down
4 changes: 4 additions & 0 deletions src/relaqs/plot_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,10 @@ def plot_data(save_dir, episode_length, figure_title=''):
avg_final_infelity_per_episode = np.round(avg_final_infelity_per_episode, rounding_precision)
avg_sum_of_rewards_per_episode = np.round(avg_sum_of_rewards_per_episode, rounding_precision)


if len(avg_final_fidelity_per_episode) >= 100:
print("Average final fidelity over last 100 episodes", np.mean(avg_final_fidelity_per_episode[-100:]))

# -------------------------------> Plotting <-------------------------------------
rcParams['font.family'] = 'serif'
mpl.style.use('seaborn-v0_8')
Expand Down

0 comments on commit 260fd28

Please sign in to comment.