diff --git a/experiments/air_quality/air_quality_bayesnewton.slrm b/experiments/air_quality/air_quality_bayesnewton.slrm deleted file mode 100644 index 6e47daa..0000000 --- a/experiments/air_quality/air_quality_bayesnewton.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 8 -#SBATCH --mem-per-cpu=5000 -#SBATCH --array=0-4 -#SBATCH -o air_newt-%a.out -module load miniconda -source activate venv - -srun python air_quality_bayesnewton.py $SLURM_ARRAY_TASK_ID 0 0 \ No newline at end of file diff --git a/experiments/air_quality/air_quality_bayesnewton_mf.slrm b/experiments/air_quality/air_quality_bayesnewton_mf.slrm deleted file mode 100644 index 9611914..0000000 --- a/experiments/air_quality/air_quality_bayesnewton_mf.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 8 -#SBATCH --mem-per-cpu=5000 -#SBATCH --array=0-4 -#SBATCH -o air_newt-%a.out -module load miniconda -source activate venv - -srun python air_quality_bayesnewton.py $SLURM_ARRAY_TASK_ID 1 0 \ No newline at end of file diff --git a/experiments/air_quality/air_quality_gpflow0.slrm b/experiments/air_quality/air_quality_gpflow0.slrm deleted file mode 100644 index 18cc911..0000000 --- a/experiments/air_quality/air_quality_gpflow0.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=4000 -#SBATCH --array=0-4 -#SBATCH -o air_gpflow-%a.out -module load miniconda -source activate venv - -srun python air_quality_gpflow.py $SLURM_ARRAY_TASK_ID 0 \ No newline at end of file diff --git a/experiments/air_quality/air_quality_gpflow1.slrm b/experiments/air_quality/air_quality_gpflow1.slrm deleted file mode 100644 index 857cf1f..0000000 --- a/experiments/air_quality/air_quality_gpflow1.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=4000 -#SBATCH --array=0-4 -#SBATCH -o air_gpflow-%a.out -module load miniconda -source activate venv - -srun python air_quality_gpflow.py $SLURM_ARRAY_TASK_ID 1 \ No newline at end of file diff --git a/experiments/air_quality/air_quality_gpflow2.slrm b/experiments/air_quality/air_quality_gpflow2.slrm deleted file mode 100644 index b7b4919..0000000 --- a/experiments/air_quality/air_quality_gpflow2.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=4000 -#SBATCH --array=0-4 -#SBATCH -o air_gpflow-%a.out -module load miniconda -source activate venv - -srun python air_quality_gpflow.py $SLURM_ARRAY_TASK_ID 2 \ No newline at end of file diff --git a/experiments/air_quality/air_quality_ski.slrm b/experiments/air_quality/air_quality_ski.slrm deleted file mode 100644 index c4b0ad3..0000000 --- a/experiments/air_quality/air_quality_ski.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 8 -#SBATCH --mem-per-cpu=5000 -#SBATCH --array=0-4 -#SBATCH -o air_ski-%a.out -module load miniconda -source activate venv - -srun python air_quality_ski.py $SLURM_ARRAY_TASK_ID \ No newline at end of file diff --git a/experiments/air_quality/results.py b/experiments/air_quality/results.py deleted file mode 100644 index a8591f1..0000000 --- a/experiments/air_quality/results.py +++ /dev/null @@ -1,298 +0,0 @@ -import pickle -import numpy as np - -cpugpu = 'gpu' - -if cpugpu == 'gpu': - par = 1 -else: - par = 0 - -print('ST-SVGP') - -ind_time = np.zeros([5]) -for 
ind in range(5): - with open("output/0_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/0_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/0_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('MF-ST-SVGP') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/1_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/1_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/1_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('SVGP0') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_0_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_0_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_0_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('SVGP1') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_1_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_1_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_1_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - 
-print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('SVGP2') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_2_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_2_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_2_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -if cpugpu == 'gpu': - - print('SVGP3') - - ind_time = np.zeros([5]) - for ind in range(5): - with open("output/gpflow_" + str(ind) + "_3_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - - ind_rmse = np.zeros([5]) - for ind in range(5): - with open("output/gpflow_" + str(ind) + "_3_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - - ind_nlpd = np.zeros([5]) - for ind in range(5): - with open("output/gpflow_" + str(ind) + "_3_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - - print('time:') - np.set_printoptions(precision=3) - print(np.mean(ind_time)) - np.set_printoptions(precision=2) - print(np.std(ind_time)) - - print('rmse:') - np.set_printoptions(precision=3) - print(np.mean(ind_rmse)) - np.set_printoptions(precision=2) - print(np.std(ind_rmse)) - - print('nlpd:') - np.set_printoptions(precision=3) - print(np.mean(ind_nlpd)) - np.set_printoptions(precision=2) - print(np.std(ind_nlpd)) - - - print('SVGP4') - - ind_time = np.zeros([5]) - for ind in range(5): - with open("output/gpflow_" + str(ind) + "_4_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - - ind_rmse = np.zeros([5]) - for ind in range(5): - with open("output/gpflow_" + str(ind) + "_4_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - - ind_nlpd = np.zeros([5]) - for ind in range(5): - with open("output/gpflow_" + str(ind) + "_4_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - - print('time:') - np.set_printoptions(precision=3) - print(np.mean(ind_time)) - np.set_printoptions(precision=2) - print(np.std(ind_time)) - - print('rmse:') - np.set_printoptions(precision=3) - print(np.mean(ind_rmse)) - np.set_printoptions(precision=2) - print(np.std(ind_rmse)) - - print('nlpd:') - np.set_printoptions(precision=3) - print(np.mean(ind_nlpd)) - np.set_printoptions(precision=2) - print(np.std(ind_nlpd)) - - -print('SKI') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/ski_" + str(ind) + "_" + str(par) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/ski_" + str(ind) + "_" + str(par) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - 
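The blocks in this script repeat one load-and-summarise pass per model; the underlying pattern is a single helper over (fold, metric). A minimal sketch, assuming the same output/ pickle layout used above — the format string (e.g. "output/0_{fold}_1_gpu_{metric}.txt") is a hypothetical example and must match the file names in this script:

import pickle
import numpy as np

def summarise(pattern, n_folds=5):
    # Print mean +/- std of one pickled scalar per fold, for each metric.
    for metric in ("time", "rmse", "nlpd"):
        vals = np.zeros(n_folds)
        for fold in range(n_folds):
            with open(pattern.format(fold=fold, metric=metric), "rb") as fp:
                vals[fold] = pickle.load(fp)
        print("%s: %.3f +/- %.2f" % (metric, vals.mean(), vals.std()))

Each model section above then reduces to one call, e.g. summarise("output/0_{fold}_" + str(par) + "_" + cpugpu + "_{metric}.txt") for ST-SVGP.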
-ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/ski_" + str(ind) + "_" + str(par) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) diff --git a/experiments/air_quality/timings_parallel.sh b/experiments/air_quality/timings_parallel.sh deleted file mode 100644 index 50caf28..0000000 --- a/experiments/air_quality/timings_parallel.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -for (( counter=0; counter<5; counter++ )) -do -python air_quality_timings.py "$counter" 0 1 -done -printf "\n" diff --git a/experiments/aircraft/aircraft.slrm b/experiments/aircraft/aircraft.slrm deleted file mode 100644 index 4a4a79f..0000000 --- a/experiments/aircraft/aircraft.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 48:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=3000 -#SBATCH --array=0-5 -#SBATCH -o aircraft-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python aircraft.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/aircraft/aircraft_baselines.slrm b/experiments/aircraft/aircraft_baselines.slrm deleted file mode 100644 index 5d2443b..0000000 --- a/experiments/aircraft/aircraft_baselines.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 48:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=3000 -#SBATCH --array=0-5 -#SBATCH -o aircraft-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. 
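Each of these .slrm files runs the same sweep: the SLURM array index picks the method (0-5) and the inner loop covers runs 0-9, with the baseline variants appending a trailing 1 to the argument list. For running outside SLURM, a hypothetical local stand-in for the sweep (not part of the original scripts) could be:

import itertools
import subprocess

# Array task id -> method index; inner loop -> run/fold index.
for method, run in itertools.product(range(6), range(10)):
    subprocess.run(["python", "aircraft.py", str(method), str(run)], check=True)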
-for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python aircraft.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/aircraft/results.py b/experiments/aircraft/results.py deleted file mode 100644 index caca00b..0000000 --- a/experiments/aircraft/results.py +++ /dev/null @@ -1,25 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) - -print('baselines:') -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/audio/audio.slrm b/experiments/audio/audio.slrm deleted file mode 100644 index 686cef7..0000000 --- a/experiments/audio/audio.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 48:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=5000 -#SBATCH --array=0-5 -#SBATCH -o audio-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python audio.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/audio/audioDFO.py b/experiments/audio/audioDFO.py deleted file mode 100644 index b255a21..0000000 --- a/experiments/audio/audioDFO.py +++ /dev/null @@ -1,366 +0,0 @@ -import sys -import bayesnewton -import objax -from bayesnewton.cubature import Unscented -import numpy as np -import matplotlib.pyplot as plt -import time -from scipy.io import loadmat - -print('loading data ...') -y_raw = loadmat('speech_female')['y'] -fs = 44100 # sampling rate (Hz) -scale_x = 1000 # convert to milliseconds -scale_y = 1. 
# scale signal up to deal with Gauss-Newton instability at low obs noise - -# normaliser = 0.5 * np.sqrt(np.var(y_raw)) -# y = y_raw / normaliser * scale_y # rescale the data -y = y_raw * scale_y # rescale the data - -# y = y[:10000] - -N = y.shape[0] -x = np.linspace(0., N, num=N) / fs * scale_x # arbitrary evenly spaced inputs -# batch_size = 20000 -M = 3000 -z = np.linspace(x[0], x[-1], num=M) - -np.random.seed(123) -# 10-fold cross-validation setup -ind_shuffled = np.random.permutation(N) -ind_split = np.stack(np.split(ind_shuffled, 10)) # 10 random batches of data indices - -if len(sys.argv) > 1: - method = int(sys.argv[1]) - # plot_final = False - # save_result = True -else: - method = 12 - # plot_final = True - # save_result = False - -if len(sys.argv) > 2: - fold = int(sys.argv[2]) -else: - fold = 0 - -if len(sys.argv) > 3: - parallel = bool(int(sys.argv[3])) -else: - parallel = None - -if len(sys.argv) > 4: - num_subbands = int(sys.argv[4]) -else: - num_subbands = 8 - -if len(sys.argv) > 5: - num_modulators = int(sys.argv[5]) -else: - num_modulators = 3 - -if len(sys.argv) > 6: - iters = int(sys.argv[6]) -else: - iters = 200 - -print('method number:', method) -print('batch number:', fold) -print('parallel:', parallel) -print('num subbands:', num_subbands) -print('num modulators:', num_modulators) -print('num iterations:', iters) - -# Get training and test indices -ind_test = ind_split[fold] # np.sort(ind_shuffled[:N//10]) -ind_train = np.concatenate(ind_split[np.arange(10) != fold]) -x_train = x[ind_train] # 90/10 train/test split -x_test = x[ind_test] -y_train = y[ind_train] -y_test = y[ind_test] - -fundamental_freq = 220 # Hz -# radial_freq = 2 * np.pi * fundamental_freq / scale # radial freq = 2pi * f / scale - -subband_kernel = bayesnewton.kernels.DivergenceFreeOscillator -modulator_kernel = bayesnewton.kernels.Matern52 -subband_frequencies = fundamental_freq / scale_x * (np.arange(num_subbands) + 1) -radial_frequencies = 2 * np.pi * subband_frequencies -# subband_lengthscales = 75. * np.ones(num_subbands) -modulator_lengthscales = 10. * np.ones(num_modulators) -# modulator_variances = 0.5 * np.ones(num_modulators) * scale_y -modulator_variances = 2. * np.ones(num_modulators) * scale_y -dfo_smoothness = 75. * radial_frequencies[0] - -subband_kernel_list = [ - subband_kernel(radial_frequency=radial_frequencies[i], smoothness=dfo_smoothness) for i in range(num_subbands) -] -modulator_kernel_list = [ - modulator_kernel(variance=modulator_variances[i], lengthscale=modulator_lengthscales[i]) for i in range(num_modulators) -] -kernel_list = subband_kernel_list + modulator_kernel_list -kern = bayesnewton.kernels.Independent( - kernels=kernel_list -) - -lik = bayesnewton.likelihoods.NonnegativeMatrixFactorisation( - num_subbands=num_subbands, - num_modulators=num_modulators, - variance=0.17 * scale_y, - # fix_variance=True -) - -if method == 0: - model = bayesnewton.models.MarkovTaylorGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, parallel=parallel) -elif method == 1: - model = bayesnewton.models.MarkovPosteriorLinearisationGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel) -elif method == 2: - # model = bayesnewton.models.MarkovExpectationPropagationGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - # parallel=parallel, power=1.) - model = bayesnewton.models.MarkovExpectationPropagationGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel, power=1.) 
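This if/elif chain maps an integer method id onto a Bayes-Newton model class (the remaining ids continue below). For reference, the same dispatch can be kept in one table; a sketch, not part of the original file, using only names already defined in this script, with the EP power passed as an extra keyword:

MODEL_TABLE = {
    0: (bayesnewton.models.MarkovTaylorGP, {}),
    1: (bayesnewton.models.MarkovPosteriorLinearisationGP, {}),
    2: (bayesnewton.models.MarkovExpectationPropagationGP, dict(power=1.)),
    3: (bayesnewton.models.MarkovExpectationPropagationGP, dict(power=0.5)),
    4: (bayesnewton.models.MarkovExpectationPropagationGP, dict(power=0.01)),
    5: (bayesnewton.models.MarkovVariationalGP, {}),
    # ... ids 6-19 continue exactly as in the branches below
}

def build_model(method):
    cls, extra = MODEL_TABLE[method]
    return cls(kernel=kern, likelihood=lik, X=x_train, Y=y_train,
               parallel=parallel, **extra)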
-elif method == 3: - # model = bayesnewton.models.MarkovExpectationPropagationGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - # parallel=parallel, power=0.5) - model = bayesnewton.models.MarkovExpectationPropagationGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel, power=0.5) -elif method == 4: - # model = bayesnewton.models.MarkovExpectationPropagationGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - # parallel=parallel, power=0.01) - model = bayesnewton.models.MarkovExpectationPropagationGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel, power=0.01) -elif method == 5: - model = bayesnewton.models.MarkovVariationalGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel) -elif method == 6: - model = bayesnewton.models.MarkovLaplaceGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, parallel=parallel) -elif method == 7: - model = bayesnewton.models.MarkovPosteriorLinearisation2ndOrderGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, - Y=y_train, parallel=parallel) -elif method == 8: - model = bayesnewton.models.MarkovVariationalGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel) -# elif method == 9: -# model = bayesnewton.models.MarkovExpectationPropagationGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, -# parallel=parallel, power=1.) -elif method == 10: - model = bayesnewton.models.MarkovVGNEPGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, parallel=parallel) -elif method == 11: - model = bayesnewton.models.MarkovGaussNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, parallel=parallel) -elif method == 12: - model = bayesnewton.models.MarkovQuasiNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel) -elif method == 13: - model = bayesnewton.models.MarkovVariationalQuasiNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel) -elif method == 14: - model = bayesnewton.models.MarkovExpectationPropagationQuasiNewtonGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel, power=0.5) -elif method == 15: - model = bayesnewton.models.MarkovPosteriorLinearisation2ndOrderQuasiNewtonGP(kernel=kern, likelihood=lik, X=x_train, - Y=y_train, parallel=parallel) -elif method == 16: - model = bayesnewton.models.MarkovPosteriorLinearisationQuasiNewtonGP(kernel=kern, likelihood=lik, X=x_train, - Y=y_train, parallel=parallel) -elif method == 17: - model = bayesnewton.models.MarkovVariationalRiemannGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, parallel=parallel) -elif method == 18: - model = bayesnewton.models.MarkovExpectationPropagationRiemannGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel) -elif method == 19: - model = bayesnewton.models.MarkovExpectationPropagationRiemannGP(kernel=kern, likelihood=lik, X=x_train, Y=y_train, - parallel=parallel, power=0.5) -print('model:', model) - -# prior_samples = np.squeeze(model.prior_sample(X=x, num_samps=1)) -# prior_samples_subbands = prior_samples[:, :num_subbands] -# prior_samples_modulators = bayesnewton.utils.softplus(prior_samples[:, num_subbands:]) -# prior_samples_sig = np.sum( -# prior_samples_subbands * (model.likelihood.weights[None] @ prior_samples_modulators[..., None])[..., 0], -# axis=-1 -# ) -# plt.figure(3, figsize=(12, 5)) -# plt.clf() -# plt.plot(x, prior_samples_sig, 'k', linewidth=0.6) -# plt.xlim(x[0], x[-1]) -# plt.legend() -# plt.xlabel('time (milliseconds)') -# -# 
plt.figure(4, figsize=(12, 8)) -# plt.subplot(2, 1, 1) -# plt.plot(x, prior_samples_subbands, linewidth=0.6) -# plt.xlim(x[0], x[-1]) -# plt.title('subbands') -# plt.subplot(2, 1, 2) -# plt.plot(x, prior_samples_modulators, linewidth=0.6) -# plt.xlim(x[0], x[-1]) -# plt.xlabel('time (milliseconds)') -# plt.title('amplitude modulators') -# -# plt.show() - - -# unscented_transform = Unscented(dim=num_modulators) # 5th-order unscented transform -# unscented_transform = Unscented(dim=num_modulators+num_subbands) # 5th-order unscented transform -unscented_transform = Unscented(dim=None) # 5th-order unscented transform - -lr_adam = 0.05 -lr_newton = 0.2 -opt_hypers = objax.optimizer.Adam(model.vars()) -energy = objax.GradValues(model.energy, model.vars()) - - -@objax.Function.with_vars(model.vars() + opt_hypers.vars()) -def train_op(): - model.inference(lr=lr_newton, cubature=unscented_transform) # perform inference and update variational params - dE, E = energy(cubature=unscented_transform) # compute energy and its gradients w.r.t. hypers - opt_hypers(lr_adam, dE) - return E - - -train_op = objax.Jit(train_op) - - -t0 = time.time() -for i in range(1, iters + 1): - if i == 2: - t2 = time.time() - loss = train_op() - print('iter %2d, energy: %1.4f' % (i, loss[0])) - print(model.likelihood.variance) - # print( - # 'lengthscales: ', - # model.kernel.kernel0.lengthscale, - # model.kernel.kernel1.lengthscale, - # model.kernel.kernel2.lengthscale, - # model.kernel.kernel3.lengthscale, - # model.kernel.kernel4.lengthscale, - # model.kernel.kernel5.lengthscale, - # model.kernel.kernel6.lengthscale, - # model.kernel.kernel7.lengthscale, - # model.kernel.kernel8.lengthscale, - # model.kernel.kernel9.lengthscale, - # model.kernel.kernel10.lengthscale, - # model.kernel.kernel11.lengthscale, - # model.kernel.kernel12.lengthscale, - # model.kernel.kernel13.lengthscale, - # model.kernel.kernel14.lengthscale, - # model.kernel.kernel15.lengthscale, - # ) - # print( - # 'variances: ', - # model.kernel.kernel0.variance, - # model.kernel.kernel1.variance, - # model.kernel.kernel2.variance, - # model.kernel.kernel3.variance, - # model.kernel.kernel4.variance, - # model.kernel.kernel5.variance, - # model.kernel.kernel6.variance, - # model.kernel.kernel7.variance, - # model.kernel.kernel8.variance, - # model.kernel.kernel9.variance, - # model.kernel.kernel10.variance, - # model.kernel.kernel11.variance, - # model.kernel.kernel12.variance, - # model.kernel.kernel13.variance, - # model.kernel.kernel14.variance, - # model.kernel.kernel15.variance, - # ) - # print( - # 'radial freqs.: ', - # model.kernel.kernel0.radial_frequency, - # model.kernel.kernel1.radial_frequency, - # model.kernel.kernel2.radial_frequency, - # model.kernel.kernel3.radial_frequency, - # model.kernel.kernel4.radial_frequency, - # model.kernel.kernel5.radial_frequency, - # model.kernel.kernel6.radial_frequency, - # model.kernel.kernel7.radial_frequency, - # model.kernel.kernel8.radial_frequency, - # model.kernel.kernel9.radial_frequency, - # model.kernel.kernel10.radial_frequency, - # model.kernel.kernel11.radial_frequency, - # ) - # print('weights: ', model.likelihood.weights) - # print('lik. variance: ', model.likelihood.variance) -t1 = time.time() -t3 = time.time() -print('optimisation time: %2.2f secs' % (t1-t0)) -print('per-iteration time (excl. 
compile): %2.2f secs' % ((t3-t2)/(iters-1))) - -# calculate posterior predictive distribution via filtering and smoothing at train & test locations: -# print('calculating the posterior predictive distribution ...') -t0 = time.time() -nlpd = model.negative_log_predictive_density(X=x_test, Y=y_test, cubature=unscented_transform) -t1 = time.time() -print('NLPD: %1.2f' % nlpd) -print('prediction time: %2.2f secs' % (t1-t0)) - -# if plot_final: -posterior_mean, posterior_var = model.predict(X=x) -# lb = posterior_mean[:, 0] - np.sqrt(posterior_var[:, 0]) * 1.96 -# ub = posterior_mean[:, 0] + np.sqrt(posterior_var[:, 0]) * 1.96 - -posterior_mean_subbands = posterior_mean[:, :num_subbands] -posterior_mean_modulators = bayesnewton.utils.softplus(posterior_mean[:, num_subbands:]) -posterior_mean_sig = np.sum( - posterior_mean_subbands * (model.likelihood.weights[None] @ posterior_mean_modulators[..., None])[..., 0], - axis=-1 -) -posterior_var_subbands = posterior_var[:, :num_subbands] -posterior_var_modulators = bayesnewton.utils.softplus(posterior_var[:, num_subbands:]) - -print('plotting ...') -plt.figure(1, figsize=(12, 5)) -plt.clf() -plt.plot(x, y, 'k', label='signal', linewidth=0.6) -plt.plot(x_test, y_test, 'g.', label='test', markersize=4) -plt.plot(x, posterior_mean_sig, 'r', label='posterior mean', linewidth=0.6) -# plt.fill_between(x_pred, lb, ub, color='r', alpha=0.05, label='95% confidence') -plt.xlim(x[0], x[-1]) -plt.legend() -plt.title('Audio Signal Processing via Kalman smoothing (human speech signal)') -plt.xlabel('time (milliseconds)') -plt.savefig('fig1.png') - -plt.figure(2, figsize=(12, 8)) -plt.subplot(2, 1, 1) -plt.plot(x, posterior_mean_subbands, linewidth=0.6) -plt.xlim(x[0], x[-1]) -# plt.plot(z, inducing_mean[:, :3, 0], 'r.', label='inducing mean', markersize=4) -plt.title('subbands') -plt.subplot(2, 1, 2) -plt.plot(x, posterior_mean_modulators, linewidth=0.6) -# plt.plot(z, softplus(inducing_mean[:, 3:, 0]), 'r.', label='inducing mean', markersize=4) -plt.xlim(x[0], x[-1]) -plt.xlabel('time (milliseconds)') -plt.title('amplitude modulators') -plt.savefig('fig2.png') - -prior_samples = np.squeeze(model.prior_sample(X=x, num_samps=1)) -prior_samples_subbands = prior_samples[:, :num_subbands] -prior_samples_modulators = bayesnewton.utils.softplus(prior_samples[:, num_subbands:]) -prior_samples_sig = np.sum( - prior_samples_subbands * (model.likelihood.weights[None] @ prior_samples_modulators[..., None])[..., 0], - axis=-1 -) -plt.figure(3, figsize=(12, 5)) -plt.clf() -plt.plot(x, prior_samples_sig, 'k', linewidth=0.6) -plt.xlim(x[0], x[-1]) -plt.legend() -plt.xlabel('time (milliseconds)') - -plt.figure(4, figsize=(12, 8)) -plt.subplot(2, 1, 1) -plt.plot(x, prior_samples_subbands, linewidth=0.6) -plt.xlim(x[0], x[-1]) -plt.title('subbands') -plt.subplot(2, 1, 2) -plt.plot(x, prior_samples_modulators, linewidth=0.6) -plt.xlim(x[0], x[-1]) -plt.xlabel('time (milliseconds)') -plt.title('amplitude modulators') - -plt.show() diff --git a/experiments/audio/audio_baseline.slrm b/experiments/audio/audio_baseline.slrm deleted file mode 100644 index 1401387..0000000 --- a/experiments/audio/audio_baseline.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 48:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=5000 -#SBATCH --array=0-5 -#SBATCH -o audio-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop 
of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python audio.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/audio/create_txts.py b/experiments/audio/create_txts.py deleted file mode 100644 index a5ea3f7..0000000 --- a/experiments/audio/create_txts.py +++ /dev/null @@ -1,9 +0,0 @@ -import pickle -import numpy as np - -empty_data = np.nan * np.zeros([20, 3]) - -for method in range(6): - for fold in range(10): - with open("output/varyM" + str(method) + "_" + str(fold) + ".txt", "wb") as fp: - pickle.dump(empty_data, fp) diff --git a/experiments/audio/results.py b/experiments/audio/results.py deleted file mode 100644 index caca00b..0000000 --- a/experiments/audio/results.py +++ /dev/null @@ -1,25 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) - -print('baselines:') -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/audio/results_varyM.py b/experiments/audio/results_varyM.py deleted file mode 100644 index 004d96b..0000000 --- a/experiments/audio/results_varyM.py +++ /dev/null @@ -1,184 +0,0 @@ -import pickle -import numpy as np -import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1.inset_locator import inset_axes, zoomed_inset_axes, mark_inset -import tikzplotlib -from matplotlib._png import read_png - -save_tikz = True - -method_nlml = np.zeros([6, 10, 20]) -method_nlpd = np.zeros([6, 10, 20]) -method_rmse = np.zeros([6, 10, 20]) -for method in range(6): - for dataset in range(10): - with open("output/varyM" + str(method) + "_" + str(dataset) + ".txt", "rb") as fp: - file = pickle.load(fp) - nlml = np.array(file[:, 0]) - nlpd = np.array(file[:, 1]) - rmse = np.array(file[:, 2]) - method_nlml[method, dataset] = nlml - method_nlpd[method, dataset] = nlpd - method_rmse[method, dataset] = rmse -# svgp_nlml = np.zeros([10, 50]) -# svgp_nlpd = np.zeros([10, 50]) -# svgp_classerror = np.zeros([10, 50]) -# for dataset in range(10): -# with open("baselines/output/varyM" + str(dataset) + ".txt", "rb") as fp: -# file = pickle.load(fp) -# nlml = np.array(file[:, 0]) -# nlpd = np.array(file[:, 1]) -# classerror = np.array(file[:, 2]) -# svgp_nlml[dataset] = nlml -# svgp_nlpd[dataset] = nlpd -# svgp_classerror[dataset] = classerror - -# np.set_printoptions(precision=3) -# print(np.mean(method_nlml, axis=1)) -# np.set_printoptions(precision=2) -# print(np.nanstd(method_nlml, axis=1)) -lb_nlml = np.mean(method_nlml, axis=1) - 1 * np.std(method_nlml, axis=1) -ub_nlml = np.mean(method_nlml, axis=1) + 1 * np.std(method_nlml, axis=1) -# lb_nlml_svgp = np.mean(svgp_nlml, axis=0) - 0.1 * np.std(svgp_nlml, axis=0) -# ub_nlml_svgp = np.mean(svgp_nlml, axis=0) + 0.1 * np.std(svgp_nlml, axis=0) - -# np.set_printoptions(precision=3) -# print(np.nanmean(method_nlpd, axis=1)) -# 
np.set_printoptions(precision=2) -# print(np.nanstd(method_nlpd, axis=1)) -lb_nlpd = np.mean(method_nlpd, axis=1) - 1 * np.std(method_nlpd, axis=1) -ub_nlpd = np.mean(method_nlpd, axis=1) + 1 * np.std(method_nlpd, axis=1) -# lb_nlpd_svgp = np.mean(svgp_nlpd, axis=0) - 0.1 * np.std(svgp_nlpd, axis=0) -# ub_nlpd_svgp = np.mean(svgp_nlpd, axis=0) + 0.1 * np.std(svgp_nlpd, axis=0) - -lb_classerror = np.mean(method_rmse, axis=1) - 1 * np.std(method_rmse, axis=1) -ub_classerror = np.mean(method_rmse, axis=1) + 1 * np.std(method_rmse, axis=1) -# lb_classerror_svgp = np.mean(svgp_classerror, axis=0) - 0.1 * np.std(svgp_classerror, axis=0) -# ub_classerror_svgp = np.mean(svgp_classerror, axis=0) + 0.1 * np.std(svgp_classerror, axis=0) - -legend_entries = ['S$^2$EKS', 'S$^2$PL', 'S$^2$PEP($\\alpha=1$)', 'S$^2$PEP($\\alpha=0.5$)', 'S$^2$PEP($\\alpha=0.01$)', 'S$^2$CVI'] - -num_inducing = np.linspace(100, 2000, 20, dtype=int) -fig0, ax0 = plt.subplots() -for method in [0, 1, 2, 3, 4, 5]: - ax0.plot(num_inducing, np.mean(method_nlml, axis=1)[method].T, label=legend_entries[method], linewidth=2) - ax0.fill_between(num_inducing, lb_nlml[method], ub_nlml[method], alpha=0.05) -# plt.plot(num_inducing, np.mean(svgp_nlml, axis=0).T, label='VI (sparse)') -# plt.fill_between(num_inducing, lb_nlml_svgp, ub_nlml_svgp, alpha=0.05) -# plt.legend(loc=1) -# plt.ylabel('NLML') -plt.legend(loc=3) -plt.tick_params( - axis='both', # changes apply to the x-axis - which='both', # both major and minor ticks are affected - direction='in') -plt.xlabel('Number of inducing inputs, M') -plt.xlim(0, 2000) -plt.ylim(0, 30000) -plt.yticks([0, 10000, 20000]) - -# yl = ax0.get_ylim() -# x1, x2, y1, y2 = 100, 700, 18850, 43000 # specify the limits -# ypos = yl[0]+(yl[1]-yl[0])/1.7 -# print(ypos) -# ax0.text(1200, ypos, '\\tikz\\node[coordinate] (n1) {};') -# ax0.text(x1, y2, '\\tikz\\node[coordinate] (r1) {};') -# ax0.text(x2, y1, '\\tikz\\node[coordinate] (r2) {};') - -if save_tikz: - tikzplotlib.save('audio_nlml.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./graphs/') - -# fig1, ax1 = plt.subplots() -# for method in [0, 1, 2, 3, 4, 5]: -# ax1.plot(num_inducing, np.mean(method_nlml, axis=1)[method].T, label=legend_entries[method], linewidth=2) -# ax1.fill_between(num_inducing, lb_nlml[method], ub_nlml[method], alpha=0.05) -# ax1.set_xlim(x1, x2) -# ax1.set_ylim(y1, y2) -# ax1.set_xticks([]) -# ax1.set_yticks([]) -# ax1.text(x1, y2, '\\tikz\\node[coordinate] (z1) {};') -# ax1.text(x2, y1, '\\tikz\\node[coordinate] (z2) {};') -# -# if save_tikz: -# tikzplotlib.save('audio_nlml_zoom.tex', -# axis_width='\\figurewidth', -# axis_height='\\figureheight', -# tex_relative_path_to_data='./graphs/') - - -fig2, ax2 = plt.subplots() -for method in [0, 1, 2, 3, 4, 5]: - ax2.plot(num_inducing, np.mean(method_nlpd, axis=1)[method].T, label=legend_entries[method], linewidth=2) - ax2.fill_between(num_inducing, lb_nlpd[method], ub_nlpd[method], alpha=0.05) -# plt.plot(num_inducing, np.mean(svgp_nlpd, axis=0).T, label='VI (sparse)') -# plt.fill_between(num_inducing, lb_nlpd_svgp, ub_nlpd_svgp, alpha=0.05) -# ax2.legend(loc=1) -# plt.ylabel('NLPD') -plt.tick_params( - axis='both', # changes apply to the x-axis - which='both', # both major and minor ticks are affected - direction='in') -plt.xlabel('Number of inducing inputs, M') -ax2.set_xlim(0, 2000) -ax2.set_ylim(-0.8, 1.25) - -# yl = ax2.get_ylim() -# x1, x2, y1, y2 = 100, 640, 0.69, 1.75 # specify the limits -# ypos = yl[0]+(yl[1]-yl[0])/1.5 -# 
ax2.text(1200, ypos, '\\tikz\\node[coordinate] (n2) {};') -# ax2.text(x1, y2, '\\tikz\\node[coordinate] (r3) {};') -# ax2.text(x2, y1, '\\tikz\\node[coordinate] (r4) {};') - -if save_tikz: - tikzplotlib.save('audio_nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./graphs/') - -# fig3, ax3 = plt.subplots() -# for method in [0, 1, 2, 3, 4, 5]: -# ax3.plot(num_inducing, np.mean(method_nlpd, axis=1)[method].T, label=legend_entries[method], linewidth=2) -# ax3.fill_between(num_inducing, lb_nlpd[method], ub_nlpd[method], alpha=0.05) -# # plt.plot(num_inducing, np.mean(svgp_nlml, axis=0).T, label='VI (sparse)') -# # plt.fill_between(num_inducing, lb_nlml_svgp, ub_nlml_svgp, alpha=0.05) -# # plt.ylabel('NLPD') -# # plt.xlabel('Number of inducing inputs, M') -# ax3.set_xlim(x1, x2) -# ax3.set_ylim(y1, y2) -# ax3.set_xticks([]) -# ax3.set_yticks([]) -# ax3.text(x1, y2, '\\tikz\\node[coordinate] (z3) {};') -# ax3.text(x2, y1, '\\tikz\\node[coordinate] (z4) {};') -# -# if save_tikz: -# tikzplotlib.save('audio_nlpd_zoom.tex', -# axis_width='\\figurewidth', -# axis_height='\\figureheight', -# tex_relative_path_to_data='./graphs/') - -fig4, ax4 = plt.subplots() -for method in [0, 1, 2, 3, 4, 5]: - ax4.plot(num_inducing, np.mean(method_rmse, axis=1)[method].T, label=legend_entries[method], linewidth=2) - ax4.fill_between(num_inducing, lb_classerror[method], ub_classerror[method], alpha=0.05) -# ax4.plot(num_inducing, np.mean(svgp_classerror, axis=0).T, label='VI (sparse)') -# ax4.fill_between(num_inducing, lb_classerror_svgp, ub_classerror_svgp, alpha=0.05) -# plt.legend(loc=9) -# plt.ylabel('Classification Error') -plt.xlabel('Number of inducing inputs, M') -plt.xlim(0, 2000) -# plt.ylim(0.0235, 0.05) -plt.tick_params( - axis='both', # changes apply to the x-axis - which='both', # both major and minor ticks are affected - direction='in') - -if save_tikz: - tikzplotlib.save('audio_rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./graphs/') - -plt.show() diff --git a/experiments/audio/timings_parallel.sh b/experiments/audio/timings_parallel.sh deleted file mode 100644 index 26d9a05..0000000 --- a/experiments/audio/timings_parallel.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -for (( counter=0; counter<4; counter++ )) -do -python3 audio_timings.py 2 1 7 "$counter" -done -printf "\n" diff --git a/experiments/audio/timings_plot.py b/experiments/audio/timings_plot.py deleted file mode 100644 index 5db818e..0000000 --- a/experiments/audio/timings_plot.py +++ /dev/null @@ -1,69 +0,0 @@ -from matplotlib import rc - -import matplotlib.pyplot as plt -import numpy as np -import pickle - -rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}) -plt.style.use('ggplot') - -rc('text', usetex=False) - -SMALL_SIZE = 18 -MEDIUM_SIZE = 20 -BIGGER_SIZE = 22 - -plt.rc('font', size=SMALL_SIZE) # controls default text sizes -plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title -plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels -plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize -plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title - - -time_steps = [5000, 10000, 15000, 20000] -cpu_times = [] -gpu_times = [] -parallel_times = [] -for i in range(4): - with open('output/2_{0}_3_0_gpu.txt'.format(i), 'rb') as f: - 
gpu_times.append(pickle.load(f)) - - with open('output/2_{0}_3_0_cpu.txt'.format(i), 'rb') as f: - cpu_times.append(pickle.load(f)) - - with open('output/2_{0}_3_1_gpu.txt'.format(i), 'rb') as f: - parallel_times.append(pickle.load(f)) - -gpu_times = np.array(gpu_times) -cpu_times = np.array(cpu_times) -parallel_times = np.array(parallel_times) - -plt.figure() -plt.plot(time_steps, gpu_times, 'k-o', label='GPU (sequential)') -plt.plot(time_steps, cpu_times, 'r-o', label='CPU (sequential)') -plt.plot(time_steps, parallel_times, 'k--o', label='GPU (parallel)') -plt.ylabel('iteration time') -plt.xlabel('time steps') -plt.legend() -plt.savefig('output/timings.pdf', bbox_inches='tight') - -plt.figure() -plt.plot(time_steps, cpu_times, 'r-o', label='CPU (sequential)') -plt.plot(time_steps, parallel_times, 'k--o', label='GPU (parallel)') -plt.ylabel('iteration time') -plt.xlabel('time steps') -plt.legend() -plt.savefig('output/timings_zoom.pdf', bbox_inches='tight') - -# plt.figure() -# plt.plot(time_steps, gpu_times/gpu_times[0], 'k-o', label='GPU') -# plt.plot(time_steps, cpu_times/cpu_times[0], 'r-o', label='CPU') -# plt.ylabel('wall-time/(max(wall-time))') -# plt.xlabel('$spatial points$') -# plt.legend() -# plt.savefig('timings_normalised.pdf', bbox_inches='tight') - - - diff --git a/experiments/audio/timings_sequential.sh b/experiments/audio/timings_sequential.sh deleted file mode 100644 index 3811a40..0000000 --- a/experiments/audio/timings_sequential.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -for (( counter=0; counter<4; counter++ )) -do -python3 audio_timings.py 2 0 7 "$counter" -done -printf "\n" diff --git a/experiments/banana/banana.slrm b/experiments/banana/banana.slrm deleted file mode 100644 index eeec626..0000000 --- a/experiments/banana/banana.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-5 -#SBATCH -o banana-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python banana.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/banana/banana_baseline.slrm b/experiments/banana/banana_baseline.slrm deleted file mode 100644 index 2b99f18..0000000 --- a/experiments/banana/banana_baseline.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-5 -#SBATCH -o banana-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. 
-for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python banana.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/banana/results.py b/experiments/banana/results.py deleted file mode 100644 index caca00b..0000000 --- a/experiments/banana/results.py +++ /dev/null @@ -1,25 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) - -print('baselines:') -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/binary/binary.slrm b/experiments/binary/binary.slrm deleted file mode 100644 index 8b5921e..0000000 --- a/experiments/binary/binary.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-5 -#SBATCH -o binary-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python binary.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/binary/binary_baselines.slrm b/experiments/binary/binary_baselines.slrm deleted file mode 100644 index 7749d82..0000000 --- a/experiments/binary/binary_baselines.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-5 -#SBATCH -o binary-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. 
-for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python binary.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/binary/results.py b/experiments/binary/results.py deleted file mode 100644 index 8681baa..0000000 --- a/experiments/binary/results.py +++ /dev/null @@ -1,26 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) - - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -print('baselines') -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/coal/coal.slrm b/experiments/coal/coal.slrm deleted file mode 100644 index cd15d66..0000000 --- a/experiments/coal/coal.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 12:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-5 -#SBATCH -o coal-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python coal.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/coal/coal_baseline.slrm b/experiments/coal/coal_baseline.slrm deleted file mode 100644 index 1b4ecb5..0000000 --- a/experiments/coal/coal_baseline.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 12:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-5 -#SBATCH -o coal-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. 
-for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python coal.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/coal/results.py b/experiments/coal/results.py deleted file mode 100644 index 6ecc9e1..0000000 --- a/experiments/coal/results.py +++ /dev/null @@ -1,26 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) - - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -print('baselines:') -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/electricity/electricity.slrm b/experiments/electricity/electricity.slrm deleted file mode 100644 index 6f613e2..0000000 --- a/experiments/electricity/electricity.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=50000 -#SBATCH --array=0-5 -#SBATCH -o electricity-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python electricity.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/electricity/electricity_baselines.slrm b/experiments/electricity/electricity_baselines.slrm deleted file mode 100644 index f18bf1e..0000000 --- a/experiments/electricity/electricity_baselines.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=50000 -#SBATCH --array=0-5 -#SBATCH -o electricity-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. 
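Further below, the gprn results script (results_bn.py) loads per-iteration loss/NLPD/RMSE traces and forward-fills NaN entries before averaging over folds (the NLPD trace additionally replaces values below 1e-4 with the previous entry). That cleaning step, isolated as a helper — a minimal sketch assuming a 1-D trace whose first entry is finite:

import numpy as np

def forward_fill(trace):
    # Replace each NaN with the most recent finite value, as done when
    # loading the per-iteration metric traces in results_bn.py.
    trace = np.asarray(trace, dtype=float).copy()
    for i in range(1, trace.size):
        if np.isnan(trace[i]):
            trace[i] = trace[i - 1]
    return trace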
-for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python electricity.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/electricity/results.py b/experiments/electricity/results.py deleted file mode 100644 index caca00b..0000000 --- a/experiments/electricity/results.py +++ /dev/null @@ -1,25 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) - -print('baselines:') -method_nlpd = np.zeros([6, 10]) -for method in range(6): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - method_nlpd[method, fold] = pickle.load(fp) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/gprn/results_bn.py b/experiments/gprn/results_bn.py deleted file mode 100644 index d8425f6..0000000 --- a/experiments/gprn/results_bn.py +++ /dev/null @@ -1,285 +0,0 @@ -import pickle -import numpy as np -import matplotlib.pyplot as plt -import tikzplotlib - -num_methods = 4 -num_approx = 5 -num_folds = 4 -num_iters = 100 - -method_loss = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/gprn_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_loss.txt", "rb") as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_loss[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -loss_mean = np.mean(method_loss, axis=2) -# print(loss_mean) -# print(np.nanmean(method_loss, axis=2)) -np.set_printoptions(precision=2) -loss_std = np.std(method_loss, axis=2) -# print(loss_std) -# print(np.nanstd(method_loss, axis=2)) - -method_nlpd = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/gprn_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - if result[i] < 1e-4: - result[i] = result[i-1] - method_nlpd[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -nlpd_mean = np.mean(method_nlpd, axis=2) -# print(nlpd_mean) -# print(np.nanmean(method_nlpd, axis=2)) -np.set_printoptions(precision=2) -nlpd_std = np.std(method_nlpd, axis=2) -# print(nlpd_std) -# print(np.nanstd(method_nlpd, axis=2)) - -method_rmse = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/gprn_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_rmse.txt", "rb") 
as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_rmse[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -rmse_mean = np.mean(method_rmse, axis=2) -# print(nlpd_mean) -# print(np.nanmean(method_rmse, axis=2)) -np.set_printoptions(precision=2) -rmse_std = np.std(method_rmse, axis=2) -# print(nlpd_std) -# print(np.nanstd(method_rmse, axis=2)) - -print(loss_mean.shape) -iter_num = np.arange(num_iters) * 10 - -plot_iters = 50 - -fig_width = 8 -fig_height = 5 - -linewidth = 3 -linestyleh = '--' -linestyleg = '-' -linestyleq = '-' -linestyler = '--' - -losslims = [-160, 900] -nlpdlims = [0.9, 3.3] -rmselims = [0.6, 3.0] - -method = 0 - -plt.figure(1) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, np.nan*loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) # , label='Riemannian grads') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Training Loss (Newton)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-newton-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(2) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, np.nan*nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) # , label='Riemannian grads') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Test NLPD (Newton)') -plt.xlabel('iteration number') -plt.gca().tick_params(axis='both', direction='in') -plt.ylim(nlpdlims) -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-newton-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(3) -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, np.nan*rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) # , label='Riemannian grads') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Ground Truth RMSE (Newton)') -plt.xlabel('iteration number') -plt.gca().tick_params(axis='both', direction='in') -plt.ylim(rmselims) -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-newton-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 1 - -plt.figure(4) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational 
Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Training Loss (VI)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-vi-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(5) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Test NLPD (VI)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-vi-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(6) -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Ground Truth RMSE (VI)') -plt.xlabel('iteration number') -plt.ylim(rmselims) -plt.gca().tick_params(axis='both', direction='in') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-vi-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 2 - -plt.figure(7) -plt.plot(iter_num, np.nan*loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh) # , label='heuristic PEP') -plt.plot(iter_num, np.nan*loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) #, label='Riemannian grads PEP') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Training Loss (PEP)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-pep-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(8) -plt.plot(iter_num, np.nan*nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh) # , label='heuristic PEP') -plt.plot(iter_num, np.nan*nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) #, label='Riemannian grads PEP') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Test NLPD (PEP)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', 
direction='in') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-pep-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(9) -plt.plot(iter_num, np.nan*rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh) # , label='heuristic PEP') -plt.plot(iter_num, np.nan*rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) # , label='Riemannian grads PEP') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Ground Truth RMSE (PEP)') -plt.xlabel('iteration number') -plt.ylim(rmselims) -plt.gca().tick_params(axis='both', direction='in') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-pep-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 3 - -plt.figure(10) -plt.plot(iter_num, loss_mean[method, 4].T, linewidth=linewidth/1.5, linestyle=linestyleh, label='PL') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PL2') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth/1.5, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, loss_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Training Loss (PL2)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-pl2-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(11) -plt.plot(iter_num, nlpd_mean[method, 4].T, linewidth=linewidth/1.5, linestyle=linestyleh, label='PL') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PL2') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth/1.5, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, nlpd_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Test NLPD (PL2)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-pl2-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(12) -plt.plot(iter_num, rmse_mean[method, 4].T, linewidth=linewidth/1.5, linestyle=linestyleh, label='PL') -plt.plot(iter_num, rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PL2') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth/1.5, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, rmse_mean[method, 2].T, 
linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, rmse_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Ground Truth RMSE (PL2)') -plt.xlabel('iteration number') -plt.ylim(rmselims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/gprn-pl2-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.show() diff --git a/experiments/motorcycle/motorcycle.slrm b/experiments/motorcycle/motorcycle.slrm deleted file mode 100644 index 08c1696..0000000 --- a/experiments/motorcycle/motorcycle.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 12:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-6 -#SBATCH -o motorcycle-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. -for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python motorcycle.py $SLURM_ARRAY_TASK_ID $run -done \ No newline at end of file diff --git a/experiments/motorcycle/motorcycle_baselines.slrm b/experiments/motorcycle/motorcycle_baselines.slrm deleted file mode 100644 index 932be49..0000000 --- a/experiments/motorcycle/motorcycle_baselines.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 12:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=1000 -#SBATCH --array=0-6 -#SBATCH -o motorcycle_baselines-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and run range -echo This is task $SLURM_ARRAY_TASK_ID, which will do runs $START_NUM to $END_NUM - -# Run the loop of runs for this task. 
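# The loop below serially covers all ten runs within a single array task,
# so one job = one method index x ten runs. A hedged flat alternative,
# using the illustrative names method/run that are not in the original
# script, would be a 70-wide array (7 methods x 10 runs):
#   #SBATCH --array=0-69
#   method=$(( SLURM_ARRAY_TASK_ID / 10 ))
#   run=$(( SLURM_ARRAY_TASK_ID % 10 ))
#   srun python motorcycle.py $method $run 1
# trading the inner loop for more, shorter jobs.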
-for (( run=$START_NUM; run<=END_NUM; run++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, run number $run - srun python motorcycle.py $SLURM_ARRAY_TASK_ID $run 1 -done \ No newline at end of file diff --git a/experiments/motorcycle/results.py b/experiments/motorcycle/results.py deleted file mode 100644 index 1acf467..0000000 --- a/experiments/motorcycle/results.py +++ /dev/null @@ -1,36 +0,0 @@ -import pickle -import numpy as np - -method_nlpd = np.zeros([7, 10]) -for method in range(7): - for fold in range(10): - with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - result = pickle.load(fp) - print(result) - method_nlpd[method, fold] = result - -# for fold in range(10): -# with open("output/" + str(15) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: -# print(pickle.load(fp)) - -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -# print(np.nanmean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) -# print(np.nanstd(method_nlpd, axis=1)) - - -method_nlpd = np.zeros([7, 10]) -for method in range(7): - for fold in range(10): - with open("output/baseline_" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - result = pickle.load(fp) - print(result) - method_nlpd[method, fold] = result - -print('baselines:') -np.set_printoptions(precision=3) -print(np.mean(method_nlpd, axis=1)) -np.set_printoptions(precision=2) -print(np.std(method_nlpd, axis=1)) diff --git a/experiments/motorcycle/results_bn.py b/experiments/motorcycle/results_bn.py deleted file mode 100644 index 2fd1c10..0000000 --- a/experiments/motorcycle/results_bn.py +++ /dev/null @@ -1,195 +0,0 @@ -import pickle -import numpy as np -import matplotlib.pyplot as plt -import tikzplotlib - -num_methods = 4 -num_approx = 6 -num_folds = 4 -num_iters = 25 - -method_loss = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/hsced_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_loss.txt", "rb") as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_loss[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -loss_mean = np.mean(method_loss, axis=2) -# print(loss_mean) -# print(np.nanmean(method_loss, axis=2)) -np.set_printoptions(precision=2) -loss_std = np.std(method_loss, axis=2) -# print(loss_std) -# print(np.nanstd(method_loss, axis=2)) - -method_nlpd = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/hsced_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_nlpd[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -nlpd_mean = np.mean(method_nlpd, axis=2) -# print(nlpd_mean) -# print(np.nanmean(method_nlpd, axis=2)) -np.set_printoptions(precision=2) -nlpd_std = np.std(method_nlpd, axis=2) -# print(nlpd_std) -# print(np.nanstd(method_nlpd, axis=2)) - -print(loss_mean.shape) -iter_num = np.arange(num_iters) * 10 - -linewidth = 3 -linestyleh = '--' 
-linestyleg = '-' -linestyleq = '-' -linestyler = '--' - -losslims = [62, 120] -nlpdlims = [0.31, 1.0] - -method = 0 - -plt.figure(1) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Training Loss (Newton)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-newton-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') -plt.figure(2) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Test NLPD (Newton)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-newton-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 1 - -plt.figure(3) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Training Loss (VI)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-vi-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') -plt.figure(4) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Test NLPD (VI)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-vi-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 2 - -plt.figure(5) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, 
label='heuristic PEP') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PEP') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Training Loss (PEP)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-pep-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') -plt.figure(6) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PEP') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PEP') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Test NLPD (PEP)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-pep-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 3 - -plt.figure(7) -plt.plot(iter_num, loss_mean[method, 4].T, linewidth=linewidth, linestyle=linestyleh, label='PL') -plt.plot(iter_num, np.nan*loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) # , label='Riemannian grads PL2') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, loss_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Training Loss (PL2)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-pl2-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') -plt.figure(8) -plt.plot(iter_num, nlpd_mean[method, 4].T, linewidth=linewidth, linestyle=linestyleh, label='PL') -plt.plot(iter_num, np.nan*nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler) # , label='Riemannian grads PL2') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, nlpd_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Test NLPD (PL2)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/hsced-pl2-nlpd.tex', - axis_width='\\figurewidth', - 
axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.show() diff --git a/experiments/nyc_crime/nyc_crime_bayesnewton.slrm b/experiments/nyc_crime/nyc_crime_bayesnewton.slrm deleted file mode 100644 index 2ce1e07..0000000 --- a/experiments/nyc_crime/nyc_crime_bayesnewton.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=6000 -#SBATCH --array=0-4 -#SBATCH -o nyc_newt-%a.out -module load miniconda -source activate venv - -srun python nyc_crime_bayesnewton.py $SLURM_ARRAY_TASK_ID 0 0 \ No newline at end of file diff --git a/experiments/nyc_crime/nyc_crime_bayesnewton_mf.slrm b/experiments/nyc_crime/nyc_crime_bayesnewton_mf.slrm deleted file mode 100644 index cabf250..0000000 --- a/experiments/nyc_crime/nyc_crime_bayesnewton_mf.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=6000 -#SBATCH --array=0-4 -#SBATCH -o nyc_newt-%a.out -module load miniconda -source activate venv - -srun python nyc_crime_bayesnewton.py $SLURM_ARRAY_TASK_ID 1 0 \ No newline at end of file diff --git a/experiments/nyc_crime/nyc_crime_gpflow0.slrm b/experiments/nyc_crime/nyc_crime_gpflow0.slrm deleted file mode 100644 index c828baa..0000000 --- a/experiments/nyc_crime/nyc_crime_gpflow0.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=4000 -#SBATCH --array=0-4 -#SBATCH -o nyc_gpflow-%a.out -module load miniconda -source activate venv - -srun python nyc_crime_gpflow.py $SLURM_ARRAY_TASK_ID 0 \ No newline at end of file diff --git a/experiments/nyc_crime/nyc_crime_gpflow1.slrm b/experiments/nyc_crime/nyc_crime_gpflow1.slrm deleted file mode 100644 index 3967f60..0000000 --- a/experiments/nyc_crime/nyc_crime_gpflow1.slrm +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=4000 -#SBATCH --array=0-4 -#SBATCH -o nyc_gpflow-%a.out -module load miniconda -source activate venv - -srun python nyc_crime_gpflow.py $SLURM_ARRAY_TASK_ID 1 \ No newline at end of file diff --git a/experiments/nyc_crime/results.py b/experiments/nyc_crime/results.py deleted file mode 100644 index 2d4245c..0000000 --- a/experiments/nyc_crime/results.py +++ /dev/null @@ -1,152 +0,0 @@ -import pickle -import numpy as np - -cpugpu = 'cpu' - -if cpugpu == 'gpu': - par = 1 -else: - par = 0 - -print('ST-SVGP') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/0_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/0_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/0_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('MF-ST-SVGP') - -ind_time = np.zeros([5]) -for ind in range(5): - with 
open("output/1_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/1_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/1_" + str(ind) + "_" + str(par) + "_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('SVGP0') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_0_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_0_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_0_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) - - -print('SVGP1') - -ind_time = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_1_" + str(cpugpu) + "_time.txt", "rb") as fp: - ind_time[ind] = pickle.load(fp) - -ind_rmse = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_1_" + str(cpugpu) + "_rmse.txt", "rb") as fp: - ind_rmse[ind] = pickle.load(fp) - -ind_nlpd = np.zeros([5]) -for ind in range(5): - with open("output/gpflow_" + str(ind) + "_1_" + str(cpugpu) + "_nlpd.txt", "rb") as fp: - ind_nlpd[ind] = pickle.load(fp) - -print('time:') -np.set_printoptions(precision=3) -print(np.mean(ind_time)) -np.set_printoptions(precision=2) -print(np.std(ind_time)) - -print('rmse:') -np.set_printoptions(precision=3) -print(np.mean(ind_rmse)) -np.set_printoptions(precision=2) -print(np.std(ind_rmse)) - -print('nlpd:') -np.set_printoptions(precision=3) -print(np.mean(ind_nlpd)) -np.set_printoptions(precision=2) -print(np.std(ind_nlpd)) diff --git a/experiments/product/results_bn.py b/experiments/product/results_bn.py deleted file mode 100644 index 9c96600..0000000 --- a/experiments/product/results_bn.py +++ /dev/null @@ -1,312 +0,0 @@ -import pickle -import numpy as np -import matplotlib -import matplotlib.pyplot as plt -import tikzplotlib - -num_methods = 4 -num_approx = 6 -num_folds = 4 -num_iters = 25 - -method_loss = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/product_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_loss.txt", "rb") as fp: 
- result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_loss[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -loss_mean = np.mean(method_loss, axis=2) -# print(loss_mean) -# print(np.nanmean(method_loss, axis=2)) -np.set_printoptions(precision=2) -loss_std = np.std(method_loss, axis=2) -# print(loss_std) -# print(np.nanstd(method_loss, axis=2)) - -method_nlpd = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/product_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_nlpd[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -nlpd_mean = np.mean(method_nlpd, axis=2) -# print(nlpd_mean) -# print(np.nanmean(method_nlpd, axis=2)) -np.set_printoptions(precision=2) -nlpd_std = np.std(method_nlpd, axis=2) -# print(nlpd_std) -# print(np.nanstd(method_nlpd, axis=2)) - -method_rmse = np.zeros([num_methods, num_approx, num_folds, num_iters]) * np.nan -for method in range(num_methods): - for approx in range(num_approx): - for fold in range(num_folds): - if not (method == 2 and approx == 1): - if method > 2 or approx < 4: - with open("output/product_" + str(method) + "_" + str(approx) + "_" + str(fold) + "_rmse.txt", "rb") as fp: - result = pickle.load(fp) - for i in range(1, num_iters): - if np.isnan(result[i]): - result[i] = result[i-1] - method_rmse[method, approx, fold] = result[:num_iters] - -np.set_printoptions(precision=3) -rmse_mean = np.mean(method_rmse, axis=2) -# print(nlpd_mean) -# print(np.nanmean(method_rmse, axis=2)) -np.set_printoptions(precision=2) -rmse_std = np.std(method_rmse, axis=2) -# print(nlpd_std) -# print(np.nanstd(method_rmse, axis=2)) - -print(loss_mean.shape) -iter_num = np.arange(num_iters) * 10 - -plot_iters = 50 - -# matplotlib.use("pgf") -# matplotlib.rcParams.update({ -# "pgf.texsystem": "pdflatex", -# 'font.family': 'serif', -# 'text.usetex': True, -# 'pgf.rcfonts': False, -# }) -fig_width = 8 -fig_height = 5 - -linewidth = 3 -linestyleh = '--' -linestyleg = '-' -linestyleq = '-' -linestyler = '--' - -losslims = [-50, 1000] -nlpdlims = [-0.2, 0.6] -rmselims = [0., 1.4] - -method = 0 - -# plt.figure(1, figsize=(fig_width, fig_height)) -plt.figure(1) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Training Loss (Newton)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-newton-loss.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-newton-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -# plt.figure(2, figsize=(fig_width, fig_height)) 
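# Each figure in this script repeats the same four plt.plot calls with only
# the metric, method index, labels, and limits changing. A hedged
# refactoring sketch (plot_curves is illustrative, not a function defined
# in the original file):
#     def plot_curves(fig_num, mean, method, labels, lims, title):
#         plt.figure(fig_num)
#         styles = [linestyleh, linestyler, linestyleg, linestyleq]
#         for approx, style, label in zip([0, 3, 1, 2], styles, labels):
#             plt.plot(iter_num, mean[method, approx].T, linewidth=linewidth,
#                      linestyle=style, label=label)
#         plt.title(title)
#         plt.xlabel('iteration number')
#         plt.ylim(lims)
#         plt.gca().tick_params(axis='both', direction='in')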
-plt.figure(2) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Test NLPD (Newton)') -plt.xlabel('iteration number') -plt.gca().tick_params(axis='both', direction='in') -plt.ylim(nlpdlims) -# plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-newton-nlpd.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-newton-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(3) -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic') -plt.plot(iter_num, rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='Gauss-Newton') -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='quasi-Newton') -plt.title('Ground Truth RMSE (Newton)') -plt.xlabel('iteration number') -plt.gca().tick_params(axis='both', direction='in') -plt.ylim(rmselims) -# plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-newton-rmse.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-newton-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 1 - -# plt.figure(3, figsize=(fig_width, fig_height)) -plt.figure(4) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Training Loss (VI)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-vi-loss.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-vi-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -# plt.figure(4, figsize=(fig_width, fig_height)) -plt.figure(5) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Test NLPD (VI)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -# 
plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-vi-nlpd.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-vi-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(6) -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic VI') -plt.plot(iter_num, rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads VI') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='variational Gauss-Newton') -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='variational quasi-Newton') -plt.title('Ground Truth RMSE (VI)') -plt.xlabel('iteration number') -plt.ylim(rmselims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-vi-rmse.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-vi-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 2 - -# plt.figure(5, figsize=(fig_width, fig_height)) -plt.figure(7) -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PEP') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PEP') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Training Loss (PEP)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pep-loss.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pep-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -# plt.figure(6, figsize=(fig_width, fig_height)) -plt.figure(8) -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PEP') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PEP') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Test NLPD (PEP)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pep-nlpd.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pep-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(9) -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PEP') -plt.plot(iter_num, rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PEP') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg) -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, 
linestyle=linestyleq, label='PEP quasi-Newton') -plt.title('Ground Truth RMSE (PEP)') -plt.xlabel('iteration number') -plt.ylim(rmselims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -# plt.savefig('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pep-rmse.pgf') -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pep-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -method = 3 - -plt.figure(10) -plt.plot(iter_num, loss_mean[method, 4].T, linewidth=linewidth, linestyle=linestyleh, label='PL') -plt.plot(iter_num, loss_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PL2') -plt.plot(iter_num, loss_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, loss_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, loss_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, loss_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Training Loss (PL2)') -plt.xlabel('iteration number') -plt.ylim(losslims) -plt.gca().tick_params(axis='both', direction='in') -plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pl2-loss.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(11) -plt.plot(iter_num, nlpd_mean[method, 4].T, linewidth=linewidth, linestyle=linestyleh, label='PL') -plt.plot(iter_num, nlpd_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PL2') -plt.plot(iter_num, nlpd_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, nlpd_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, nlpd_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, nlpd_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Test NLPD (PL2)') -plt.xlabel('iteration number') -plt.ylim(nlpdlims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() -tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pl2-nlpd.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(12) -plt.plot(iter_num, rmse_mean[method, 4].T, linewidth=linewidth, linestyle=linestyleh, label='PL') -plt.plot(iter_num, rmse_mean[method, 3].T, linewidth=linewidth, linestyle=linestyler, label='Riemannian grads PL2') -plt.plot(iter_num, rmse_mean[method, 1].T, linewidth=linewidth, linestyle=linestyleg, label='PL2 Gauss-Newton') -plt.plot(iter_num, rmse_mean[method, 2].T, linewidth=linewidth, linestyle=linestyleq, label='PL2 quasi-Newton') -plt.plot(iter_num, rmse_mean[method, 0].T, linewidth=linewidth, linestyle=linestyleh, label='heuristic PL2') -# plt.plot(iter_num, rmse_mean[method, 5].T, linewidth=linewidth, linestyle=linestyleq, label='PL quasi-Newton', color='c') -plt.title('Ground Truth RMSE (PL2)') -plt.xlabel('iteration number') -plt.ylim(rmselims) -plt.gca().tick_params(axis='both', direction='in') -# plt.legend() 
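# The tikzplotlib.save calls in this script hard-code one user's absolute
# path. A hedged, portable sketch for the call that follows (FIG_DIR is
# illustrative, not defined in the original file):
#     import os
#     FIG_DIR = os.environ.get('FIG_DIR', './fig')
#     tikzplotlib.save(os.path.join(FIG_DIR, 'product-pl2-rmse.tex'),
#                      axis_width='\\figurewidth',
#                      axis_height='\\figureheight',
#                      tex_relative_path_to_data='./fig/')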
-tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/newton-smoothers/paper/fig/product-pl2-rmse.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.show() diff --git a/experiments/rainforest/rainforest0.slrm b/experiments/rainforest/rainforest0.slrm deleted file mode 100644 index b1f1bde..0000000 --- a/experiments/rainforest/rainforest0.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=2000 -#SBATCH --array=0-9 -#SBATCH -o rainforest1-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and fold range -echo This is task $SLURM_ARRAY_TASK_ID, which will run folds $START_NUM to $END_NUM - -# Run the loop of folds for this task. -for (( fold=$START_NUM; fold<=END_NUM; fold++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, fold number $fold - srun python rainforest.py 0 $SLURM_ARRAY_TASK_ID $fold -done \ No newline at end of file diff --git a/experiments/rainforest/rainforest1.slrm b/experiments/rainforest/rainforest1.slrm deleted file mode 100644 index 500ccd6..0000000 --- a/experiments/rainforest/rainforest1.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=2000 -#SBATCH --array=0-9 -#SBATCH -o rainforest1-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and fold range -echo This is task $SLURM_ARRAY_TASK_ID, which will run folds $START_NUM to $END_NUM - -# Run the loop of folds for this task. -for (( fold=$START_NUM; fold<=END_NUM; fold++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, fold number $fold - srun python rainforest.py 1 $SLURM_ARRAY_TASK_ID $fold -done \ No newline at end of file diff --git a/experiments/rainforest/rainforest2.slrm b/experiments/rainforest/rainforest2.slrm deleted file mode 100644 index f195f52..0000000 --- a/experiments/rainforest/rainforest2.slrm +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -l -#SBATCH -p short -#SBATCH -t 24:00:00 -#SBATCH -n 1 -#SBATCH --mem-per-cpu=2000 -#SBATCH --array=0-9 -#SBATCH -o rainforest2-%a.out -module load miniconda -source activate venv - -START_NUM=0 -END_NUM=9 - -# Print the task and fold range -echo This is task $SLURM_ARRAY_TASK_ID, which will run folds $START_NUM to $END_NUM - -# Run the loop of folds for this task. 
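# Note the argument order in the loop below: rainforest.py takes the model
# type (here 2) first, then the array index, then the fold, whereas the
# motorcycle and electricity scripts pass the array index first. A hedged
# sketch sweeping all three model types in one task, with MODEL as an
# illustrative name not present in the original:
#   for MODEL in 0 1 2; do
#     for (( fold=$START_NUM; fold<=$END_NUM; fold++ )); do
#       srun python rainforest.py $MODEL $SLURM_ARRAY_TASK_ID $fold
#     done
#   done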
-for (( fold=$START_NUM; fold<=END_NUM; fold++ )); do - echo This is SLURM task $SLURM_ARRAY_TASK_ID, fold number $fold - srun python rainforest.py 2 $SLURM_ARRAY_TASK_ID $fold -done \ No newline at end of file diff --git a/experiments/rainforest/rainforest_timings.py b/experiments/rainforest/rainforest_timings.py deleted file mode 100644 index e61421c..0000000 --- a/experiments/rainforest/rainforest_timings.py +++ /dev/null @@ -1,117 +0,0 @@ -import bayesnewton -import objax -import numpy as np -import time -import pickle -import sys -from jax.lib import xla_bridge - -print('loading rainforest data ...') -data = np.loadtxt('../../data/TRI2TU-data.csv', delimiter=',') - -time_points = np.array([200, 1000, 2000, 3000, 4000, 5000]) -spatial_points = np.array([10, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500]) - -if len(sys.argv) > 1: - model_type = int(sys.argv[1]) - nt_ind = int(sys.argv[2]) - nr_ind = int(sys.argv[3]) - # fold = int(sys.argv[3]) - parallel = bool(int(sys.argv[4])) -else: - model_type = 0 - nt_ind = 0 - nr_ind = 1 - # nr = 100 # spatial grid point (y-axis) - # fold = 0 - parallel = None - -fold = 0 - -nr = spatial_points[nr_ind] -nt = time_points[nt_ind] # temporal grid points (x-axis) -scale = 1000 / nt - -t, r, Y_ = bayesnewton.utils.discretegrid(data, [0, 1000, 0, 500], [nt, nr]) -t_flat, r_flat, Y_flat = t.flatten(), r.flatten(), Y_.flatten() - -N = nr * nt # number of data points - -# sort out the train/test split -np.random.seed(99) -ind_shuffled = np.random.permutation(N) -ind_split = np.stack(np.split(ind_shuffled, 10)) # 10 random batches of data indices -test_ind = ind_split[fold] # test_ind = np.random.permutation(N)[:N//10] -t_test = t_flat[test_ind] -r_test = r_flat[test_ind] -Y_test = Y_flat[test_ind] -Y_flat[test_ind] = np.nan -Y = Y_flat.reshape(nt, nr) - -# put test points on a grid to speed up prediction -X_test = np.concatenate([t_test[:, None], r_test[:, None]], axis=1) -t_test, r_test, Y_test = bayesnewton.utils.create_spatiotemporal_grid(X_test, Y_test) - -var_f = 1. # GP variance -len_f = 10. # lengthscale - -kern = bayesnewton.kernels.SpatialMatern32(variance=var_f, lengthscale=len_f, z=r[0, ...], sparse=False) -lik = bayesnewton.likelihoods.Poisson() -if model_type == 0: - model = bayesnewton.models.MarkovVariationalGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y, parallel=parallel) -elif model_type == 1: - model = bayesnewton.models.MarkovVariationalMeanFieldGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y, parallel=parallel) -elif model_type == 2: - model = bayesnewton.models.InfiniteHorizonVariationalGP(kernel=kern, likelihood=lik, X=t, R=r, Y=Y, parallel=parallel) - -print('num time pts:', nt) -print('num spatial pts:', nr) -print('batch number:', fold) -print('parallel:', parallel) -print(model) - -lr_adam = 0.2 -lr_newton = 0.2 -iters = 11 -opt = objax.optimizer.Adam(model.vars()) -energy = objax.GradValues(model.energy, model.vars()) -# inf_args = {"cubature": bayesnewton.cubature.Unscented()} - - -@objax.Function.with_vars(model.vars() + opt.vars()) -def train_op(): - model.inference(lr=lr_newton) # perform inference and update variational params - dE, E = energy() # compute energy and its gradients w.r.t. 
hypers - opt(lr_adam, dE) - return E - -train_op = objax.Jit(train_op) - -t0 = time.time() -for i in range(1, iters + 1): - if i == 2: - t0 = time.time() - loss = train_op() - print('iter %2d: energy: %1.4f' % (i, loss[0])) -t1 = time.time() -# print('optimisation time: %2.2f secs' % (t1-t0)) -avg_time_taken = (t1-t0)/(iters - 1) -print('average iter time: %2.2f secs' % avg_time_taken) - -cpugpu = xla_bridge.get_backend().platform - -with open("output/" + str(model_type) + "_" + str(nr_ind) + "_" + str(nt_ind) + "_" + str(int(parallel)) - + "_" + cpugpu + ".txt", "wb") as fp: - pickle.dump(avg_time_taken, fp) - -# calculate posterior predictive distribution via filtering and smoothing at train & test locations: -# print('calculating the posterior predictive distribution ...') -# t0 = time.time() -# nlpd = model.negative_log_predictive_density(X=t_test, R=r_test, Y=Y_test) -# t1 = time.time() -# print('prediction time: %2.2f secs' % (t1-t0)) -# print('nlpd: %2.3f' % nlpd) -# -# with open("output/" + str(model_type) + "_" + str(nr_ind) + "_" + str(fold) + "_nlpd.txt", "wb") as fp: -# pickle.dump(nlpd, fp) diff --git a/experiments/rainforest/results.py b/experiments/rainforest/results.py deleted file mode 100644 index a928b84..0000000 --- a/experiments/rainforest/results.py +++ /dev/null @@ -1,77 +0,0 @@ -import pickle -import numpy as np -import matplotlib.pyplot as plt -import tikzplotlib - -color_palette = { - 'black': '#000000', - 'orange': '#E69F00', - 'blue': '#56B4E9', - 'green': '#009E73', - 'yellow': '#F0E442', - 'dark_blue': '#0072B2', - 'dark_orange': '#D55E00', - 'pink': '#CC79A7', - 'white': '#111111', - 'grey': 'grey' -} - -# timings = np.zeros([3, 10]) -num_complete = 7 -timings = np.zeros([3, num_complete]) -for model_type in range(3): - # for nr_ind in range(10): - for nr_ind in range(num_complete): - with open("output/cpu_" + str(model_type) + "_" + str(nr_ind) + "_time.txt", "rb") as fp: - result = pickle.load(fp) - # print(result) - timings[model_type, nr_ind] = result - -num_complete_nlpd = 7 -nlpd = np.zeros([3, num_complete_nlpd]) -for model_type in range(3): - # for nr_ind in range(10): - for nr_ind in range(num_complete_nlpd): - nlpdf = 0.
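# The loop below accumulates the ten per-fold NLPD pickles and divides by
# ten, giving one value per (model_type, nr_ind) cell. A hedged vectorised
# sketch of the same step, assuming every fold file exists (load_nlpd is
# illustrative, not defined in the original file):
#     def load_nlpd(m, r, f):
#         with open("output/%d_%d_%d_nlpd.txt" % (m, r, f), "rb") as fp:
#             return pickle.load(fp)
#     nlpd[model_type, nr_ind] = np.mean(
#         [load_nlpd(model_type, nr_ind, fold) for fold in range(10)])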
- for fold in range(10): - print("output/" + str(model_type) + "_" + str(nr_ind) + "_" + str(fold) + "_nlpd.txt") - with open("output/" + str(model_type) + "_" + str(nr_ind) + "_" + str(fold) + "_nlpd.txt", "rb") as fp: - result = pickle.load(fp) - nlpdf += result - # print(result) - nlpd[model_type, nr_ind] = nlpdf / 10 - -num_space = np.array([50, 100, 150, 200, 250, 300, 350, 400, 450, 500]) -plt.figure(1) -plt.plot(num_space[:num_complete], timings.T[:, 0], '.--', markersize=3, linewidth=2.5, color=color_palette['dark_orange']) -plt.plot(num_space[:num_complete], timings.T[:, 1], 'x--', markersize=5, linewidth=2.5, color=color_palette['dark_blue']) -plt.plot(num_space[:num_complete], timings.T[:, 2], 'x-.', markersize=5, linewidth=2.5, color=color_palette['green']) -plt.xlabel('Number of spatial points') -plt.ylabel('Training step time (secs)') -plt.ylim([-0., 82]) -ax = plt.gca() -ax.set_xticks(num_space[:num_complete]) -plt.legend(['Full', 'Spatial mean-field', 'Infinite-horizon'], loc=2) -if True: - tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/ati-fcai/paper/icml2021/fig/scalability.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') - -plt.figure(2) -plt.plot(num_space[:num_complete_nlpd], nlpd.T[:, 0], '.--', markersize=3, linewidth=2.5, color=color_palette['dark_orange']) -plt.plot(num_space[:num_complete_nlpd], nlpd.T[:, 1], 'x--', markersize=5, linewidth=2.5, color=color_palette['dark_blue']) -plt.plot(num_space[:num_complete_nlpd], nlpd.T[:, 2], 'x-.', markersize=6, linewidth=2.5, color=color_palette['green']) -plt.plot(num_space[:num_complete_nlpd], nlpd.T[:, 0], '.--', markersize=3, linewidth=2.5, color=color_palette['dark_orange'])  # re-draws the 'Full' curve so it sits on top -plt.xlabel('Number of spatial points') -plt.ylabel('Test NLPD') -# plt.ylim([-0., 82]) -ax = plt.gca() -ax.set_xticks(num_space[:num_complete_nlpd]) -plt.legend(['Full', 'Spatial mean-field', 'Infinite-horizon'], loc=1) -if True: - tikzplotlib.save('/Users/wilkinw1/postdoc/inprogress/ati-fcai/paper/icml2021/fig/approx_perf.tex', - axis_width='\\figurewidth', - axis_height='\\figureheight', - tex_relative_path_to_data='./fig/') -plt.show() diff --git a/experiments/rainforest/timings_parallel.sh b/experiments/rainforest/timings_parallel.sh deleted file mode 100644 index 85b973a..0000000 --- a/experiments/rainforest/timings_parallel.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -for (( counter=0; counter<6; counter++ )) -do -python3 rainforest_timings.py 0 "$counter" 0 1 -done -printf "\n" diff --git a/experiments/rainforest/timings_plot.py b/experiments/rainforest/timings_plot.py deleted file mode 100644 index 785a7ab..0000000 --- a/experiments/rainforest/timings_plot.py +++ /dev/null @@ -1,69 +0,0 @@ -from matplotlib import rc - -import matplotlib.pyplot as plt -import numpy as np -import pickle - -rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}) -plt.style.use('ggplot') - -rc('text', usetex=False) - -SMALL_SIZE = 18 -MEDIUM_SIZE = 20 -BIGGER_SIZE = 22 - -plt.rc('font', size=SMALL_SIZE) # controls default text sizes -plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title -plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels -plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels -plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize -plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title - - -time_steps = [200, 1000, 2000, 3000] -cpu_times
= [] -gpu_times = [] -parallel_times = [] -for i in range(4): - with open('output/0_0_{0}_0_gpu.txt'.format(i), 'rb') as f: - gpu_times.append(pickle.load(f)) - - with open('output/0_0_{0}_0_cpu.txt'.format(i), 'rb') as f: - cpu_times.append(pickle.load(f)) - - with open('output/0_0_{0}_1_gpu.txt'.format(i), 'rb') as f: - parallel_times.append(pickle.load(f)) - -gpu_times = np.array(gpu_times) -cpu_times = np.array(cpu_times) -parallel_times = np.array(parallel_times) - -plt.figure() -plt.plot(time_steps, gpu_times, 'k-o', label='GPU (sequential)') -plt.plot(time_steps, cpu_times, 'r-o', label='CPU (sequential)') -plt.plot(time_steps, parallel_times, 'k--o', label='GPU (parallel)') -plt.ylabel('iteration time') -plt.xlabel('time steps') -plt.legend() -plt.savefig('output/timings.pdf', bbox_inches='tight') - -plt.figure() -plt.plot(time_steps, cpu_times, 'r-o', label='CPU (sequential)') -plt.plot(time_steps, parallel_times, 'k--o', label='GPU (parallel)') -plt.ylabel('iteration time') -plt.xlabel('time steps') -plt.legend() -plt.savefig('output/timings_zoom.pdf', bbox_inches='tight') - -# plt.figure() -# plt.plot(time_steps, gpu_times/gpu_times[0], 'k-o', label='GPU') -# plt.plot(time_steps, cpu_times/cpu_times[0], 'r-o', label='CPU') -# plt.ylabel('wall-time/(max(wall-time))') -# plt.xlabel('$spatial points$') -# plt.legend() -# plt.savefig('timings_normalised.pdf', bbox_inches='tight') - - - diff --git a/experiments/rainforest/timings_sequential.sh b/experiments/rainforest/timings_sequential.sh deleted file mode 100644 index e67f364..0000000 --- a/experiments/rainforest/timings_sequential.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -for (( counter=0; counter<6; counter++ )) -do -python3 rainforest_timings.py 0 "$counter" 0 0 -done -printf "\n"
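
The timings_parallel.sh and timings_sequential.sh drivers above sweep the temporal-grid index (the second argument to rainforest_timings.py) with the parallel flag set to 1 and 0 respectively, and each run pickles its average iteration time under the output naming scheme used in rainforest_timings.py (model_type, nr_ind, nt_ind, parallel flag, backend). A minimal sketch for gathering those timings into an array, assuming a CPU backend and that all six sequential runs completed:

    import pickle
    import numpy as np

    # model_type 0, spatial index 0, sequential (parallel=0), CPU backend,
    # matching the filenames written by rainforest_timings.py
    times = np.zeros(6)
    for nt_ind in range(6):
        with open("output/0_0_%d_0_cpu.txt" % nt_ind, "rb") as fp:
            times[nt_ind] = pickle.load(fp)
    print(times)  # average seconds per training iteration at each grid size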