diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml
new file mode 100644
index 00000000..52233378
--- /dev/null
+++ b/.github/workflows/cd.yml
@@ -0,0 +1,42 @@
+# This workflow will install the documentation dependencies, build the Sphinx API documentation and deploy it to GitHub Pages
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: CD
+
+on:
+ push:
+ branches:
+ - main
+
+
+jobs:
+ docs:
+ name: Deploy API documentation
+ runs-on: [ubuntu-latest]
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Set up Python 3.10.5
+ uses: actions/setup-python@v3
+ with:
+ python-version: "3.10.5"
+
+ - name: Check Python Version
+ run: python --version
+
+ - name: Install dependencies
+ run: |
+ python -m pip install ".[docs]"
+
+ - name: Build API documentation
+ run: |
+ sphinx-apidoc -Mfeo docs/source src/wf_psf
+ sphinx-build docs/source docs/build
+
+ - name: Deploy API documentation
+ uses: peaceiris/actions-gh-pages@v3.5.9
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: docs/build
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 00000000..48244a87
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,29 @@
+# This workflow will install Python dependencies and run the tests with a single version of Python
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: CI
+
+on:
+ pull_request:
+ branches:
+ - main
+
+
+jobs:
+ test-full:
+ runs-on: [ubuntu-latest]
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Set up Python 3.10.5
+ uses: actions/setup-python@v3
+ with:
+ python-version: "3.10.5"
+
+ - name: Install dependencies
+ run: python -m pip install ".[test]"
+
+ - name: Test with pytest
+ run: python -m pytest
diff --git a/.gitignore b/.gitignore
index c69a12e8..ec01ff77 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@ __pycache__/
# Remove method comparison data
method-comparison/compatible-datasets/*
+tf_notebooks
# Log files from slurm
*.err
@@ -61,11 +62,13 @@ htmlcov/
.coverage.*
.cache
nosetests.xml
+pytest.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
+src/wf_psf/pytest.xml
# Translations
*.mo
@@ -86,6 +89,11 @@ instance/
# Sphinx documentation
docs/_build/
+docs/source/wf_psf*.rst
+docs/source/_static/file.png
+docs/source/_static/images/logo_colab.png
+docs/source/_static/minus.png
+docs/source/_static/plus.png
# PyBuilder
target/
@@ -143,3 +151,7 @@ dmypy.json
# Pyre type checker
.pyre/
+
+
+# WF-PSF Debug
+/debug/
diff --git a/README.md b/README.md
index 54269260..443969a5 100644
--- a/README.md
+++ b/README.md
@@ -3,103 +3,11 @@
WaveDiff
A differentiable data-driven wavefront-based PSF modelling framework.
+WaveDiff is a differentiable PSF modelling pipeline constructed with [Tensorflow](https://github.com/tensorflow/tensorflow). It was developed at the [CosmoStat lab](https://www.cosmostat.org) at CEA Paris-Saclay.
+
+See the [documentation](https://cosmostat.github.io/wf-psf/) for details on how to install and run WaveDiff.
+
This repository includes:
- A differentiable PSF model entirely built in [Tensorflow](https://github.com/tensorflow/tensorflow).
-- A numpy-based PSF simulator [here](https://github.com/tobias-liaudat/wf-psf/blob/main/wf_psf/SimPSFToolkit.py).
+- A [numpy-based PSF simulator](https://github.com/CosmoStat/wf-psf/tree/dummy_main/src/wf_psf/sims).
- All the scripts, jobs and notebooks required to reproduce the results in [arXiv:2203.04908](http://arxiv.org/abs/2203.04908) and [arXiv:2111.12541](https://arxiv.org/abs/2111.12541).
-
-For more information on how to use the WaveDiff model through configurable scripts see the `long-runs` directory's [README](https://github.com/tobias-liaudat/wf-psf/blob/main/long-runs/README.md).
-
-## Proposed framework
-
-A schematic of the proposed framework can be seen below. The PSF model is estimated (trained) using star observations in the field-of-view.
-
-
-
-
-
-
-## Install
-
-`wf-psf` is pure python and can be easily installed with `pip`. After cloning the repository, run the following commands:
-
-```bash
-$ cd wf-psf
-$ pip install .
-```
-
-The package can then be imported in Python as `import wf_psf as wf`. We recommend using the release `1.2.0` for stability as the current main branch is under development.
-
-## Requirements
-- [numpy](https://github.com/numpy/numpy) [>=1.19.2]
-- [scipy](https://github.com/scipy/scipy) [>=1.5.2]
-- [TensorFlow](https://www.tensorflow.org/) [==2.4.1]
-- [TensorFlow Addons](https://github.com/tensorflow/addons) [==0.12.1]
-- [Astropy](https://github.com/astropy/astropy) [==4.2]
-- [zernike](https://github.com/jacopoantonello/zernike) [==0.0.31]
-- [opencv-python](https://github.com/opencv/opencv-python) [>=4.5.1.48]
-- [pillow](https://github.com/python-pillow/Pillow) [>=8.1.0]
-- [galsim](https://github.com/GalSim-developers/GalSim) [>=2.3.1]
-
-Optional packages:
-- [matplotlib](https://github.com/matplotlib/matplotlib) [=3.3.2]
-- [seaborn](https://github.com/mwaskom/seaborn) [>=0.11]
-
-
-## Reproducible research
-
-#### [arXiv:2203.04908](http://arxiv.org/abs/2203.04908) Rethinking data-driven point spread function modeling with a differentiable optical model (2022)
-_Submitted._
-
-- Use the release 1.2.0.
-- All the scripts, jobs and notebooks to reproduce the figures from the article can be found [here](https://github.com/tobias-liaudat/wf-psf/tree/main/papers/article_IOP).
-- The trained PSF models are found [here](https://github.com/tobias-liaudat/wf-psf/tree/main/papers/article_IOP/data/models).
-- The input PSF field can be found [here](https://github.com/tobias-liaudat/wf-psf/tree/main/data).
-- The script used to generate the input PSF field is [this one](https://github.com/tobias-liaudat/wf-psf/blob/main/long-runs/LR-PSF-field-gen-coherentFields.py).
-- The code required to run the comparison against pixel-based PSF models is in [this directory](https://github.com/tobias-liaudat/wf-psf/tree/main/method-comparison).
-- The training of the models was done using [this script](https://github.com/tobias-liaudat/wf-psf/blob/main/long-runs/train_eval_plot_script_click.py). In order to match the script's option for the different models with the article you should follow:
- - `poly->WaveDiff-original`
- - `graph->WaveDiff-graph`
- - `mccd->WaveDiff-Polygraph`
-
-_Note: To run the comparison to other PSF models you need to install them first. See [RCA](https://github.com/CosmoStat/rca), [PSFEx](https://github.com/astromatic/psfex) and [MCCD](https://github.com/CosmoStat/mccd)._
-
-
-#### [arXiv:2111.12541](https://arxiv.org/abs/2111.12541) Rethinking the modeling of the instrumental response of telescopes with a differentiable optical model (2021)
-_NeurIPS 2021 Workshop on Machine Learning and the Physical Sciences._
-
-- Use the release 1.2.0.
-- All the scripts, jobs and notebooks to reproduce the figures from the article can be found [here](https://github.com/tobias-liaudat/wf-psf/tree/main/papers/Neurips2021_ML4Physics_workshop).
-
-
-
-## Citation
-
-If you use `wf-psf` in a scientific publication, we would appreciate citations to the following paper:
-
-*Rethinking data-driven point spread function modeling with a differentiable optical model*, T. Liaudat, J.-L. Starck, M. Kilbinger, P.-A. Frugier, [arXiv:2203.04908](http://arxiv.org/abs/2203.04908), 2022.
-
-
-The BibTeX citation is the following:
-```
-@misc{https://doi.org/10.48550/arxiv.2203.04908,
- doi = {10.48550/ARXIV.2203.04908},
-
- url = {https://arxiv.org/abs/2203.04908},
-
- author = {Liaudat, Tobias and Starck, Jean-Luc and Kilbinger, Martin and Frugier, Pierre-Antoine},
-
- keywords = {Instrumentation and Methods for Astrophysics (astro-ph.IM), Computer Vision and Pattern Recognition (cs.CV), FOS: Physical sciences, FOS: Physical sciences, FOS: Computer and information sciences, FOS: Computer and information sciences},
-
- title = {Rethinking data-driven point spread function modeling with a differentiable optical model},
-
- publisher = {arXiv},
-
- year = {2022},
-
- copyright = {arXiv.org perpetual, non-exclusive license}
-}
-```
-
diff --git a/config/configs.yaml b/config/configs.yaml
new file mode 100644
index 00000000..f24561ac
--- /dev/null
+++ b/config/configs.yaml
@@ -0,0 +1,2 @@
+---
+ training_conf: training_config.yaml
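This master config only names the other config files to run. As a minimal sketch of how it chains to them (assuming PyYAML; the package's own loader may differ):

```python
import yaml

# Read the master config, which maps each pipeline task to a config file name.
with open("config/configs.yaml") as f:
    master = yaml.safe_load(f)

# Follow the reference to the training configuration.
with open(f"config/{master['training_conf']}") as f:
    training = yaml.safe_load(f)

print(training["training"]["model_params"]["model_name"])  # 'poly' in this example
```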
diff --git a/config/data_config.yaml b/config/data_config.yaml
new file mode 100644
index 00000000..a0ea4986
--- /dev/null
+++ b/config/data_config.yaml
@@ -0,0 +1,44 @@
+# Training and test data sets for training and/or metrics evaluation
+data:
+ training:
+ # Specify directory path to data; Default setting is /path/to/repo/data
+ data_dir: data/coherent_euclid_dataset/
+ file: train_Euclid_res_200_TrainStars_id_001.npy
+ # If the training data set file does not exist, generate a new one by setting the values below
+ stars: null
+ positions: null
+ SEDS: null
+ zernike_coef: null
+ C_poly: null
+ params:
+ d_max: 2
+ max_order: 45
+ x_lims: [0, 1000.0]
+ y_lims: [0, 1000.0]
+ grid_points: [4, 4]
+ n_bins: 20
+ max_wfe_rms: 0.1
+ oversampling_rate: 3.0
+ output_Q: 3.0
+ output_dim: 32
+ LP_filter_length: 2
+ pupil_diameter: 256
+ euclid_obsc: true
+ n_stars: 200
+ test:
+ data_dir: data/coherent_euclid_dataset/
+ file: test_Euclid_res_id_001.npy
+ # If the test data set file is not provided, produce a new one
+ stars: null
+ noisy_stars: null
+ positions: null
+ SEDS: null
+ zernike_coef: null
+ C_poly: null
+ parameters:
+ d_max: 2
+ max_order: 45
+ x_lims: [0, 1000.0]
+ y_lims: [0, 1000.0]
+ grid_points: [4,4]
+ max_wfe_rms: 0.1
\ No newline at end of file
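The `file` entries above refer to `.npy` archives holding a pickled dictionary whose keys mirror the fields in this config (`stars`, `positions`, `SEDs`, `zernike_coef`, `C_poly`, ...). A minimal inspection sketch, following the `np.load(..., allow_pickle=True)[()]` idiom used by the debug scripts removed further down; the shapes in the comments are expectations derived from the params above, not guarantees:

```python
import numpy as np

# Load the pickled dataset dictionary; the trailing [()] unwraps the 0-d object array.
train = np.load(
    "data/coherent_euclid_dataset/train_Euclid_res_200_TrainStars_id_001.npy",
    allow_pickle=True,
)[()]

print(list(train.keys()))        # stars, positions, SEDs, zernike_coef, C_poly, parameters
print(train["stars"].shape)      # expected (n_stars, output_dim, output_dim), i.e. (200, 32, 32)
print(train["positions"].shape)  # expected (n_stars, 2) focal-plane coordinates
```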
diff --git a/config/logging.conf b/config/logging.conf
new file mode 100644
index 00000000..69d7a2ee
--- /dev/null
+++ b/config/logging.conf
@@ -0,0 +1,33 @@
+[loggers]
+keys=root, simpleLogger
+
+[handlers]
+keys=consoleHandler, fileHandler
+
+[formatters]
+keys=simpleFormatter
+
+[logger_root]
+level=DEBUG
+handlers=consoleHandler, fileHandler
+
+[logger_simpleLogger]
+level=DEBUG
+handlers=consoleHandler, fileHandler
+qualname=simpleLogger
+propagate=0
+
+[handler_consoleHandler]
+class=StreamHandler
+level=DEBUG
+formatter=simpleFormatter
+args=(sys.stdout,)
+
+[handler_fileHandler]
+class=FileHandler
+level=DEBUG
+formatter=simpleFormatter
+args=('%(filename)s', 'w')
+
+[formatter_simpleFormatter]
+format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
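The `[handler_fileHandler]` section interpolates `%(filename)s` when the config is loaded, so the log file name has to be supplied through the `defaults` argument of `logging.config.fileConfig`. A minimal sketch (the log file name is illustrative):

```python
import logging
import logging.config

# '%(filename)s' in [handler_fileHandler] is filled in from the `defaults` mapping.
logging.config.fileConfig(
    "config/logging.conf",
    defaults={"filename": "wf-psf-run.log"},  # illustrative log file name
)

logger = logging.getLogger("simpleLogger")
logger.info("Logging configured from config/logging.conf")
```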
diff --git a/config/metrics_config.yaml b/config/metrics_config.yaml
new file mode 100644
index 00000000..50731786
--- /dev/null
+++ b/config/metrics_config.yaml
@@ -0,0 +1,124 @@
+metrics:
+ # Specify the type of model weights to load by entering "psf_model" to load weights of final psf model or "checkpoint" to load weights from a checkpoint callback.
+ model_save_path:
+ # Choose the training cycle for which to evaluate the psf_model. Can be: 1, 2, ...
+ saved_training_cycle: 2
+ # Metrics-only run: specify model_params for a pre-trained model; leave blank if running training + metrics
+ # Specify path to Parent Directory of Trained Model
+ trained_model_path:
+ # Name of the Trained Model Config file stored in config sub-directory in the trained_model_path parent directory
+ trained_model_config:
+ #Evaluate the monochromatic RMSE metric.
+ eval_mono_metric_rmse: True
+ #Evaluate the OPD RMSE metric.
+ eval_opd_metric_rmse: True
+ #Evaluate the super-resolution and the shape RMSE metrics for the train dataset.
+ eval_train_shape_sr_metric_rmse: True
+ # Name of Plotting Config file - enter the name of a yaml file to plot the metrics; if empty, run the metrics evaluation only
+ plotting_config:
+ ground_truth_model:
+ model_params:
+ #Model used as ground truth for the evaluation. Options are: 'poly' for polychromatic and 'physical' [not available].
+ model_name: poly
+
+ # Evaluation parameters
+ #Number of bins used for the ground truth model poly PSF generation
+ n_bins_lda: 20
+
+ #Downsampling rate to match the oversampled model to the specified telescope's sampling.
+ output_Q: 3
+
+ #Oversampling rate used for the OPD/WFE PSF model.
+ oversampling_rate: 3
+
+ #Dimension of the pixel PSF postage stamp
+ output_dim: 32
+
+ #Dimension of the OPD/Wavefront space.
+ pupil_diameter: 256
+
+ #Boolean to define if we use sample weights based on the noise standard deviation estimation
+ use_sample_weights: True
+
+ #Interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'.
+ interpolation_type: None
+
+ # SED interpolation points per bin
+ sed_interp_pts_per_bin: 0
+
+ # SED extrapolate
+ sed_extrapolate: True
+
+ # SED interpolate kind
+ sed_interp_kind: linear
+
+ # Standard deviation of the multiplicative SED Gaussian noise.
+ sed_sigma: 0
+
+ #Limits of the PSF field coordinates for the x axis.
+ x_lims: [0.0, 1.0e+3]
+
+ #Limits of the PSF field coordinates for the y axis.
+ y_lims: [0.0, 1.0e+3]
+
+ # Hyperparameters for Parametric model
+ param_hparams:
+ # Random seed for Tensor Flow Initialization
+ random_seed: 3877572
+
+ # Parameter for the l2 loss function for the Optical path differences (OPD)/WFE
+ l2_param: 0.
+
+ #Zernike polynomial modes to use on the parametric part.
+ n_zernikes: 45
+
+ #Max polynomial degree of the parametric part.
+ d_max: 2
+
+ #Flag to save optimisation history for parametric model
+ save_optim_history_param: true
+
+ # Hyperparameters for non-parametric model
+ nonparam_hparams:
+ #Max polynomial degree of the non-parametric part.
+ d_max_nonparam: 5
+
+ # Number of graph features
+ num_graph_features: 10
+
+ #L1 regularisation parameter for the non-parametric part.
+ l1_rate: 1.0e-8
+
+ #Flag to enable Projected learning for DD_features to be used with `poly` or `semiparametric` model.
+ project_dd_features: False
+
+ #Flag to reset DD_features to be used with `poly` or `semiparametric` model
+ reset_dd_features: False
+
+ #Flag to save optimisation history for non-parametric model
+ save_optim_history_nonparam: True
+
+ metrics_hparams:
+ # Batch size to use for the evaluation.
+ batch_size: 16
+
+ #Flag to get the Super-Resolution pixel PSF RMSE for each individual test star,
+ #in addition to the mean across the FOV.
+ #If `True`, the relative pixel RMSE of each star is added to the saving dictionary.
+ opt_stars_rel_pix_rmse: False
+
+ ## Specific parameters
+ # Parameter for the l2 loss of the OPD.
+ l2_param: 0.
+
+ ## Define the resolution at which you'd like to measure the shape of the PSFs
+ #Downsampling rate from the high-resolution pixel modelling space.
+ # Recommended value: 1
+ output_Q: 1
+
+ #Dimension of the pixel PSF postage stamp; it should be big enough so that most of the signal is contained inside the postage stamp.
+ # It also depends on the Q values used.
+ # Recommended value: 64 or higher
+ output_dim: 64
+
+
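The metrics run saves its results as a pickled `.npy` dictionary. As in the `table-WFE-error-metrics-rerun.ipynb` notebook removed further down, the OPD/WFE errors can be read back as follows (the file path is illustrative; the dictionary layout is taken from that notebook):

```python
import numpy as np

# Illustrative path; the nested layout (test_metrics -> opd_metric) follows the
# removed debug notebook.
metrics = np.load("metrics-poly-coherent_euclid_200stars.npy", allow_pickle=True)[()]

opd = metrics["test_metrics"]["opd_metric"]
print("Absolute WFE RMSE: %.3e nm" % (1e3 * opd["rmse_opd"]))
print("Relative WFE RMSE: %.2f %%" % opd["rel_rmse_opd"])
```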
diff --git a/config/plotting_config.yaml b/config/plotting_config.yaml
new file mode 100644
index 00000000..4a133ec2
--- /dev/null
+++ b/config/plotting_config.yaml
@@ -0,0 +1,13 @@
+plotting_params:
+ # Specify path to parent folder containing wf-outputs-xxxxxxxxxxx for all runs, ex: $WORK/wf-outputs/
+ metrics_output_path:
+ # List all of the parent output directories (i.e. wf-outputs-xxxxxxxxxxx) that contain metrics results to be included in the plot
+ metrics_dir:
+ # - wf-outputs-xxxxxxxxxxx1
+ # - wf-outputs-xxxxxxxxxxx2
+ # List of the names of the metrics config files to add to the plot (ideally the code would locate them in metrics_dir)
+ metrics_config:
+ # - metrics_config_1.yaml
+ # - metrics_config_2.yaml
+ # Show Plots Flag
+ plot_show: False
\ No newline at end of file
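A sketch of the behaviour the comment above wishes for: discovering each run's metrics config inside `metrics_dir` instead of listing the file names by hand (all paths are illustrative placeholders):

```python
from pathlib import Path

metrics_output_path = Path("wf-outputs")  # illustrative parent folder

# For each listed run directory, locate its metrics config file(s) automatically.
for run_dir in ["wf-outputs-xxxxxxxxxxx1", "wf-outputs-xxxxxxxxxxx2"]:
    found = sorted((metrics_output_path / run_dir).glob("**/metrics_config*.yaml"))
    print(run_dir, "->", [p.name for p in found])
```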
diff --git a/config/training_config.yaml b/config/training_config.yaml
new file mode 100644
index 00000000..7ec4f9e8
--- /dev/null
+++ b/config/training_config.yaml
@@ -0,0 +1,118 @@
+training:
+ # Run ID name
+ id_name: -coherent_euclid_200stars
+ # Name of Data Config file
+ data_config: data_config.yaml
+ # Metrics Config file - enter a file to run the metrics evaluation; if empty, run training only
+ metrics_config: metrics_config.yaml
+ # PSF model parameters
+ model_params:
+ # Model type. Options are: 'mccd', 'graph', 'poly', 'param', 'poly_physical'.
+ model_name: poly
+
+ #Num of wavelength bins to reconstruct polychromatic objects.
+ n_bins_lda: 8
+
+ #Downsampling rate to match the oversampled model to the specified telescope's sampling.
+ output_Q: 3
+
+ #Oversampling rate used for the OPD/WFE PSF model.
+ oversampling_rate: 3
+
+ #Dimension of the pixel PSF postage stamp
+ output_dim: 32
+
+ #Dimension of the OPD/Wavefront space.
+ pupil_diameter: 256
+
+ #Boolean to define if we use sample weights based on the noise standard deviation estimation
+ use_sample_weights: True
+
+ #Interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'.
+ interpolation_type: None
+
+ # SED interpolation points per bin
+ sed_interp_pts_per_bin: 0
+
+ # SED extrapolate
+ sed_extrapolate: True
+
+ # SED interpolate kind
+ sed_interp_kind: linear
+
+ # Standard deviation of the multiplicative SED Gaussian noise.
+ sed_sigma: 0
+
+ #Limits of the PSF field coordinates for the x axis.
+ x_lims: [0.0, 1.0e+3]
+
+ #Limits of the PSF field coordinates for the y axis.
+ y_lims: [0.0, 1.0e+3]
+
+ # Hyperparameters for Parametric model
+ param_hparams:
+ # Set the random seed for Tensor Flow Initialization
+ random_seed: 3877572
+
+ # Set the parameter for the l2 loss function for the Optical path differences (OPD)/WFE
+ l2_param: 0.
+
+ #Zernike polynomial modes to use on the parametric part.
+ n_zernikes: 15
+
+ #Max polynomial degree of the parametric part.
+ d_max: 2
+
+ #Flag to save optimisation history for parametric model
+ save_optim_history_param: true
+
+ # Hyperparameters for non-parametric model
+ nonparam_hparams:
+ #Max polynomial degree of the non-parametric part (to be renamed max_deg_nonparam).
+ d_max_nonparam: 5
+
+ # Number of graph features
+ num_graph_features: 10
+
+ #L1 regularisation parameter for the non-parametric part.
+ l1_rate: 1.0e-8
+
+ #Flag to enable Projected learning for DD_features to be used with `poly` or `semiparametric` model.
+ project_dd_features: False
+
+ #Flag to reset DD_features to be used with `poly` or `semiparametric` model
+ reset_dd_features: False
+
+ #Flag to save optimisation history for non-parametric model
+ save_optim_history_nonparam: true
+
+ # Training hyperparameters
+ training_hparams:
+
+ # Batch Size
+ batch_size: 32
+
+ # Multi-cyclic Parameters
+ multi_cycle_params:
+
+ # Total number of cycles to perform for training.
+ total_cycles: 1
+
+ # Train cycle definition. It can be: 'parametric', 'non-parametric', 'complete', 'only-non-parametric' and 'only-parametric'.
+ cycle_def: complete
+
+ # Flag to save all cycles. If "True", create a checkpoint at every cycle; if "False", only save the checkpoint at the end of the training.
+ save_all_cycles: False
+
+ # Learning rates for training the parametric model parameters per cycle.
+ learning_rate_params: [1.0e-2, 1.0e-2]
+
+ # Learning rates for training the non-parametric model parameters per cycle.
+ learning_rate_non_params: [1.0e-1, 1.0e-1]
+
+ # Number of training epochs for training the parametric model parameters per cycle.
+ n_epochs_params: [20, 20]
+
+ # Number of training epochs for training the non-parametric model parameters per cycle.
+ n_epochs_non_params: [100, 120]
+
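The learning-rate and epoch entries under `multi_cycle_params` are per-cycle lists; note that here they carry two entries while `total_cycles` is 1, so presumably only the first entry of each list is consumed. A small consistency check, assuming PyYAML (not part of the package):

```python
import yaml

with open("config/training_config.yaml") as f:
    cfg = yaml.safe_load(f)["training"]

cycle = cfg["training_hparams"]["multi_cycle_params"]
n_cycles = cycle["total_cycles"]

# Every per-cycle list should provide at least one entry per requested cycle.
for key in ("learning_rate_params", "learning_rate_non_params",
            "n_epochs_params", "n_epochs_non_params"):
    assert len(cycle[key]) >= n_cycles, f"{key} needs at least {n_cycles} entries"
```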
diff --git a/debug/candide_helper_eval_plot_script.py b/debug/candide_helper_eval_plot_script.py
deleted file mode 100644
index 549650a3..00000000
--- a/debug/candide_helper_eval_plot_script.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import wf_psf as wf
-
-# ----------------------- #
-# WaveDiff-original
-args = {
- 'model': 'poly',
- 'id_name': '_sample_w_bis1_2k',
- 'base_path': '/n05data/tliaudat/wf_psf_exp/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/home/tliaudat/github/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/n05data/tliaudat/wf_psf_exp/metrics/',
- 'chkp_save_path': '/home/tliaudat/github/wf-psf/papers/article_IOP/data/models/wavediff-original/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 5,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.01,0.004],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [15,15],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_sample_w_bis1_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-graph
-args = {
- 'model': 'graph',
- 'id_name': '_sample_w_tunned_2k',
- 'base_path': '/n05data/tliaudat/wf_psf_exp/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/home/tliaudat/github/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/n05data/tliaudat/wf_psf_exp/metrics/',
- 'chkp_save_path': '/home/tliaudat/github/wf-psf/papers/article_IOP/data/models/wavediff-graph/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 21,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.01,0.004],
- 'l_rate_non_param': [0.4,0.2],
- 'n_epochs_param': [15,15],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_sample_w_tunned_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-polygraph
-args = {
- 'model': 'mccd',
- 'id_name': '_sample_w_bis2_2k',
- 'base_path': '/n05data/tliaudat/wf_psf_exp/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/home/tliaudat/github/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/n05data/tliaudat/wf_psf_exp/metrics/',
- 'chkp_save_path': '/home/tliaudat/github/wf-psf/papers/article_IOP/data/models/wavediff-polygraph/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.01,0.004],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [15,15],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_sample_w_bis2_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-zernike15
-args = {
- 'model': 'param',
- 'id_name': '_incomplete_15_sample_w_2k',
- 'base_path': '/n05data/tliaudat/wf_psf_exp/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/home/tliaudat/github/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/n05data/tliaudat/wf_psf_exp/metrics/',
- 'chkp_save_path': '/home/tliaudat/github/wf-psf/papers/article_IOP/data/models/zernike_15/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.005,0.001],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [20,20],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_incomplete_15_sample_w_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-zernike45
-args = {
- 'model': 'param',
- 'id_name': '_incomplete_40_sample_w_bis1_2k',
- 'base_path': '/n05data/tliaudat/wf_psf_exp/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/home/tliaudat/github/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/n05data/tliaudat/wf_psf_exp/metrics/',
- 'chkp_save_path': '/home/tliaudat/github/wf-psf/papers/article_IOP/data/models/zernike_40/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 40,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.005,0.001],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [20,20],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_incomplete_40_sample_w_bis1_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-print('Done..')
diff --git a/debug/candide_job_eval_plot.sh b/debug/candide_job_eval_plot.sh
deleted file mode 100644
index dd774ae7..00000000
--- a/debug/candide_job_eval_plot.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N re_eval_wf_psf
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=95:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=4:hasgpu
-
-# Full path to environment
-export SPENV="$HOME/.conda/envs/shapepipe"
-export CONFDIR="$HOME/github/aziz_repos/deep_mccd"
-
-# Activate conda environment
-module purge
-module load tensorflow/2.4
-# module load intel/19.0/2
-# source activate shapepipe
-
-cd /home/tliaudat/github/wf-psf/debug
-
-# Run scripts
-python candide_helper_eval_plot_script.py
-
-# Return exit code
-exit 0
diff --git a/debug/debug_tf_model.py b/debug/debug_tf_model.py
deleted file mode 100644
index cba9a038..00000000
--- a/debug/debug_tf_model.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import numpy as np
-import scipy.signal as spsig
-import scipy.interpolate as sinterp
-import scipy.io as sio
-import matplotlib.pyplot as plt
-import matplotlib as mpl
-from matplotlib.colors import ListedColormap, LinearSegmentedColormap
-from mpl_toolkits.axes_grid1 import make_axes_locatable
-import PIL
-import time
-from tqdm.notebook import tqdm
-
-# Import wavefront code
-import wf_psf.SimPSFToolkit as wf_sim
-import wf_psf.tf_layers as wf_layers
-import wf_psf.tf_modules as wf_modules
-import wf_psf.tf_psf_field as wf_psf_field
-import wf_psf.utils as wf_utils
-import wf_psf.GenPolyFieldPSF as wf_gen
-
-
-plt.rcParams['figure.figsize'] = (16, 8)
-import tensorflow as tf
-
-# Pre-defined colormap
-top = mpl.cm.get_cmap('Oranges_r', 128)
-bottom = mpl.cm.get_cmap('Blues', 128)
-newcolors = np.vstack((top(np.linspace(0, 1, 128)),
- bottom(np.linspace(0, 1, 128))))
-newcmp = ListedColormap(newcolors, name='OrangeBlue')
-
-
-## Prepare inputs
-
-# Zcube_path = '/content/drive/MyDrive/Colab Notebooks/psf_data/Zernike45.mat'
-Zcube_path = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/data/PA-zernike-cubes/Zernike45.mat'
-Zcube = sio.loadmat(Zcube_path)
-zernikes = []
-
-# Decimation factor for Zernike polynomials
-decim_f = 4 # Original shape (1024x1024)
-n_bins_lda = 15
-n_zernikes = 15
-batch_size = 16
-output_dim=64
-d_max=2
-x_lims=[0, 1e3]
-y_lims=[0, 1e3]
-l_rate = 1e-2
-
-for it in range(n_zernikes):
- zernike_map = wf_utils.decimate_im(Zcube['Zpols'][0, it][5], decim_f)
- zernikes.append(zernike_map)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-
-# dataset_path = '/content/wf-psf/tf_notebooks/psf_field_dataset/'
-# dataset_path = '/content/wf-psf/data/psf_field/'
-# dataset_path = '/content/drive/MyDrive/Colab Notebooks/psf_field_dataset/'
-dataset_path = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/tf_notebooks/psf_field_dataset/'
-# dataset_path = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/data/psf_field_datasets/'
-# Load the dictionaries
-train_dataset = np.load(dataset_path + 'train_dataset_256_bin15_z15_bis.npy', allow_pickle=True)[()]
-train_stars = train_dataset['stars']
-train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-tf_train_stars = tf.convert_to_tensor(train_stars, dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_pos, dtype=tf.float32)
-
-
-# Generate initializations
-
-pupil_diameter = 1024 // decim_f
-
-# Prepare np input
-simPSF_np = wf_sim.SimPSFToolkit(zernikes, max_order=n_zernikes, pupil_diameter=pupil_diameter)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf_utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-# Some parameters
-
-tf_PSF_field_model = wf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# Define the model optimisation
-loss = tf.keras.losses.MeanSquaredError()
-
-optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate, beta_1=0.9, beta_2=0.999,
- epsilon=1e-07, amsgrad=False)
-# optimizer = tf.keras.optimizers.SGD(
-# learning_rate=l_rate, momentum=0.9, nesterov=True)
-
-metrics = [tf.keras.metrics.MeanSquaredError()]
-
-tf_PSF_field_model = wf_psf_field.build_PSF_model(tf_PSF_field_model, optimizer=optimizer,
- loss=loss, metrics=metrics)
-
-# Assign the new init variable matrix
-# tf_PSF_field_model.tf_poly_Z_field.coeff_mat.assign(init_C)
-tf_PSF_field_model.tf_poly_Z_field.coeff_mat.assign(train_C_poly)
-
-
-
-
-
-# Compare the Pi generating matrix
-gen_poly_field = wf_gen.GenPolyFieldPSF(
- sim_psf_toolkit=simPSF_np, d_max=d_max, grid_points=[4, 4], max_order=15,
- x_lims=x_lims, y_lims=y_lims, n_bins=n_bins_lda,
- lim_max_wfe_rms=simPSF_np.max_wfe_rms, verbose=False)
-
-gen_poly_field.C_poly = train_C_poly
-
-
-
-# Regenerate the sample
-_it = 6
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-pred_inputs = [train_pos[_it:_it+1,:] , tf_packed_SED_data[_it:_it+1,:,:]]
-
-expected_outputs = tf_train_stars[_it:_it+1,:,:]
-
-predictions = tf_PSF_field_model.predict(x=pred_inputs, batch_size=batch_size)
-
-xv_flat = train_pos[_it,0]
-yv_flat = train_pos[_it,1]
-SED = train_SEDs[_it,:,:]
-
-np_poly_psf, np_zernikes, np_opd = gen_poly_field.get_poly_PSF(xv_flat, yv_flat, SED)
-
-
-print('Bye')
\ No newline at end of file
diff --git a/debug/jz_helper_eval_plot_script.py b/debug/jz_helper_eval_plot_script.py
deleted file mode 100644
index 11f4e00d..00000000
--- a/debug/jz_helper_eval_plot_script.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import wf_psf as wf
-
-# ----------------------- #
-# WaveDiff-original
-args = {
- 'model': 'poly',
- 'id_name': '_sample_w_bis1_2k',
- 'base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/metrics/',
- 'chkp_save_path': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/papers/article_IOP/data/models/wavediff-original/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 5,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.01,0.004],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [15,15],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_sample_w_bis1_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-graph
-args = {
- 'model': 'graph',
- 'id_name': '_sample_w_tunned_2k',
- 'base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/metrics/',
- 'chkp_save_path': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/papers/article_IOP/data/models/wavediff-graph/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 21,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.01,0.004],
- 'l_rate_non_param': [0.4,0.2],
- 'n_epochs_param': [15,15],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_sample_w_tunned_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-polygraph
-args = {
- 'model': 'mccd',
- 'id_name': '_sample_w_bis2_2k',
- 'base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/metrics/',
- 'chkp_save_path': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/papers/article_IOP/data/models/wavediff-polygraph/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.01,0.004],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [15,15],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_sample_w_bis2_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-zernike15
-args = {
- 'model': 'param',
- 'id_name': '_incomplete_15_sample_w_2k',
- 'base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/metrics/',
- 'chkp_save_path': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/papers/article_IOP/data/models/zernike_15/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 15,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.005,0.001],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [20,20],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_incomplete_15_sample_w_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-# ----------------------- #
-# WaveDiff-zernike45
-args = {
- 'model': 'param',
- 'id_name': '_incomplete_40_sample_w_bis1_2k',
- 'base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/',
- 'log_folder': "log-files/",
- 'model_folder': "chkp/",
- 'optim_hist_folder': "optim-hist/",
- 'plots_folder': "plots/" ,
- 'dataset_folder': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/',
- 'metric_base_path': '/gpfswork/rech/ynx/ulx23va/wf-outputs/rerun_paper_results/metrics/',
- 'chkp_save_path': '/gpfswork/rech/ynx/ulx23va/repo/wf-psf/papers/article_IOP/data/models/zernike_40/',
- 'train_dataset_file': 'train_Euclid_res_2000_TrainStars_id_001.npy',
- 'test_dataset_file': 'test_Euclid_res_id_001.npy',
- 'n_zernikes': 40,
- 'pupil_diameter': 256,
- 'n_bins_lda': 20,
- 'output_q': 3.,
- 'oversampling_rate': 3.,
- 'output_dim': 32,
- 'd_max': 2,
- 'd_max_nonparam': 3,
- 'x_lims': [0, 1e3],
- 'y_lims': [0, 1e3],
- 'graph_features': 10,
- 'l1_rate': 1e-8,
- 'use_sample_weights': True,
- 'batch_size': 32,
- 'l_rate_param': [0.005,0.001],
- 'l_rate_non_param': [0.1,0.06],
- 'n_epochs_param': [20,20],
- 'n_epochs_non_param': [100,50],
- 'total_cycles': 2,
- 'saved_model_type': 'checkpoint',
- 'saved_cycle': 'cycle2',
- 'gt_n_zernikes': 45,
- 'eval_batch_size': 16,
- 'l2_param': 0.,
- 'base_id_name': '_incomplete_40_sample_w_bis1_',
- 'suffix_id_name': '2k',
- 'star_numbers': 2000,
-}
-wf.script_utils.evaluate_model(**args)
-wf.script_utils.plot_metrics(**args)
-
-print('Done..')
diff --git a/debug/jz_job_eval_plot.sh b/debug/jz_job_eval_plot.sh
deleted file mode 100644
index fd809baa..00000000
--- a/debug/jz_job_eval_plot.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=rerun_metrics # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=rerun_metrics%j.out # nom du fichier de sortie
-#SBATCH --error=rerun_metrics%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-##SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-srun python -u $WORK/repo/wf-psf/debug/jz_helper_eval_plot_script.py
diff --git a/debug/table-WFE-error-metrics-rerun.ipynb b/debug/table-WFE-error-metrics-rerun.ipynb
deleted file mode 100644
index a51414fd..00000000
--- a/debug/table-WFE-error-metrics-rerun.ipynb
+++ /dev/null
@@ -1,263 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "id": "b74eea0a",
- "metadata": {},
- "outputs": [],
- "source": [
- "import numpy as np\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "44ed1270",
- "metadata": {},
- "source": [
- "# Zernike 15"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "id": "3d149a95",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Absolute WFE RMSE: 1.851e+02 nm\n",
- "Relative WFE RMSE: 284.26 %\n"
- ]
- }
- ],
- "source": [
- "# Load metrics\n",
- "metrics_wf_inc15_2k_path = './../data/metrics/zernike_15/metrics-param_incomplete_15_sample_w_2k.npy'\n",
- "metrics_wf_inc15_2k = np.load(metrics_wf_inc15_2k_path, allow_pickle=True)[()]\n",
- "metrics = metrics_wf_inc15_2k\n",
- "\n",
- "print('Absolute WFE RMSE: %.3e nm'% (1e3*metrics['test_metrics']['opd_metric']['rmse_opd']))\n",
- "print('Relative WFE RMSE: %.2f %%'% metrics['test_metrics']['opd_metric']['rel_rmse_opd'])\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7eeec55a",
- "metadata": {},
- "source": [
- "# Zernike 40"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "id": "b15a2e36",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Absolute WFE RMSE: 9.809e+01 nm\n",
- "Relative WFE RMSE: 148.36 %\n"
- ]
- }
- ],
- "source": [
- "# Load metrics\n",
- "metrics_wf_inc40_2k_path = './../data/metrics/zernike_40/metrics-param_incomplete_40_sample_w_bis1_2k.npy'\n",
- "metrics_wf_inc40_2k = np.load(metrics_wf_inc40_2k_path, allow_pickle=True)[()]\n",
- "metrics = metrics_wf_inc40_2k\n",
- "\n",
- "print('Absolute WFE RMSE: %.3e nm'% (1e3*metrics['test_metrics']['opd_metric']['rmse_opd']))\n",
- "print('Relative WFE RMSE: %.2f %%'% metrics['test_metrics']['opd_metric']['rel_rmse_opd'])\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "cd6faff4",
- "metadata": {},
- "source": [
- "# WaveDiff-original"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "id": "a9f05243",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Absolute WFE RMSE: 1.400e+02 nm\n",
- "Relative WFE RMSE: 212.29 %\n"
- ]
- }
- ],
- "source": [
- "# Load metrics\n",
- "metrics_2k_path = './../data/metrics/wavediff-original/metrics-poly_sample_w_bis1_2k.npy'\n",
- "wf_orifinal_metrics = np.load(metrics_2k_path, allow_pickle=True)[()]\n",
- "metrics = wf_orifinal_metrics\n",
- "\n",
- "print('Absolute WFE RMSE: %.3e nm'% (1e3*metrics['test_metrics']['opd_metric']['rmse_opd']))\n",
- "print('Relative WFE RMSE: %.2f %%'% metrics['test_metrics']['opd_metric']['rel_rmse_opd'])\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "d559cda6",
- "metadata": {},
- "source": [
- "# WaveDiff-graph"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "8ae83f49",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Absolute WFE RMSE: 1.987e+02 nm\n",
- "Relative WFE RMSE: 304.64 %\n"
- ]
- }
- ],
- "source": [
- "# Load metrics\n",
- "metrics_2k_path = './../data/metrics/wavediff-graph/metrics-graph_sample_w_tunned_2k.npy'\n",
- "wf_graph_metrics = np.load(metrics_2k_path, allow_pickle=True)[()]\n",
- "metrics = wf_graph_metrics\n",
- "\n",
- "print('Absolute WFE RMSE: %.3e nm'% (1e3*metrics['test_metrics']['opd_metric']['rmse_opd']))\n",
- "print('Relative WFE RMSE: %.2f %%'% metrics['test_metrics']['opd_metric']['rel_rmse_opd'])\n"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "7301f33a",
- "metadata": {},
- "source": [
- "# WaveDiff-polygraph"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "fadbe593",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Absolute WFE RMSE: 1.588e+02 nm\n",
- "Relative WFE RMSE: 241.65 %\n"
- ]
- }
- ],
- "source": [
- "# Load metrics\n",
- "metrics_2k_path = './../data/metrics/wavediff-polygraph/metrics-mccd_sample_w_bis2_2k.npy'\n",
- "wf_polygraph_metrics = np.load(metrics_2k_path, allow_pickle=True)[()]\n",
- "metrics = wf_polygraph_metrics\n",
- "\n",
- "print('Absolute WFE RMSE: %.3e nm'% (1e3*metrics['test_metrics']['opd_metric']['rmse_opd']))\n",
- "print('Relative WFE RMSE: %.2f %%'% metrics['test_metrics']['opd_metric']['rel_rmse_opd'])\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "f08ee11f",
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "cb644843",
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4d326ac3",
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "eb466dd2",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.12"
- },
- "varInspector": {
- "cols": {
- "lenName": 16,
- "lenType": 16,
- "lenVar": 40
- },
- "kernels_config": {
- "python": {
- "delete_cmd_postfix": "",
- "delete_cmd_prefix": "del ",
- "library": "var_list.py",
- "varRefreshCmd": "print(var_dic_list())"
- },
- "r": {
- "delete_cmd_postfix": ") ",
- "delete_cmd_prefix": "rm(",
- "library": "var_list.r",
- "varRefreshCmd": "cat(var_dic_list()) "
- }
- },
- "types_to_exclude": [
- "module",
- "function",
- "builtin_function_or_method",
- "instance",
- "_Feature"
- ],
- "window_display": false
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..d0c3cbf1
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 00000000..747ffb7b
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.https://www.sphinx-doc.org/
+ exit /b 1
+)
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
+
+:end
+popd
diff --git a/docs/source/_static/_sphinx_javascript_frameworks_compat.js b/docs/source/_static/_sphinx_javascript_frameworks_compat.js
new file mode 100644
index 00000000..81415803
--- /dev/null
+++ b/docs/source/_static/_sphinx_javascript_frameworks_compat.js
@@ -0,0 +1,123 @@
+/* Compatability shim for jQuery and underscores.js.
+ *
+ * Copyright Sphinx contributors
+ * Released under the two clause BSD licence
+ */
+
+/**
+ * small helper function to urldecode strings
+ *
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
+ */
+jQuery.urldecode = function(x) {
+ if (!x) {
+ return x
+ }
+ return decodeURIComponent(x.replace(/\+/g, ' '));
+};
+
+/**
+ * small helper function to urlencode strings
+ */
+jQuery.urlencode = encodeURIComponent;
+
+/**
+ * This function returns the parsed url parameters of the
+ * current request. Multiple values per key are supported,
+ * it will always return arrays of strings for the value parts.
+ */
+jQuery.getQueryParameters = function(s) {
+ if (typeof s === 'undefined')
+ s = document.location.search;
+ var parts = s.substr(s.indexOf('?') + 1).split('&');
+ var result = {};
+ for (var i = 0; i < parts.length; i++) {
+ var tmp = parts[i].split('=', 2);
+ var key = jQuery.urldecode(tmp[0]);
+ var value = jQuery.urldecode(tmp[1]);
+ if (key in result)
+ result[key].push(value);
+ else
+ result[key] = [value];
+ }
+ return result;
+};
+
+/**
+ * highlight a given string on a jquery object by wrapping it in
+ * span elements with the given class name.
+ */
+jQuery.fn.highlightText = function(text, className) {
+ function highlight(node, addItems) {
+ if (node.nodeType === 3) {
+ var val = node.nodeValue;
+ var pos = val.toLowerCase().indexOf(text);
+ if (pos >= 0 &&
+ !jQuery(node.parentNode).hasClass(className) &&
+ !jQuery(node.parentNode).hasClass("nohighlight")) {
+ var span;
+ var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.className = className;
+ }
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ node.parentNode.insertBefore(span, node.parentNode.insertBefore(
+ document.createTextNode(val.substr(pos + text.length)),
+ node.nextSibling));
+ node.nodeValue = val.substr(0, pos);
+ if (isInSVG) {
+ var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
+ var bbox = node.parentElement.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute('class', className);
+ addItems.push({
+ "parent": node.parentNode,
+ "target": rect});
+ }
+ }
+ }
+ else if (!jQuery(node).is("button, select, textarea")) {
+ jQuery.each(node.childNodes, function() {
+ highlight(this, addItems);
+ });
+ }
+ }
+ var addItems = [];
+ var result = this.each(function() {
+ highlight(this, addItems);
+ });
+ for (var i = 0; i < addItems.length; ++i) {
+ jQuery(addItems[i].parent).before(addItems[i].target);
+ }
+ return result;
+};
+
+/*
+ * backward compatibility for jQuery.browser
+ * This will be supported until firefox bug is fixed.
+ */
+if (!jQuery.browser) {
+ jQuery.uaMatch = function(ua) {
+ ua = ua.toLowerCase();
+
+ var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
+ /(webkit)[ \/]([\w.]+)/.exec(ua) ||
+ /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
+ /(msie) ([\w.]+)/.exec(ua) ||
+ ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
+ [];
+
+ return {
+ browser: match[ 1 ] || "",
+ version: match[ 2 ] || "0"
+ };
+ };
+ jQuery.browser = {};
+ jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
+}
diff --git a/docs/source/_static/alabaster.css b/docs/source/_static/alabaster.css
new file mode 100644
index 00000000..517d0b29
--- /dev/null
+++ b/docs/source/_static/alabaster.css
@@ -0,0 +1,703 @@
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: Georgia, serif;
+ font-size: 17px;
+ background-color: #fff;
+ color: #000;
+ margin: 0;
+ padding: 0;
+}
+
+
+div.document {
+ width: 940px;
+ margin: 30px auto 0 auto;
+}
+
+div.documentwrapper {
+ float: left;
+ width: 100%;
+}
+
+div.bodywrapper {
+ margin: 0 0 0 220px;
+}
+
+div.sphinxsidebar {
+ width: 220px;
+ font-size: 14px;
+ line-height: 1.5;
+}
+
+hr {
+ border: 1px solid #B1B4B6;
+}
+
+div.body {
+ background-color: #fff;
+ color: #3E4349;
+ padding: 0 30px 0 30px;
+}
+
+div.body > .section {
+ text-align: left;
+}
+
+div.footer {
+ width: 940px;
+ margin: 20px auto 30px auto;
+ font-size: 14px;
+ color: #888;
+ text-align: right;
+}
+
+div.footer a {
+ color: #888;
+}
+
+p.caption {
+ font-family: inherit;
+ font-size: inherit;
+}
+
+
+div.relations {
+ display: none;
+}
+
+
+div.sphinxsidebar a {
+ color: #444;
+ text-decoration: none;
+ border-bottom: 1px dotted #999;
+}
+
+div.sphinxsidebar a:hover {
+ border-bottom: 1px solid #999;
+}
+
+div.sphinxsidebarwrapper {
+ padding: 18px 10px;
+}
+
+div.sphinxsidebarwrapper p.logo {
+ padding: 0;
+ margin: -10px 0 0 0px;
+ text-align: center;
+}
+
+div.sphinxsidebarwrapper h1.logo {
+ margin-top: -10px;
+ text-align: center;
+ margin-bottom: 5px;
+ text-align: left;
+}
+
+div.sphinxsidebarwrapper h1.logo-name {
+ margin-top: 0px;
+}
+
+div.sphinxsidebarwrapper p.blurb {
+ margin-top: 0;
+ font-style: normal;
+}
+
+div.sphinxsidebar h3,
+div.sphinxsidebar h4 {
+ font-family: Georgia, serif;
+ color: #444;
+ font-size: 24px;
+ font-weight: normal;
+ margin: 0 0 5px 0;
+ padding: 0;
+}
+
+div.sphinxsidebar h4 {
+ font-size: 20px;
+}
+
+div.sphinxsidebar h3 a {
+ color: #444;
+}
+
+div.sphinxsidebar p.logo a,
+div.sphinxsidebar h3 a,
+div.sphinxsidebar p.logo a:hover,
+div.sphinxsidebar h3 a:hover {
+ border: none;
+}
+
+div.sphinxsidebar p {
+ color: #555;
+ margin: 10px 0;
+}
+
+div.sphinxsidebar ul {
+ margin: 10px 0;
+ padding: 0;
+ color: #000;
+}
+
+div.sphinxsidebar ul li.toctree-l1 > a {
+ font-size: 120%;
+}
+
+div.sphinxsidebar ul li.toctree-l2 > a {
+ font-size: 110%;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #CCC;
+ font-family: Georgia, serif;
+ font-size: 1em;
+}
+
+div.sphinxsidebar hr {
+ border: none;
+ height: 1px;
+ color: #AAA;
+ background: #AAA;
+
+ text-align: left;
+ margin-left: 0;
+ width: 50%;
+}
+
+div.sphinxsidebar .badge {
+ border-bottom: none;
+}
+
+div.sphinxsidebar .badge:hover {
+ border-bottom: none;
+}
+
+/* To address an issue with donation coming after search */
+div.sphinxsidebar h3.donation {
+ margin-top: 10px;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+a {
+ color: #004B6B;
+ text-decoration: underline;
+}
+
+a:hover {
+ color: #6D4100;
+ text-decoration: underline;
+}
+
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+ font-family: Georgia, serif;
+ font-weight: normal;
+ margin: 30px 0px 10px 0px;
+ padding: 0;
+}
+
+div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
+div.body h2 { font-size: 180%; }
+div.body h3 { font-size: 150%; }
+div.body h4 { font-size: 130%; }
+div.body h5 { font-size: 100%; }
+div.body h6 { font-size: 100%; }
+
+a.headerlink {
+ color: #DDD;
+ padding: 0 4px;
+ text-decoration: none;
+}
+
+a.headerlink:hover {
+ color: #444;
+ background: #EAEAEA;
+}
+
+div.body p, div.body dd, div.body li {
+ line-height: 1.4em;
+}
+
+div.admonition {
+ margin: 20px 0px;
+ padding: 10px 30px;
+ background-color: #EEE;
+ border: 1px solid #CCC;
+}
+
+div.admonition tt.xref, div.admonition code.xref, div.admonition a tt {
+ background-color: #FBFBFB;
+ border-bottom: 1px solid #fafafa;
+}
+
+div.admonition p.admonition-title {
+ font-family: Georgia, serif;
+ font-weight: normal;
+ font-size: 24px;
+ margin: 0 0 10px 0;
+ padding: 0;
+ line-height: 1;
+}
+
+div.admonition p.last {
+ margin-bottom: 0;
+}
+
+div.highlight {
+ background-color: #fff;
+}
+
+dt:target, .highlight {
+ background: #FAF3E8;
+}
+
+div.warning {
+ background-color: #FCC;
+ border: 1px solid #FAA;
+}
+
+div.danger {
+ background-color: #FCC;
+ border: 1px solid #FAA;
+ -moz-box-shadow: 2px 2px 4px #D52C2C;
+ -webkit-box-shadow: 2px 2px 4px #D52C2C;
+ box-shadow: 2px 2px 4px #D52C2C;
+}
+
+div.error {
+ background-color: #FCC;
+ border: 1px solid #FAA;
+ -moz-box-shadow: 2px 2px 4px #D52C2C;
+ -webkit-box-shadow: 2px 2px 4px #D52C2C;
+ box-shadow: 2px 2px 4px #D52C2C;
+}
+
+div.caution {
+ background-color: #FCC;
+ border: 1px solid #FAA;
+}
+
+div.attention {
+ background-color: #FCC;
+ border: 1px solid #FAA;
+}
+
+div.important {
+ background-color: #EEE;
+ border: 1px solid #CCC;
+}
+
+div.note {
+ background-color: #EEE;
+ border: 1px solid #CCC;
+}
+
+div.tip {
+ background-color: #EEE;
+ border: 1px solid #CCC;
+}
+
+div.hint {
+ background-color: #EEE;
+ border: 1px solid #CCC;
+}
+
+div.seealso {
+ background-color: #EEE;
+ border: 1px solid #CCC;
+}
+
+div.topic {
+ background-color: #EEE;
+}
+
+p.admonition-title {
+ display: inline;
+}
+
+p.admonition-title:after {
+ content: ":";
+}
+
+pre, tt, code {
+ font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+ font-size: 0.9em;
+}
+
+.hll {
+ background-color: #FFC;
+ margin: 0 -12px;
+ padding: 0 12px;
+ display: block;
+}
+
+img.screenshot {
+}
+
+tt.descname, tt.descclassname, code.descname, code.descclassname {
+ font-size: 0.95em;
+}
+
+tt.descname, code.descname {
+ padding-right: 0.08em;
+}
+
+img.screenshot {
+ -moz-box-shadow: 2px 2px 4px #EEE;
+ -webkit-box-shadow: 2px 2px 4px #EEE;
+ box-shadow: 2px 2px 4px #EEE;
+}
+
+table.docutils {
+ border: 1px solid #888;
+ -moz-box-shadow: 2px 2px 4px #EEE;
+ -webkit-box-shadow: 2px 2px 4px #EEE;
+ box-shadow: 2px 2px 4px #EEE;
+}
+
+table.docutils td, table.docutils th {
+ border: 1px solid #888;
+ padding: 0.25em 0.7em;
+}
+
+table.field-list, table.footnote {
+ border: none;
+ -moz-box-shadow: none;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+
+table.footnote {
+ margin: 15px 0;
+ width: 100%;
+ border: 1px solid #EEE;
+ background: #FDFDFD;
+ font-size: 0.9em;
+}
+
+table.footnote + table.footnote {
+ margin-top: -15px;
+ border-top: none;
+}
+
+table.field-list th {
+ padding: 0 0.8em 0 0;
+}
+
+table.field-list td {
+ padding: 0;
+}
+
+table.field-list p {
+ margin-bottom: 0.8em;
+}
+
+/* Cloned from
+ * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68
+ */
+.field-name {
+ -moz-hyphens: manual;
+ -ms-hyphens: manual;
+ -webkit-hyphens: manual;
+ hyphens: manual;
+}
+
+table.footnote td.label {
+ width: .1px;
+ padding: 0.3em 0 0.3em 0.5em;
+}
+
+table.footnote td {
+ padding: 0.3em 0.5em;
+}
+
+dl {
+ margin-left: 0;
+ margin-right: 0;
+ margin-top: 0;
+ padding: 0;
+}
+
+dl dd {
+ margin-left: 30px;
+}
+
+blockquote {
+ margin: 0 0 0 30px;
+ padding: 0;
+}
+
+ul, ol {
+ /* Matches the 30px from the narrow-screen "li > ul" selector below */
+ margin: 10px 0 10px 30px;
+ padding: 0;
+}
+
+pre {
+ background: #EEE;
+ padding: 7px 30px;
+ margin: 15px 0px;
+ line-height: 1.3em;
+}
+
+div.viewcode-block:target {
+ background: #ffd;
+}
+
+dl pre, blockquote pre, li pre {
+ margin-left: 0;
+ padding-left: 30px;
+}
+
+tt, code {
+ background-color: #ecf0f3;
+ color: #222;
+ /* padding: 1px 2px; */
+}
+
+tt.xref, code.xref, a tt {
+ background-color: #FBFBFB;
+ border-bottom: 1px solid #fff;
+}
+
+a.reference {
+ text-decoration: none;
+ border-bottom: 1px dotted #004B6B;
+}
+
+/* Don't put an underline on images */
+a.image-reference, a.image-reference:hover {
+ border-bottom: none;
+}
+
+a.reference:hover {
+ border-bottom: 1px solid #6D4100;
+}
+
+a.footnote-reference {
+ text-decoration: none;
+ font-size: 0.7em;
+ vertical-align: top;
+ border-bottom: 1px dotted #004B6B;
+}
+
+a.footnote-reference:hover {
+ border-bottom: 1px solid #6D4100;
+}
+
+a:hover tt, a:hover code {
+ background: #EEE;
+}
+
+
+@media screen and (max-width: 870px) {
+
+ div.sphinxsidebar {
+ display: none;
+ }
+
+ div.document {
+ width: 100%;
+
+ }
+
+ div.documentwrapper {
+ margin-left: 0;
+ margin-top: 0;
+ margin-right: 0;
+ margin-bottom: 0;
+ }
+
+ div.bodywrapper {
+ margin-top: 0;
+ margin-right: 0;
+ margin-bottom: 0;
+ margin-left: 0;
+ }
+
+ ul {
+ margin-left: 0;
+ }
+
+ li > ul {
+ /* Matches the 30px from the "ul, ol" selector above */
+ margin-left: 30px;
+ }
+
+ .document {
+ width: auto;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .bodywrapper {
+ margin: 0;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .github {
+ display: none;
+ }
+
+
+
+}
+
+
+
+@media screen and (max-width: 875px) {
+
+ body {
+ margin: 0;
+ padding: 20px 30px;
+ }
+
+ div.documentwrapper {
+ float: none;
+ background: #fff;
+ }
+
+ div.sphinxsidebar {
+ display: block;
+ float: none;
+ width: 102.5%;
+ margin: 50px -30px -20px -30px;
+ padding: 10px 20px;
+ background: #333;
+ color: #FFF;
+ }
+
+ div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
+ div.sphinxsidebar h3 a {
+ color: #fff;
+ }
+
+ div.sphinxsidebar a {
+ color: #AAA;
+ }
+
+ div.sphinxsidebar p.logo {
+ display: none;
+ }
+
+ div.document {
+ width: 100%;
+ margin: 0;
+ }
+
+ div.footer {
+ display: none;
+ }
+
+ div.bodywrapper {
+ margin: 0;
+ }
+
+ div.body {
+ min-height: 0;
+ padding: 0;
+ }
+
+ .rtd_doc_footer {
+ display: none;
+ }
+
+ .document {
+ width: auto;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .footer {
+ width: auto;
+ }
+
+ .github {
+ display: none;
+ }
+}
+
+
+/* misc. */
+
+.revsys-inline {
+ display: none!important;
+}
+
+/* Make nested-list/multi-paragraph items look better in Releases changelog
+ * pages. Without this, docutils' quirky list handling causes inconsistent
+ * formatting between different release sub-lists.
+ */
+div#changelog > div.section > ul > li > p:only-child {
+ margin-bottom: 0;
+}
+
+/* Hide unwanted table cell borders in .. bibliography:: directive output */
+table.docutils.citation, table.docutils.citation td, table.docutils.citation th {
+ border: none;
+ /* Below needed in some edge cases; if not applied, bottom shadows appear */
+ -moz-box-shadow: none;
+ -webkit-box-shadow: none;
+ box-shadow: none;
+}
+
+
+/* relbar */
+
+.related {
+ line-height: 30px;
+ width: 100%;
+ font-size: 0.9rem;
+}
+
+.related.top {
+ border-bottom: 1px solid #EEE;
+ margin-bottom: 20px;
+}
+
+.related.bottom {
+ border-top: 1px solid #EEE;
+}
+
+.related ul {
+ padding: 0;
+ margin: 0;
+ list-style: none;
+}
+
+.related li {
+ display: inline;
+}
+
+nav#rellinks {
+ float: right;
+}
+
+nav#rellinks li+li:before {
+ content: "|";
+}
+
+nav#breadcrumbs li+li:before {
+ content: "\00BB";
+}
+
+/* Hide certain items when printing */
+@media print {
+ div.related {
+ display: none;
+ }
+}
\ No newline at end of file
diff --git a/docs/source/_static/basic.css b/docs/source/_static/basic.css
new file mode 100644
index 00000000..7577acb1
--- /dev/null
+++ b/docs/source/_static/basic.css
@@ -0,0 +1,903 @@
+/*
+ * basic.css
+ * ~~~~~~~~~
+ *
+ * Sphinx stylesheet -- basic theme.
+ *
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+
+/* -- main layout ----------------------------------------------------------- */
+
+div.clearer {
+ clear: both;
+}
+
+div.section::after {
+ display: block;
+ content: '';
+ clear: left;
+}
+
+/* -- relbar ---------------------------------------------------------------- */
+
+div.related {
+ width: 100%;
+ font-size: 90%;
+}
+
+div.related h3 {
+ display: none;
+}
+
+div.related ul {
+ margin: 0;
+ padding: 0 0 0 10px;
+ list-style: none;
+}
+
+div.related li {
+ display: inline;
+}
+
+div.related li.right {
+ float: right;
+ margin-right: 5px;
+}
+
+/* -- sidebar --------------------------------------------------------------- */
+
+div.sphinxsidebarwrapper {
+ padding: 10px 5px 0 10px;
+}
+
+div.sphinxsidebar {
+ float: left;
+ width: 230px;
+ margin-left: -100%;
+ font-size: 90%;
+ word-wrap: break-word;
+ overflow-wrap : break-word;
+}
+
+div.sphinxsidebar ul {
+ list-style: none;
+}
+
+div.sphinxsidebar ul ul,
+div.sphinxsidebar ul.want-points {
+ margin-left: 20px;
+ list-style: square;
+}
+
+div.sphinxsidebar ul ul {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+div.sphinxsidebar form {
+ margin-top: 10px;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #98dbcc;
+ font-family: sans-serif;
+ font-size: 1em;
+}
+
+div.sphinxsidebar #searchbox form.search {
+ overflow: hidden;
+}
+
+div.sphinxsidebar #searchbox input[type="text"] {
+ float: left;
+ width: 80%;
+ padding: 0.25em;
+ box-sizing: border-box;
+}
+
+div.sphinxsidebar #searchbox input[type="submit"] {
+ float: left;
+ width: 20%;
+ border-left: none;
+ padding: 0.25em;
+ box-sizing: border-box;
+}
+
+
+img {
+ border: 0;
+ max-width: 100%;
+}
+
+/* -- search page ----------------------------------------------------------- */
+
+ul.search {
+ margin: 10px 0 0 20px;
+ padding: 0;
+}
+
+ul.search li {
+ padding: 5px 0 5px 20px;
+ background-image: url(file.png);
+ background-repeat: no-repeat;
+ background-position: 0 7px;
+}
+
+ul.search li a {
+ font-weight: bold;
+}
+
+ul.search li p.context {
+ color: #888;
+ margin: 2px 0 0 30px;
+ text-align: left;
+}
+
+ul.keywordmatches li.goodmatch a {
+ font-weight: bold;
+}
+
+/* -- index page ------------------------------------------------------------ */
+
+table.contentstable {
+ width: 90%;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+table.contentstable p.biglink {
+ line-height: 150%;
+}
+
+a.biglink {
+ font-size: 1.3em;
+}
+
+span.linkdescr {
+ font-style: italic;
+ padding-top: 5px;
+ font-size: 90%;
+}
+
+/* -- general index --------------------------------------------------------- */
+
+table.indextable {
+ width: 100%;
+}
+
+table.indextable td {
+ text-align: left;
+ vertical-align: top;
+}
+
+table.indextable ul {
+ margin-top: 0;
+ margin-bottom: 0;
+ list-style-type: none;
+}
+
+table.indextable > tbody > tr > td > ul {
+ padding-left: 0em;
+}
+
+table.indextable tr.pcap {
+ height: 10px;
+}
+
+table.indextable tr.cap {
+ margin-top: 10px;
+ background-color: #f2f2f2;
+}
+
+img.toggler {
+ margin-right: 3px;
+ margin-top: 3px;
+ cursor: pointer;
+}
+
+div.modindex-jumpbox {
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+ margin: 1em 0 1em 0;
+ padding: 0.4em;
+}
+
+div.genindex-jumpbox {
+ border-top: 1px solid #ddd;
+ border-bottom: 1px solid #ddd;
+ margin: 1em 0 1em 0;
+ padding: 0.4em;
+}
+
+/* -- domain module index --------------------------------------------------- */
+
+table.modindextable td {
+ padding: 2px;
+ border-collapse: collapse;
+}
+
+/* -- general body styles --------------------------------------------------- */
+
+div.body {
+ min-width: 360px;
+ max-width: 800px;
+}
+
+div.body p, div.body dd, div.body li, div.body blockquote {
+ -moz-hyphens: auto;
+ -ms-hyphens: auto;
+ -webkit-hyphens: auto;
+ hyphens: auto;
+}
+
+a.headerlink {
+ visibility: hidden;
+}
+
+h1:hover > a.headerlink,
+h2:hover > a.headerlink,
+h3:hover > a.headerlink,
+h4:hover > a.headerlink,
+h5:hover > a.headerlink,
+h6:hover > a.headerlink,
+dt:hover > a.headerlink,
+caption:hover > a.headerlink,
+p.caption:hover > a.headerlink,
+div.code-block-caption:hover > a.headerlink {
+ visibility: visible;
+}
+
+div.body p.caption {
+ text-align: inherit;
+}
+
+div.body td {
+ text-align: left;
+}
+
+.first {
+ margin-top: 0 !important;
+}
+
+p.rubric {
+ margin-top: 30px;
+ font-weight: bold;
+}
+
+img.align-left, figure.align-left, .figure.align-left, object.align-left {
+ clear: left;
+ float: left;
+ margin-right: 1em;
+}
+
+img.align-right, figure.align-right, .figure.align-right, object.align-right {
+ clear: right;
+ float: right;
+ margin-left: 1em;
+}
+
+img.align-center, figure.align-center, .figure.align-center, object.align-center {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+img.align-default, figure.align-default, .figure.align-default {
+ display: block;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+.align-left {
+ text-align: left;
+}
+
+.align-center {
+ text-align: center;
+}
+
+.align-default {
+ text-align: center;
+}
+
+.align-right {
+ text-align: right;
+}
+
+/* -- sidebars -------------------------------------------------------------- */
+
+div.sidebar,
+aside.sidebar {
+ margin: 0 0 0.5em 1em;
+ border: 1px solid #ddb;
+ padding: 7px;
+ background-color: #ffe;
+ width: 40%;
+ float: right;
+ clear: right;
+ overflow-x: auto;
+}
+
+p.sidebar-title {
+ font-weight: bold;
+}
+
+nav.contents,
+aside.topic,
+div.admonition, div.topic, blockquote {
+ clear: left;
+}
+
+/* -- topics ---------------------------------------------------------------- */
+
+nav.contents,
+aside.topic,
+div.topic {
+ border: 1px solid #ccc;
+ padding: 7px;
+ margin: 10px 0 10px 0;
+}
+
+p.topic-title {
+ font-size: 1.1em;
+ font-weight: bold;
+ margin-top: 10px;
+}
+
+/* -- admonitions ----------------------------------------------------------- */
+
+div.admonition {
+ margin-top: 10px;
+ margin-bottom: 10px;
+ padding: 7px;
+}
+
+div.admonition dt {
+ font-weight: bold;
+}
+
+p.admonition-title {
+ margin: 0px 10px 5px 0px;
+ font-weight: bold;
+}
+
+div.body p.centered {
+ text-align: center;
+ margin-top: 25px;
+}
+
+/* -- content of sidebars/topics/admonitions -------------------------------- */
+
+div.sidebar > :last-child,
+aside.sidebar > :last-child,
+nav.contents > :last-child,
+aside.topic > :last-child,
+div.topic > :last-child,
+div.admonition > :last-child {
+ margin-bottom: 0;
+}
+
+div.sidebar::after,
+aside.sidebar::after,
+nav.contents::after,
+aside.topic::after,
+div.topic::after,
+div.admonition::after,
+blockquote::after {
+ display: block;
+ content: '';
+ clear: both;
+}
+
+/* -- tables ---------------------------------------------------------------- */
+
+table.docutils {
+ margin-top: 10px;
+ margin-bottom: 10px;
+ border: 0;
+ border-collapse: collapse;
+}
+
+table.align-center {
+ margin-left: auto;
+ margin-right: auto;
+}
+
+table.align-default {
+ margin-left: auto;
+ margin-right: auto;
+}
+
+table caption span.caption-number {
+ font-style: italic;
+}
+
+table caption span.caption-text {
+}
+
+table.docutils td, table.docutils th {
+ padding: 1px 8px 1px 5px;
+ border-top: 0;
+ border-left: 0;
+ border-right: 0;
+ border-bottom: 1px solid #aaa;
+}
+
+th {
+ text-align: left;
+ padding-right: 5px;
+}
+
+table.citation {
+ border-left: solid 1px gray;
+ margin-left: 1px;
+}
+
+table.citation td {
+ border-bottom: none;
+}
+
+th > :first-child,
+td > :first-child {
+ margin-top: 0px;
+}
+
+th > :last-child,
+td > :last-child {
+ margin-bottom: 0px;
+}
+
+/* -- figures --------------------------------------------------------------- */
+
+div.figure, figure {
+ margin: 0.5em;
+ padding: 0.5em;
+}
+
+div.figure p.caption, figcaption {
+ padding: 0.3em;
+}
+
+div.figure p.caption span.caption-number,
+figcaption span.caption-number {
+ font-style: italic;
+}
+
+div.figure p.caption span.caption-text,
+figcaption span.caption-text {
+}
+
+/* -- field list styles ----------------------------------------------------- */
+
+table.field-list td, table.field-list th {
+ border: 0 !important;
+}
+
+.field-list ul {
+ margin: 0;
+ padding-left: 1em;
+}
+
+.field-list p {
+ margin: 0;
+}
+
+.field-name {
+ -moz-hyphens: manual;
+ -ms-hyphens: manual;
+ -webkit-hyphens: manual;
+ hyphens: manual;
+}
+
+/* -- hlist styles ---------------------------------------------------------- */
+
+table.hlist {
+ margin: 1em 0;
+}
+
+table.hlist td {
+ vertical-align: top;
+}
+
+/* -- object description styles --------------------------------------------- */
+
+.sig {
+ font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
+}
+
+.sig-name, code.descname {
+ background-color: transparent;
+ font-weight: bold;
+}
+
+.sig-name {
+ font-size: 1.1em;
+}
+
+code.descname {
+ font-size: 1.2em;
+}
+
+.sig-prename, code.descclassname {
+ background-color: transparent;
+}
+
+.optional {
+ font-size: 1.3em;
+}
+
+.sig-paren {
+ font-size: larger;
+}
+
+.sig-param.n {
+ font-style: italic;
+}
+
+/* C++ specific styling */
+
+.sig-inline.c-texpr,
+.sig-inline.cpp-texpr {
+ font-family: unset;
+}
+
+.sig.c .k, .sig.c .kt,
+.sig.cpp .k, .sig.cpp .kt {
+ color: #0033B3;
+}
+
+.sig.c .m,
+.sig.cpp .m {
+ color: #1750EB;
+}
+
+.sig.c .s, .sig.c .sc,
+.sig.cpp .s, .sig.cpp .sc {
+ color: #067D17;
+}
+
+
+/* -- other body styles ----------------------------------------------------- */
+
+ol.arabic {
+ list-style: decimal;
+}
+
+ol.loweralpha {
+ list-style: lower-alpha;
+}
+
+ol.upperalpha {
+ list-style: upper-alpha;
+}
+
+ol.lowerroman {
+ list-style: lower-roman;
+}
+
+ol.upperroman {
+ list-style: upper-roman;
+}
+
+:not(li) > ol > li:first-child > :first-child,
+:not(li) > ul > li:first-child > :first-child {
+ margin-top: 0px;
+}
+
+:not(li) > ol > li:last-child > :last-child,
+:not(li) > ul > li:last-child > :last-child {
+ margin-bottom: 0px;
+}
+
+ol.simple ol p,
+ol.simple ul p,
+ul.simple ol p,
+ul.simple ul p {
+ margin-top: 0;
+}
+
+ol.simple > li:not(:first-child) > p,
+ul.simple > li:not(:first-child) > p {
+ margin-top: 0;
+}
+
+ol.simple p,
+ul.simple p {
+ margin-bottom: 0;
+}
+
+aside.footnote > span,
+div.citation > span {
+ float: left;
+}
+aside.footnote > span:last-of-type,
+div.citation > span:last-of-type {
+ padding-right: 0.5em;
+}
+aside.footnote > p {
+ margin-left: 2em;
+}
+div.citation > p {
+ margin-left: 4em;
+}
+aside.footnote > p:last-of-type,
+div.citation > p:last-of-type {
+ margin-bottom: 0em;
+}
+aside.footnote > p:last-of-type:after,
+div.citation > p:last-of-type:after {
+ content: "";
+ clear: both;
+}
+
+dl.field-list {
+ display: grid;
+ grid-template-columns: fit-content(30%) auto;
+}
+
+dl.field-list > dt {
+ font-weight: bold;
+ word-break: break-word;
+ padding-left: 0.5em;
+ padding-right: 5px;
+}
+
+dl.field-list > dd {
+ padding-left: 0.5em;
+ margin-top: 0em;
+ margin-left: 0em;
+ margin-bottom: 0em;
+}
+
+dl {
+ margin-bottom: 15px;
+}
+
+dd > :first-child {
+ margin-top: 0px;
+}
+
+dd ul, dd table {
+ margin-bottom: 10px;
+}
+
+dd {
+ margin-top: 3px;
+ margin-bottom: 10px;
+ margin-left: 30px;
+}
+
+dl > dd:last-child,
+dl > dd:last-child > :last-child {
+ margin-bottom: 0;
+}
+
+dt:target, span.highlighted {
+ background-color: #fbe54e;
+}
+
+rect.highlighted {
+ fill: #fbe54e;
+}
+
+dl.glossary dt {
+ font-weight: bold;
+ font-size: 1.1em;
+}
+
+.versionmodified {
+ font-style: italic;
+}
+
+.system-message {
+ background-color: #fda;
+ padding: 5px;
+ border: 3px solid red;
+}
+
+.footnote:target {
+ background-color: #ffa;
+}
+
+.line-block {
+ display: block;
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+
+.line-block .line-block {
+ margin-top: 0;
+ margin-bottom: 0;
+ margin-left: 1.5em;
+}
+
+.guilabel, .menuselection {
+ font-family: sans-serif;
+}
+
+.accelerator {
+ text-decoration: underline;
+}
+
+.classifier {
+ font-style: oblique;
+}
+
+.classifier:before {
+ font-style: normal;
+ margin: 0 0.5em;
+ content: ":";
+ display: inline-block;
+}
+
+abbr, acronym {
+ border-bottom: dotted 1px;
+ cursor: help;
+}
+
+/* -- code displays --------------------------------------------------------- */
+
+pre {
+ overflow: auto;
+ overflow-y: hidden; /* fixes display issues on Chrome browsers */
+}
+
+pre, div[class*="highlight-"] {
+ clear: both;
+}
+
+span.pre {
+ -moz-hyphens: none;
+ -ms-hyphens: none;
+ -webkit-hyphens: none;
+ hyphens: none;
+ white-space: nowrap;
+}
+
+div[class*="highlight-"] {
+ margin: 1em 0;
+}
+
+td.linenos pre {
+ border: 0;
+ background-color: transparent;
+ color: #aaa;
+}
+
+table.highlighttable {
+ display: block;
+}
+
+table.highlighttable tbody {
+ display: block;
+}
+
+table.highlighttable tr {
+ display: flex;
+}
+
+table.highlighttable td {
+ margin: 0;
+ padding: 0;
+}
+
+table.highlighttable td.linenos {
+ padding-right: 0.5em;
+}
+
+table.highlighttable td.code {
+ flex: 1;
+ overflow: hidden;
+}
+
+.highlight .hll {
+ display: block;
+}
+
+div.highlight pre,
+table.highlighttable pre {
+ margin: 0;
+}
+
+div.code-block-caption + div {
+ margin-top: 0;
+}
+
+div.code-block-caption {
+ margin-top: 1em;
+ padding: 2px 5px;
+ font-size: small;
+}
+
+div.code-block-caption code {
+ background-color: transparent;
+}
+
+table.highlighttable td.linenos,
+span.linenos,
+div.highlight span.gp { /* gp: Generic.Prompt */
+ user-select: none;
+ -webkit-user-select: text; /* Safari fallback only */
+ -webkit-user-select: none; /* Chrome/Safari */
+ -moz-user-select: none; /* Firefox */
+ -ms-user-select: none; /* IE10+ */
+}
+
+div.code-block-caption span.caption-number {
+ padding: 0.1em 0.3em;
+ font-style: italic;
+}
+
+div.code-block-caption span.caption-text {
+}
+
+div.literal-block-wrapper {
+ margin: 1em 0;
+}
+
+code.xref, a code {
+ background-color: transparent;
+ font-weight: bold;
+}
+
+h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
+ background-color: transparent;
+}
+
+.viewcode-link {
+ float: right;
+}
+
+.viewcode-back {
+ float: right;
+ font-family: sans-serif;
+}
+
+div.viewcode-block:target {
+ margin: -1px -10px;
+ padding: 0 10px;
+}
+
+/* -- math display ---------------------------------------------------------- */
+
+img.math {
+ vertical-align: middle;
+}
+
+div.body div.math p {
+ text-align: center;
+}
+
+span.eqno {
+ float: right;
+}
+
+span.eqno a.headerlink {
+ position: absolute;
+ z-index: 1;
+}
+
+div.math:hover a.headerlink {
+ visibility: visible;
+}
+
+/* -- printout stylesheet --------------------------------------------------- */
+
+@media print {
+ div.document,
+ div.documentwrapper,
+ div.bodywrapper {
+ margin: 0 !important;
+ width: 100%;
+ }
+
+ div.sphinxsidebar,
+ div.related,
+ div.footer,
+ #top-link {
+ display: none;
+ }
+}
\ No newline at end of file
diff --git a/docs/source/_static/css/badge_only.css b/docs/source/_static/css/badge_only.css
new file mode 100644
index 00000000..c718cee4
--- /dev/null
+++ b/docs/source/_static/css/badge_only.css
@@ -0,0 +1 @@
+.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}
\ No newline at end of file
diff --git a/docs/source/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/source/_static/css/fonts/Roboto-Slab-Bold.woff
new file mode 100644
index 00000000..6cb60000
Binary files /dev/null and b/docs/source/_static/css/fonts/Roboto-Slab-Bold.woff differ
diff --git a/docs/source/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/source/_static/css/fonts/Roboto-Slab-Bold.woff2
new file mode 100644
index 00000000..7059e231
Binary files /dev/null and b/docs/source/_static/css/fonts/Roboto-Slab-Bold.woff2 differ
diff --git a/docs/source/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/source/_static/css/fonts/Roboto-Slab-Regular.woff
new file mode 100644
index 00000000..f815f63f
Binary files /dev/null and b/docs/source/_static/css/fonts/Roboto-Slab-Regular.woff differ
diff --git a/docs/source/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/source/_static/css/fonts/Roboto-Slab-Regular.woff2
new file mode 100644
index 00000000..f2c76e5b
Binary files /dev/null and b/docs/source/_static/css/fonts/Roboto-Slab-Regular.woff2 differ
diff --git a/docs/source/_static/css/fonts/fontawesome-webfont.eot b/docs/source/_static/css/fonts/fontawesome-webfont.eot
new file mode 100644
index 00000000..e9f60ca9
Binary files /dev/null and b/docs/source/_static/css/fonts/fontawesome-webfont.eot differ
diff --git a/docs/source/_static/css/fonts/fontawesome-webfont.svg b/docs/source/_static/css/fonts/fontawesome-webfont.svg
new file mode 100644
index 00000000..855c845e
--- /dev/null
+++ b/docs/source/_static/css/fonts/fontawesome-webfont.svg
@@ -0,0 +1,2671 @@
+
+
+
diff --git a/docs/source/_static/css/fonts/fontawesome-webfont.ttf b/docs/source/_static/css/fonts/fontawesome-webfont.ttf
new file mode 100644
index 00000000..35acda2f
Binary files /dev/null and b/docs/source/_static/css/fonts/fontawesome-webfont.ttf differ
diff --git a/docs/source/_static/css/fonts/fontawesome-webfont.woff b/docs/source/_static/css/fonts/fontawesome-webfont.woff
new file mode 100644
index 00000000..400014a4
Binary files /dev/null and b/docs/source/_static/css/fonts/fontawesome-webfont.woff differ
diff --git a/docs/source/_static/css/fonts/fontawesome-webfont.woff2 b/docs/source/_static/css/fonts/fontawesome-webfont.woff2
new file mode 100644
index 00000000..4d13fc60
Binary files /dev/null and b/docs/source/_static/css/fonts/fontawesome-webfont.woff2 differ
diff --git a/docs/source/_static/css/fonts/lato-bold-italic.woff b/docs/source/_static/css/fonts/lato-bold-italic.woff
new file mode 100644
index 00000000..88ad05b9
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-bold-italic.woff differ
diff --git a/docs/source/_static/css/fonts/lato-bold-italic.woff2 b/docs/source/_static/css/fonts/lato-bold-italic.woff2
new file mode 100644
index 00000000..c4e3d804
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-bold-italic.woff2 differ
diff --git a/docs/source/_static/css/fonts/lato-bold.woff b/docs/source/_static/css/fonts/lato-bold.woff
new file mode 100644
index 00000000..c6dff51f
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-bold.woff differ
diff --git a/docs/source/_static/css/fonts/lato-bold.woff2 b/docs/source/_static/css/fonts/lato-bold.woff2
new file mode 100644
index 00000000..bb195043
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-bold.woff2 differ
diff --git a/docs/source/_static/css/fonts/lato-normal-italic.woff b/docs/source/_static/css/fonts/lato-normal-italic.woff
new file mode 100644
index 00000000..76114bc0
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-normal-italic.woff differ
diff --git a/docs/source/_static/css/fonts/lato-normal-italic.woff2 b/docs/source/_static/css/fonts/lato-normal-italic.woff2
new file mode 100644
index 00000000..3404f37e
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-normal-italic.woff2 differ
diff --git a/docs/source/_static/css/fonts/lato-normal.woff b/docs/source/_static/css/fonts/lato-normal.woff
new file mode 100644
index 00000000..ae1307ff
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-normal.woff differ
diff --git a/docs/source/_static/css/fonts/lato-normal.woff2 b/docs/source/_static/css/fonts/lato-normal.woff2
new file mode 100644
index 00000000..3bf98433
Binary files /dev/null and b/docs/source/_static/css/fonts/lato-normal.woff2 differ
diff --git a/docs/source/_static/css/theme.css b/docs/source/_static/css/theme.css
new file mode 100644
index 00000000..3f264ca6
--- /dev/null
+++ b/docs/source/_static/css/theme.css
@@ -0,0 +1,7779 @@
+html {
+ box-sizing: border-box
+}
+
+*,
+:after,
+:before {
+ box-sizing: inherit
+}
+
+article,
+aside,
+details,
+figcaption,
+figure,
+footer,
+header,
+hgroup,
+nav,
+section {
+ display: block
+}
+
+audio,
+canvas,
+video {
+ display: inline-block;
+ *display: inline;
+ *zoom: 1
+}
+
+[hidden],
+audio:not([controls]) {
+ display: none
+}
+
+* {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box
+}
+
+html {
+ font-size: 100%;
+ -webkit-text-size-adjust: 100%;
+ -ms-text-size-adjust: 100%
+}
+
+body {
+ margin: 0
+}
+
+a:active,
+a:hover {
+ outline: 0
+}
+
+abbr[title] {
+ border-bottom: 1px dotted
+}
+
+b,
+strong {
+ font-weight: 700
+}
+
+blockquote {
+ margin: 0
+}
+
+dfn {
+ font-style: italic
+}
+
+ins {
+ background: #ff9;
+ text-decoration: none
+}
+
+ins,
+mark {
+ color: #000
+}
+
+mark {
+ background: #ff0;
+ font-style: italic;
+ font-weight: 700
+}
+
+.rst-content code,
+.rst-content tt,
+code,
+kbd,
+pre,
+samp {
+ font-family: monospace, serif;
+ _font-family: courier new, monospace;
+ font-size: 1em
+}
+
+pre {
+ white-space: pre
+}
+
+q {
+ quotes: none
+}
+
+q:after,
+q:before {
+ content: "";
+ content: none
+}
+
+small {
+ font-size: 85%
+}
+
+sub,
+sup {
+ font-size: 75%;
+ line-height: 0;
+ position: relative;
+ vertical-align: baseline
+}
+
+sup {
+ top: -.5em
+}
+
+sub {
+ bottom: -.25em
+}
+
+dl,
+ol,
+ul {
+ margin: 0;
+ padding: 0;
+ list-style: none;
+ list-style-image: none
+}
+
+li {
+ list-style: none
+}
+
+dd {
+ margin: 0
+}
+
+img {
+ border: 0;
+ -ms-interpolation-mode: bicubic;
+ vertical-align: middle;
+ max-width: 100%
+}
+
+svg:not(:root) {
+ overflow: hidden
+}
+
+figure,
+form {
+ margin: 0
+}
+
+label {
+ cursor: pointer
+}
+
+button,
+input,
+select,
+textarea {
+ font-size: 100%;
+ margin: 0;
+ vertical-align: baseline;
+ *vertical-align: middle
+}
+
+button,
+input {
+ line-height: normal
+}
+
+button,
+input[type=button],
+input[type=reset],
+input[type=submit] {
+ cursor: pointer;
+ -webkit-appearance: button;
+ *overflow: visible
+}
+
+button[disabled],
+input[disabled] {
+ cursor: default
+}
+
+input[type=search] {
+ -webkit-appearance: textfield;
+ -moz-box-sizing: content-box;
+ -webkit-box-sizing: content-box;
+ box-sizing: content-box
+}
+
+textarea {
+ resize: vertical
+}
+
+table {
+ border-collapse: collapse;
+ border-spacing: 0
+}
+
+td {
+ vertical-align: top
+}
+
+.chromeframe {
+ margin: .2em 0;
+ background: #ccc;
+ color: #000;
+ padding: .2em 0
+}
+
+.ir {
+ display: block;
+ border: 0;
+ text-indent: -999em;
+ overflow: hidden;
+ background-color: transparent;
+ background-repeat: no-repeat;
+ text-align: left;
+ direction: ltr;
+ *line-height: 0
+}
+
+.ir br {
+ display: none
+}
+
+.hidden {
+ display: none !important;
+ visibility: hidden
+}
+
+.visuallyhidden {
+ border: 0;
+ clip: rect(0 0 0 0);
+ height: 1px;
+ margin: -1px;
+ overflow: hidden;
+ padding: 0;
+ position: absolute;
+ width: 1px
+}
+
+.visuallyhidden.focusable:active,
+.visuallyhidden.focusable:focus {
+ clip: auto;
+ height: auto;
+ margin: 0;
+ overflow: visible;
+ position: static;
+ width: auto
+}
+
+.invisible {
+ visibility: hidden
+}
+
+.relative {
+ position: relative
+}
+
+big,
+small {
+ font-size: 100%
+}
+
+@media print {
+
+ body,
+ html,
+ section {
+ background: none !important
+ }
+
+ * {
+ box-shadow: none !important;
+ text-shadow: none !important;
+ filter: none !important;
+ -ms-filter: none !important
+ }
+
+ a,
+ a:visited {
+ text-decoration: underline
+ }
+
+ .ir a:after,
+ a[href^="#"]:after,
+ a[href^="javascript:"]:after {
+ content: ""
+ }
+
+ blockquote,
+ pre {
+ page-break-inside: avoid
+ }
+
+ thead {
+ display: table-header-group
+ }
+
+ img,
+ tr {
+ page-break-inside: avoid
+ }
+
+ img {
+ max-width: 100% !important
+ }
+
+ @page {
+ margin: .5cm
+ }
+
+ .rst-content .toctree-wrapper>p.caption,
+ h2,
+ h3,
+ p {
+ orphans: 3;
+ widows: 3
+ }
+
+ .rst-content .toctree-wrapper>p.caption,
+ h2,
+ h3 {
+ page-break-after: avoid
+ }
+}
+
+.btn,
+.fa:before,
+.icon:before,
+.rst-content .admonition,
+.rst-content .admonition-title:before,
+.rst-content .admonition-todo,
+.rst-content .attention,
+.rst-content .caution,
+.rst-content .code-block-caption .headerlink:before,
+.rst-content .danger,
+.rst-content .eqno .headerlink:before,
+.rst-content .error,
+.rst-content .hint,
+.rst-content .important,
+.rst-content .note,
+.rst-content .seealso,
+.rst-content .tip,
+.rst-content .warning,
+.rst-content code.download span:first-child:before,
+.rst-content dl dt .headerlink:before,
+.rst-content h1 .headerlink:before,
+.rst-content h2 .headerlink:before,
+.rst-content h3 .headerlink:before,
+.rst-content h4 .headerlink:before,
+.rst-content h5 .headerlink:before,
+.rst-content h6 .headerlink:before,
+.rst-content p.caption .headerlink:before,
+.rst-content p .headerlink:before,
+.rst-content table>caption .headerlink:before,
+.rst-content tt.download span:first-child:before,
+.wy-alert,
+.wy-dropdown .caret:before,
+.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,
+.wy-menu-vertical li.current>a button.toctree-expand:before,
+.wy-menu-vertical li.on a button.toctree-expand:before,
+.wy-menu-vertical li button.toctree-expand:before,
+input[type=color],
+input[type=date],
+input[type=datetime-local],
+input[type=datetime],
+input[type=email],
+input[type=month],
+input[type=number],
+input[type=password],
+input[type=search],
+input[type=tel],
+input[type=text],
+input[type=time],
+input[type=url],
+input[type=week],
+select,
+textarea {
+ -webkit-font-smoothing: antialiased
+}
+
+.clearfix {
+ *zoom: 1
+}
+
+.clearfix:after,
+.clearfix:before {
+ display: table;
+ content: ""
+}
+
+.clearfix:after {
+ clear: both
+}
+
+/*!
+ * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+@font-face {
+ font-family: FontAwesome;
+ src: url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);
+ src: url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"), url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"), url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"), url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"), url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");
+ font-weight: 400;
+ font-style: normal
+}
+
+.fa,
+.icon,
+.rst-content .admonition-title,
+.rst-content .code-block-caption .headerlink,
+.rst-content .eqno .headerlink,
+.rst-content code.download span:first-child,
+.rst-content dl dt .headerlink,
+.rst-content h1 .headerlink,
+.rst-content h2 .headerlink,
+.rst-content h3 .headerlink,
+.rst-content h4 .headerlink,
+.rst-content h5 .headerlink,
+.rst-content h6 .headerlink,
+.rst-content p.caption .headerlink,
+.rst-content p .headerlink,
+.rst-content table>caption .headerlink,
+.rst-content tt.download span:first-child,
+.wy-menu-vertical li.current>a button.toctree-expand,
+.wy-menu-vertical li.on a button.toctree-expand,
+.wy-menu-vertical li button.toctree-expand {
+ display: inline-block;
+ font: normal normal normal 14px/1 FontAwesome;
+ font-size: inherit;
+ text-rendering: auto;
+ -webkit-font-smoothing: antialiased;
+ -moz-osx-font-smoothing: grayscale
+}
+
+.fa-lg {
+ font-size: 1.33333em;
+ line-height: .75em;
+ vertical-align: -15%
+}
+
+.fa-2x {
+ font-size: 2em
+}
+
+.fa-3x {
+ font-size: 3em
+}
+
+.fa-4x {
+ font-size: 4em
+}
+
+.fa-5x {
+ font-size: 5em
+}
+
+.fa-fw {
+ width: 1.28571em;
+ text-align: center
+}
+
+.fa-ul {
+ padding-left: 0;
+ margin-left: 2.14286em;
+ list-style-type: none
+}
+
+.fa-ul>li {
+ position: relative
+}
+
+.fa-li {
+ position: absolute;
+ left: -2.14286em;
+ width: 2.14286em;
+ top: .14286em;
+ text-align: center
+}
+
+.fa-li.fa-lg {
+ left: -1.85714em
+}
+
+.fa-border {
+ padding: .2em .25em .15em;
+ border: .08em solid #eee;
+ border-radius: .1em
+}
+
+.fa-pull-left {
+ float: left
+}
+
+.fa-pull-right {
+ float: right
+}
+
+.fa-pull-left.icon,
+.fa.fa-pull-left,
+.rst-content .code-block-caption .fa-pull-left.headerlink,
+.rst-content .eqno .fa-pull-left.headerlink,
+.rst-content .fa-pull-left.admonition-title,
+.rst-content code.download span.fa-pull-left:first-child,
+.rst-content dl dt .fa-pull-left.headerlink,
+.rst-content h1 .fa-pull-left.headerlink,
+.rst-content h2 .fa-pull-left.headerlink,
+.rst-content h3 .fa-pull-left.headerlink,
+.rst-content h4 .fa-pull-left.headerlink,
+.rst-content h5 .fa-pull-left.headerlink,
+.rst-content h6 .fa-pull-left.headerlink,
+.rst-content p .fa-pull-left.headerlink,
+.rst-content table>caption .fa-pull-left.headerlink,
+.rst-content tt.download span.fa-pull-left:first-child,
+.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,
+.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,
+.wy-menu-vertical li button.fa-pull-left.toctree-expand {
+ margin-right: .3em
+}
+
+.fa-pull-right.icon,
+.fa.fa-pull-right,
+.rst-content .code-block-caption .fa-pull-right.headerlink,
+.rst-content .eqno .fa-pull-right.headerlink,
+.rst-content .fa-pull-right.admonition-title,
+.rst-content code.download span.fa-pull-right:first-child,
+.rst-content dl dt .fa-pull-right.headerlink,
+.rst-content h1 .fa-pull-right.headerlink,
+.rst-content h2 .fa-pull-right.headerlink,
+.rst-content h3 .fa-pull-right.headerlink,
+.rst-content h4 .fa-pull-right.headerlink,
+.rst-content h5 .fa-pull-right.headerlink,
+.rst-content h6 .fa-pull-right.headerlink,
+.rst-content p .fa-pull-right.headerlink,
+.rst-content table>caption .fa-pull-right.headerlink,
+.rst-content tt.download span.fa-pull-right:first-child,
+.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,
+.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,
+.wy-menu-vertical li button.fa-pull-right.toctree-expand {
+ margin-left: .3em
+}
+
+.pull-right {
+ float: right
+}
+
+.pull-left {
+ float: left
+}
+
+.fa.pull-left,
+.pull-left.icon,
+.rst-content .code-block-caption .pull-left.headerlink,
+.rst-content .eqno .pull-left.headerlink,
+.rst-content .pull-left.admonition-title,
+.rst-content code.download span.pull-left:first-child,
+.rst-content dl dt .pull-left.headerlink,
+.rst-content h1 .pull-left.headerlink,
+.rst-content h2 .pull-left.headerlink,
+.rst-content h3 .pull-left.headerlink,
+.rst-content h4 .pull-left.headerlink,
+.rst-content h5 .pull-left.headerlink,
+.rst-content h6 .pull-left.headerlink,
+.rst-content p .pull-left.headerlink,
+.rst-content table>caption .pull-left.headerlink,
+.rst-content tt.download span.pull-left:first-child,
+.wy-menu-vertical li.current>a button.pull-left.toctree-expand,
+.wy-menu-vertical li.on a button.pull-left.toctree-expand,
+.wy-menu-vertical li button.pull-left.toctree-expand {
+ margin-right: .3em
+}
+
+.fa.pull-right,
+.pull-right.icon,
+.rst-content .code-block-caption .pull-right.headerlink,
+.rst-content .eqno .pull-right.headerlink,
+.rst-content .pull-right.admonition-title,
+.rst-content code.download span.pull-right:first-child,
+.rst-content dl dt .pull-right.headerlink,
+.rst-content h1 .pull-right.headerlink,
+.rst-content h2 .pull-right.headerlink,
+.rst-content h3 .pull-right.headerlink,
+.rst-content h4 .pull-right.headerlink,
+.rst-content h5 .pull-right.headerlink,
+.rst-content h6 .pull-right.headerlink,
+.rst-content p .pull-right.headerlink,
+.rst-content table>caption .pull-right.headerlink,
+.rst-content tt.download span.pull-right:first-child,
+.wy-menu-vertical li.current>a button.pull-right.toctree-expand,
+.wy-menu-vertical li.on a button.pull-right.toctree-expand,
+.wy-menu-vertical li button.pull-right.toctree-expand {
+ margin-left: .3em
+}
+
+.fa-spin {
+ -webkit-animation: fa-spin 2s linear infinite;
+ animation: fa-spin 2s linear infinite
+}
+
+.fa-pulse {
+ -webkit-animation: fa-spin 1s steps(8) infinite;
+ animation: fa-spin 1s steps(8) infinite
+}
+
+@-webkit-keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg)
+ }
+
+ to {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg)
+ }
+}
+
+@keyframes fa-spin {
+ 0% {
+ -webkit-transform: rotate(0deg);
+ transform: rotate(0deg)
+ }
+
+ to {
+ -webkit-transform: rotate(359deg);
+ transform: rotate(359deg)
+ }
+}
+
+.fa-rotate-90 {
+ -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";
+ -webkit-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+ transform: rotate(90deg)
+}
+
+.fa-rotate-180 {
+ -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";
+ -webkit-transform: rotate(180deg);
+ -ms-transform: rotate(180deg);
+ transform: rotate(180deg)
+}
+
+.fa-rotate-270 {
+ -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";
+ -webkit-transform: rotate(270deg);
+ -ms-transform: rotate(270deg);
+ transform: rotate(270deg)
+}
+
+.fa-flip-horizontal {
+ -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";
+ -webkit-transform: scaleX(-1);
+ -ms-transform: scaleX(-1);
+ transform: scaleX(-1)
+}
+
+.fa-flip-vertical {
+ -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";
+ -webkit-transform: scaleY(-1);
+ -ms-transform: scaleY(-1);
+ transform: scaleY(-1)
+}
+
+:root .fa-flip-horizontal,
+:root .fa-flip-vertical,
+:root .fa-rotate-90,
+:root .fa-rotate-180,
+:root .fa-rotate-270 {
+ filter: none
+}
+
+.fa-stack {
+ position: relative;
+ display: inline-block;
+ width: 2em;
+ height: 2em;
+ line-height: 2em;
+ vertical-align: middle
+}
+
+.fa-stack-1x,
+.fa-stack-2x {
+ position: absolute;
+ left: 0;
+ width: 100%;
+ text-align: center
+}
+
+.fa-stack-1x {
+ line-height: inherit
+}
+
+.fa-stack-2x {
+ font-size: 2em
+}
+
+.fa-inverse {
+ color: #fff
+}
+
+.fa-glass:before {
+ content: ""
+}
+
+.fa-music:before {
+ content: ""
+}
+
+.fa-search:before,
+.icon-search:before {
+ content: ""
+}
+
+.fa-envelope-o:before {
+ content: ""
+}
+
+.fa-heart:before {
+ content: ""
+}
+
+.fa-star:before {
+ content: ""
+}
+
+.fa-star-o:before {
+ content: ""
+}
+
+.fa-user:before {
+ content: ""
+}
+
+.fa-film:before {
+ content: ""
+}
+
+.fa-th-large:before {
+ content: ""
+}
+
+.fa-th:before {
+ content: ""
+}
+
+.fa-th-list:before {
+ content: ""
+}
+
+.fa-check:before {
+ content: ""
+}
+
+.fa-close:before,
+.fa-remove:before,
+.fa-times:before {
+ content: ""
+}
+
+.fa-search-plus:before {
+ content: ""
+}
+
+.fa-search-minus:before {
+ content: ""
+}
+
+.fa-power-off:before {
+ content: ""
+}
+
+.fa-signal:before {
+ content: ""
+}
+
+.fa-cog:before,
+.fa-gear:before {
+ content: ""
+}
+
+.fa-trash-o:before {
+ content: ""
+}
+
+.fa-home:before,
+.icon-home:before {
+ content: ""
+}
+
+.fa-file-o:before {
+ content: ""
+}
+
+.fa-clock-o:before {
+ content: ""
+}
+
+.fa-road:before {
+ content: ""
+}
+
+.fa-download:before,
+.rst-content code.download span:first-child:before,
+.rst-content tt.download span:first-child:before {
+ content: ""
+}
+
+.fa-arrow-circle-o-down:before {
+ content: ""
+}
+
+.fa-arrow-circle-o-up:before {
+ content: ""
+}
+
+.fa-inbox:before {
+ content: ""
+}
+
+.fa-play-circle-o:before {
+ content: ""
+}
+
+.fa-repeat:before,
+.fa-rotate-right:before {
+ content: ""
+}
+
+.fa-refresh:before {
+ content: ""
+}
+
+.fa-list-alt:before {
+ content: ""
+}
+
+.fa-lock:before {
+ content: ""
+}
+
+.fa-flag:before {
+ content: ""
+}
+
+.fa-headphones:before {
+ content: ""
+}
+
+.fa-volume-off:before {
+ content: ""
+}
+
+.fa-volume-down:before {
+ content: ""
+}
+
+.fa-volume-up:before {
+ content: ""
+}
+
+.fa-qrcode:before {
+ content: ""
+}
+
+.fa-barcode:before {
+ content: ""
+}
+
+.fa-tag:before {
+ content: ""
+}
+
+.fa-tags:before {
+ content: ""
+}
+
+.fa-book:before,
+.icon-book:before {
+ content: ""
+}
+
+.fa-bookmark:before {
+ content: ""
+}
+
+.fa-print:before {
+ content: ""
+}
+
+.fa-camera:before {
+ content: ""
+}
+
+.fa-font:before {
+ content: ""
+}
+
+.fa-bold:before {
+ content: ""
+}
+
+.fa-italic:before {
+ content: ""
+}
+
+.fa-text-height:before {
+ content: ""
+}
+
+.fa-text-width:before {
+ content: ""
+}
+
+.fa-align-left:before {
+ content: ""
+}
+
+.fa-align-center:before {
+ content: ""
+}
+
+.fa-align-right:before {
+ content: ""
+}
+
+.fa-align-justify:before {
+ content: ""
+}
+
+.fa-list:before {
+ content: ""
+}
+
+.fa-dedent:before,
+.fa-outdent:before {
+ content: ""
+}
+
+.fa-indent:before {
+ content: ""
+}
+
+.fa-video-camera:before {
+ content: ""
+}
+
+.fa-image:before,
+.fa-photo:before,
+.fa-picture-o:before {
+ content: ""
+}
+
+.fa-pencil:before {
+ content: ""
+}
+
+.fa-map-marker:before {
+ content: ""
+}
+
+.fa-adjust:before {
+ content: ""
+}
+
+.fa-tint:before {
+ content: ""
+}
+
+.fa-edit:before,
+.fa-pencil-square-o:before {
+ content: ""
+}
+
+.fa-share-square-o:before {
+ content: ""
+}
+
+.fa-check-square-o:before {
+ content: ""
+}
+
+.fa-arrows:before {
+ content: ""
+}
+
+.fa-step-backward:before {
+ content: ""
+}
+
+.fa-fast-backward:before {
+ content: ""
+}
+
+.fa-backward:before {
+ content: ""
+}
+
+.fa-play:before {
+ content: ""
+}
+
+.fa-pause:before {
+ content: ""
+}
+
+.fa-stop:before {
+ content: ""
+}
+
+.fa-forward:before {
+ content: ""
+}
+
+.fa-fast-forward:before {
+ content: ""
+}
+
+.fa-step-forward:before {
+ content: ""
+}
+
+.fa-eject:before {
+ content: ""
+}
+
+.fa-chevron-left:before {
+ content: ""
+}
+
+.fa-chevron-right:before {
+ content: ""
+}
+
+.fa-plus-circle:before {
+ content: ""
+}
+
+.fa-minus-circle:before {
+ content: ""
+}
+
+.fa-times-circle:before,
+.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before {
+ content: ""
+}
+
+.fa-check-circle:before,
+.wy-inline-validate.wy-inline-validate-success .wy-input-context:before {
+ content: ""
+}
+
+.fa-question-circle:before {
+ content: ""
+}
+
+.fa-info-circle:before {
+ content: ""
+}
+
+.fa-crosshairs:before {
+ content: ""
+}
+
+.fa-times-circle-o:before {
+ content: ""
+}
+
+.fa-check-circle-o:before {
+ content: ""
+}
+
+.fa-ban:before {
+ content: ""
+}
+
+.fa-arrow-left:before {
+ content: ""
+}
+
+.fa-arrow-right:before {
+ content: ""
+}
+
+.fa-arrow-up:before {
+ content: ""
+}
+
+.fa-arrow-down:before {
+ content: ""
+}
+
+.fa-mail-forward:before,
+.fa-share:before {
+ content: ""
+}
+
+.fa-expand:before {
+ content: ""
+}
+
+.fa-compress:before {
+ content: ""
+}
+
+.fa-plus:before {
+ content: ""
+}
+
+.fa-minus:before {
+ content: ""
+}
+
+.fa-asterisk:before {
+ content: ""
+}
+
+.fa-exclamation-circle:before,
+.rst-content .admonition-title:before,
+.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before {
+ content: ""
+}
+
+.fa-gift:before {
+ content: ""
+}
+
+.fa-leaf:before {
+ content: ""
+}
+
+.fa-fire:before,
+.icon-fire:before {
+ content: ""
+}
+
+.fa-eye:before {
+ content: ""
+}
+
+.fa-eye-slash:before {
+ content: ""
+}
+
+.fa-exclamation-triangle:before,
+.fa-warning:before {
+ content: ""
+}
+
+.fa-plane:before {
+ content: ""
+}
+
+.fa-calendar:before {
+ content: ""
+}
+
+.fa-random:before {
+ content: ""
+}
+
+.fa-comment:before {
+ content: ""
+}
+
+.fa-magnet:before {
+ content: ""
+}
+
+.fa-chevron-up:before {
+ content: ""
+}
+
+.fa-chevron-down:before {
+ content: ""
+}
+
+.fa-retweet:before {
+ content: ""
+}
+
+.fa-shopping-cart:before {
+ content: ""
+}
+
+.fa-folder:before {
+ content: ""
+}
+
+.fa-folder-open:before {
+ content: ""
+}
+
+.fa-arrows-v:before {
+ content: ""
+}
+
+.fa-arrows-h:before {
+ content: ""
+}
+
+.fa-bar-chart-o:before,
+.fa-bar-chart:before {
+ content: ""
+}
+
+.fa-twitter-square:before {
+ content: ""
+}
+
+.fa-facebook-square:before {
+ content: ""
+}
+
+.fa-camera-retro:before {
+ content: ""
+}
+
+.fa-key:before {
+ content: ""
+}
+
+.fa-cogs:before,
+.fa-gears:before {
+ content: ""
+}
+
+.fa-comments:before {
+ content: ""
+}
+
+.fa-thumbs-o-up:before {
+ content: ""
+}
+
+.fa-thumbs-o-down:before {
+ content: ""
+}
+
+.fa-star-half:before {
+ content: ""
+}
+
+.fa-heart-o:before {
+ content: ""
+}
+
+.fa-sign-out:before {
+ content: ""
+}
+
+.fa-linkedin-square:before {
+ content: ""
+}
+
+.fa-thumb-tack:before {
+ content: ""
+}
+
+.fa-external-link:before {
+ content: ""
+}
+
+.fa-sign-in:before {
+ content: ""
+}
+
+.fa-trophy:before {
+ content: ""
+}
+
+.fa-github-square:before {
+ content: ""
+}
+
+.fa-upload:before {
+ content: ""
+}
+
+.fa-lemon-o:before {
+ content: ""
+}
+
+.fa-phone:before {
+ content: ""
+}
+
+.fa-square-o:before {
+ content: ""
+}
+
+.fa-bookmark-o:before {
+ content: ""
+}
+
+.fa-phone-square:before {
+ content: ""
+}
+
+.fa-twitter:before {
+ content: ""
+}
+
+.fa-facebook-f:before,
+.fa-facebook:before {
+ content: ""
+}
+
+.fa-github:before,
+.icon-github:before {
+ content: ""
+}
+
+.fa-unlock:before {
+ content: ""
+}
+
+.fa-credit-card:before {
+ content: ""
+}
+
+.fa-feed:before,
+.fa-rss:before {
+ content: ""
+}
+
+.fa-hdd-o:before {
+ content: ""
+}
+
+.fa-bullhorn:before {
+ content: ""
+}
+
+.fa-bell:before {
+ content: ""
+}
+
+.fa-certificate:before {
+ content: ""
+}
+
+.fa-hand-o-right:before {
+ content: ""
+}
+
+.fa-hand-o-left:before {
+ content: ""
+}
+
+.fa-hand-o-up:before {
+ content: ""
+}
+
+.fa-hand-o-down:before {
+ content: ""
+}
+
+.fa-arrow-circle-left:before,
+.icon-circle-arrow-left:before {
+ content: ""
+}
+
+.fa-arrow-circle-right:before,
+.icon-circle-arrow-right:before {
+ content: ""
+}
+
+.fa-arrow-circle-up:before {
+ content: ""
+}
+
+.fa-arrow-circle-down:before {
+ content: ""
+}
+
+.fa-globe:before {
+ content: ""
+}
+
+.fa-wrench:before {
+ content: ""
+}
+
+.fa-tasks:before {
+ content: ""
+}
+
+.fa-filter:before {
+ content: ""
+}
+
+.fa-briefcase:before {
+ content: ""
+}
+
+.fa-arrows-alt:before {
+ content: ""
+}
+
+.fa-group:before,
+.fa-users:before {
+ content: ""
+}
+
+.fa-chain:before,
+.fa-link:before,
+.icon-link:before {
+ content: ""
+}
+
+.fa-cloud:before {
+ content: ""
+}
+
+.fa-flask:before {
+ content: ""
+}
+
+.fa-cut:before,
+.fa-scissors:before {
+ content: ""
+}
+
+.fa-copy:before,
+.fa-files-o:before {
+ content: ""
+}
+
+.fa-paperclip:before {
+ content: ""
+}
+
+.fa-floppy-o:before,
+.fa-save:before {
+ content: ""
+}
+
+.fa-square:before {
+ content: ""
+}
+
+.fa-bars:before,
+.fa-navicon:before,
+.fa-reorder:before {
+ content: ""
+}
+
+.fa-list-ul:before {
+ content: ""
+}
+
+.fa-list-ol:before {
+ content: ""
+}
+
+.fa-strikethrough:before {
+ content: ""
+}
+
+.fa-underline:before {
+ content: ""
+}
+
+.fa-table:before {
+ content: ""
+}
+
+.fa-magic:before {
+ content: ""
+}
+
+.fa-truck:before {
+ content: ""
+}
+
+.fa-pinterest:before {
+ content: ""
+}
+
+.fa-pinterest-square:before {
+ content: ""
+}
+
+.fa-google-plus-square:before {
+ content: ""
+}
+
+.fa-google-plus:before {
+ content: ""
+}
+
+.fa-money:before {
+ content: ""
+}
+
+.fa-caret-down:before,
+.icon-caret-down:before,
+.wy-dropdown .caret:before {
+ content: ""
+}
+
+.fa-caret-up:before {
+ content: ""
+}
+
+.fa-caret-left:before {
+ content: ""
+}
+
+.fa-caret-right:before {
+ content: ""
+}
+
+.fa-columns:before {
+ content: ""
+}
+
+.fa-sort:before,
+.fa-unsorted:before {
+ content: ""
+}
+
+.fa-sort-desc:before,
+.fa-sort-down:before {
+ content: ""
+}
+
+.fa-sort-asc:before,
+.fa-sort-up:before {
+ content: ""
+}
+
+.fa-envelope:before {
+ content: ""
+}
+
+.fa-linkedin:before {
+ content: ""
+}
+
+.fa-rotate-left:before,
+.fa-undo:before {
+ content: ""
+}
+
+.fa-gavel:before,
+.fa-legal:before {
+ content: ""
+}
+
+.fa-dashboard:before,
+.fa-tachometer:before {
+ content: ""
+}
+
+.fa-comment-o:before {
+ content: ""
+}
+
+.fa-comments-o:before {
+ content: ""
+}
+
+.fa-bolt:before,
+.fa-flash:before {
+ content: ""
+}
+
+.fa-sitemap:before {
+ content: ""
+}
+
+.fa-umbrella:before {
+ content: ""
+}
+
+.fa-clipboard:before,
+.fa-paste:before {
+ content: ""
+}
+
+.fa-lightbulb-o:before {
+ content: ""
+}
+
+.fa-exchange:before {
+ content: ""
+}
+
+.fa-cloud-download:before {
+ content: ""
+}
+
+.fa-cloud-upload:before {
+ content: ""
+}
+
+.fa-user-md:before {
+ content: ""
+}
+
+.fa-stethoscope:before {
+ content: ""
+}
+
+.fa-suitcase:before {
+ content: ""
+}
+
+.fa-bell-o:before {
+ content: ""
+}
+
+.fa-coffee:before {
+ content: ""
+}
+
+.fa-cutlery:before {
+ content: ""
+}
+
+.fa-file-text-o:before {
+ content: ""
+}
+
+.fa-building-o:before {
+ content: ""
+}
+
+.fa-hospital-o:before {
+ content: ""
+}
+
+.fa-ambulance:before {
+ content: ""
+}
+
+.fa-medkit:before {
+ content: ""
+}
+
+.fa-fighter-jet:before {
+ content: ""
+}
+
+.fa-beer:before {
+ content: ""
+}
+
+.fa-h-square:before {
+ content: ""
+}
+
+.fa-plus-square:before {
+ content: ""
+}
+
+.fa-angle-double-left:before {
+ content: ""
+}
+
+.fa-angle-double-right:before {
+ content: ""
+}
+
+.fa-angle-double-up:before {
+ content: ""
+}
+
+.fa-angle-double-down:before {
+ content: ""
+}
+
+.fa-angle-left:before {
+ content: ""
+}
+
+.fa-angle-right:before {
+ content: ""
+}
+
+.fa-angle-up:before {
+ content: ""
+}
+
+.fa-angle-down:before {
+ content: ""
+}
+
+.fa-desktop:before {
+ content: ""
+}
+
+.fa-laptop:before {
+ content: ""
+}
+
+.fa-tablet:before {
+ content: ""
+}
+
+.fa-mobile-phone:before,
+.fa-mobile:before {
+ content: ""
+}
+
+.fa-circle-o:before {
+ content: ""
+}
+
+.fa-quote-left:before {
+ content: ""
+}
+
+.fa-quote-right:before {
+ content: ""
+}
+
+.fa-spinner:before {
+ content: ""
+}
+
+.fa-circle:before {
+ content: ""
+}
+
+.fa-mail-reply:before,
+.fa-reply:before {
+ content: ""
+}
+
+.fa-github-alt:before {
+ content: ""
+}
+
+.fa-folder-o:before {
+ content: ""
+}
+
+.fa-folder-open-o:before {
+ content: ""
+}
+
+.fa-smile-o:before {
+ content: ""
+}
+
+.fa-frown-o:before {
+ content: ""
+}
+
+.fa-meh-o:before {
+ content: ""
+}
+
+.fa-gamepad:before {
+ content: ""
+}
+
+.fa-keyboard-o:before {
+ content: ""
+}
+
+.fa-flag-o:before {
+ content: ""
+}
+
+.fa-flag-checkered:before {
+ content: ""
+}
+
+.fa-terminal:before {
+ content: ""
+}
+
+.fa-code:before {
+ content: ""
+}
+
+.fa-mail-reply-all:before,
+.fa-reply-all:before {
+ content: ""
+}
+
+.fa-star-half-empty:before,
+.fa-star-half-full:before,
+.fa-star-half-o:before {
+ content: ""
+}
+
+.fa-location-arrow:before {
+ content: ""
+}
+
+.fa-crop:before {
+ content: ""
+}
+
+.fa-code-fork:before {
+ content: ""
+}
+
+.fa-chain-broken:before,
+.fa-unlink:before {
+ content: ""
+}
+
+.fa-question:before {
+ content: ""
+}
+
+.fa-info:before {
+ content: ""
+}
+
+.fa-exclamation:before {
+ content: ""
+}
+
+.fa-superscript:before {
+ content: ""
+}
+
+.fa-subscript:before {
+ content: ""
+}
+
+.fa-eraser:before {
+ content: ""
+}
+
+.fa-puzzle-piece:before {
+ content: ""
+}
+
+.fa-microphone:before {
+ content: ""
+}
+
+.fa-microphone-slash:before {
+ content: ""
+}
+
+.fa-shield:before {
+ content: ""
+}
+
+.fa-calendar-o:before {
+ content: ""
+}
+
+.fa-fire-extinguisher:before {
+ content: ""
+}
+
+.fa-rocket:before {
+ content: ""
+}
+
+.fa-maxcdn:before {
+ content: ""
+}
+
+.fa-chevron-circle-left:before {
+ content: ""
+}
+
+.fa-chevron-circle-right:before {
+ content: ""
+}
+
+.fa-chevron-circle-up:before {
+ content: ""
+}
+
+.fa-chevron-circle-down:before {
+ content: ""
+}
+
+.fa-html5:before {
+ content: ""
+}
+
+.fa-css3:before {
+ content: ""
+}
+
+.fa-anchor:before {
+ content: ""
+}
+
+.fa-unlock-alt:before {
+ content: ""
+}
+
+.fa-bullseye:before {
+ content: ""
+}
+
+.fa-ellipsis-h:before {
+ content: ""
+}
+
+.fa-ellipsis-v:before {
+ content: ""
+}
+
+.fa-rss-square:before {
+ content: ""
+}
+
+.fa-play-circle:before {
+ content: ""
+}
+
+.fa-ticket:before {
+ content: ""
+}
+
+.fa-minus-square:before {
+ content: ""
+}
+
+.fa-minus-square-o:before,
+.wy-menu-vertical li.current>a button.toctree-expand:before,
+.wy-menu-vertical li.on a button.toctree-expand:before {
+ content: ""
+}
+
+.fa-level-up:before {
+ content: ""
+}
+
+.fa-level-down:before {
+ content: ""
+}
+
+.fa-check-square:before {
+ content: ""
+}
+
+.fa-pencil-square:before {
+ content: ""
+}
+
+.fa-external-link-square:before {
+ content: ""
+}
+
+.fa-share-square:before {
+ content: ""
+}
+
+.fa-compass:before {
+ content: ""
+}
+
+.fa-caret-square-o-down:before,
+.fa-toggle-down:before {
+ content: ""
+}
+
+.fa-caret-square-o-up:before,
+.fa-toggle-up:before {
+ content: ""
+}
+
+.fa-caret-square-o-right:before,
+.fa-toggle-right:before {
+ content: ""
+}
+
+.fa-eur:before,
+.fa-euro:before {
+ content: ""
+}
+
+.fa-gbp:before {
+ content: ""
+}
+
+.fa-dollar:before,
+.fa-usd:before {
+ content: ""
+}
+
+.fa-inr:before,
+.fa-rupee:before {
+ content: ""
+}
+
+.fa-cny:before,
+.fa-jpy:before,
+.fa-rmb:before,
+.fa-yen:before {
+ content: ""
+}
+
+.fa-rouble:before,
+.fa-rub:before,
+.fa-ruble:before {
+ content: ""
+}
+
+.fa-krw:before,
+.fa-won:before {
+ content: ""
+}
+
+.fa-bitcoin:before,
+.fa-btc:before {
+ content: ""
+}
+
+.fa-file:before {
+ content: ""
+}
+
+.fa-file-text:before {
+ content: ""
+}
+
+.fa-sort-alpha-asc:before {
+ content: ""
+}
+
+.fa-sort-alpha-desc:before {
+ content: ""
+}
+
+.fa-sort-amount-asc:before {
+ content: ""
+}
+
+.fa-sort-amount-desc:before {
+ content: ""
+}
+
+.fa-sort-numeric-asc:before {
+ content: ""
+}
+
+.fa-sort-numeric-desc:before {
+ content: ""
+}
+
+.fa-thumbs-up:before {
+ content: ""
+}
+
+.fa-thumbs-down:before {
+ content: ""
+}
+
+.fa-youtube-square:before {
+ content: ""
+}
+
+.fa-youtube:before {
+ content: ""
+}
+
+.fa-xing:before {
+ content: ""
+}
+
+.fa-xing-square:before {
+ content: ""
+}
+
+.fa-youtube-play:before {
+ content: ""
+}
+
+.fa-dropbox:before {
+ content: ""
+}
+
+.fa-stack-overflow:before {
+ content: ""
+}
+
+.fa-instagram:before {
+ content: ""
+}
+
+.fa-flickr:before {
+ content: ""
+}
+
+.fa-adn:before {
+ content: ""
+}
+
+.fa-bitbucket:before,
+.icon-bitbucket:before {
+ content: ""
+}
+
+.fa-bitbucket-square:before {
+ content: ""
+}
+
+.fa-tumblr:before {
+ content: ""
+}
+
+.fa-tumblr-square:before {
+ content: ""
+}
+
+.fa-long-arrow-down:before {
+ content: ""
+}
+
+.fa-long-arrow-up:before {
+ content: ""
+}
+
+.fa-long-arrow-left:before {
+ content: ""
+}
+
+.fa-long-arrow-right:before {
+ content: ""
+}
+
+.fa-apple:before {
+ content: ""
+}
+
+.fa-windows:before {
+ content: ""
+}
+
+.fa-android:before {
+ content: ""
+}
+
+.fa-linux:before {
+ content: ""
+}
+
+.fa-dribbble:before {
+ content: ""
+}
+
+.fa-skype:before {
+ content: ""
+}
+
+.fa-foursquare:before {
+ content: ""
+}
+
+.fa-trello:before {
+ content: ""
+}
+
+.fa-female:before {
+ content: ""
+}
+
+.fa-male:before {
+ content: ""
+}
+
+.fa-gittip:before,
+.fa-gratipay:before {
+ content: ""
+}
+
+.fa-sun-o:before {
+ content: ""
+}
+
+.fa-moon-o:before {
+ content: ""
+}
+
+.fa-archive:before {
+ content: ""
+}
+
+.fa-bug:before {
+ content: ""
+}
+
+.fa-vk:before {
+ content: ""
+}
+
+.fa-weibo:before {
+ content: ""
+}
+
+.fa-renren:before {
+ content: ""
+}
+
+.fa-pagelines:before {
+ content: ""
+}
+
+.fa-stack-exchange:before {
+ content: ""
+}
+
+.fa-arrow-circle-o-right:before {
+ content: ""
+}
+
+.fa-arrow-circle-o-left:before {
+ content: ""
+}
+
+.fa-caret-square-o-left:before,
+.fa-toggle-left:before {
+ content: ""
+}
+
+.fa-dot-circle-o:before {
+ content: ""
+}
+
+.fa-wheelchair:before {
+ content: ""
+}
+
+.fa-vimeo-square:before {
+ content: ""
+}
+
+.fa-try:before,
+.fa-turkish-lira:before {
+ content: ""
+}
+
+.fa-plus-square-o:before,
+.wy-menu-vertical li button.toctree-expand:before {
+ content: ""
+}
+
+.fa-space-shuttle:before {
+ content: ""
+}
+
+.fa-slack:before {
+ content: ""
+}
+
+.fa-envelope-square:before {
+ content: ""
+}
+
+.fa-wordpress:before {
+ content: ""
+}
+
+.fa-openid:before {
+ content: ""
+}
+
+.fa-bank:before,
+.fa-institution:before,
+.fa-university:before {
+ content: ""
+}
+
+.fa-graduation-cap:before,
+.fa-mortar-board:before {
+ content: ""
+}
+
+.fa-yahoo:before {
+ content: ""
+}
+
+.fa-google:before {
+ content: ""
+}
+
+.fa-reddit:before {
+ content: ""
+}
+
+.fa-reddit-square:before {
+ content: ""
+}
+
+.fa-stumbleupon-circle:before {
+ content: ""
+}
+
+.fa-stumbleupon:before {
+ content: ""
+}
+
+.fa-delicious:before {
+ content: ""
+}
+
+.fa-digg:before {
+ content: ""
+}
+
+.fa-pied-piper-pp:before {
+ content: ""
+}
+
+.fa-pied-piper-alt:before {
+ content: ""
+}
+
+.fa-drupal:before {
+ content: ""
+}
+
+.fa-joomla:before {
+ content: ""
+}
+
+.fa-language:before {
+ content: ""
+}
+
+.fa-fax:before {
+ content: ""
+}
+
+.fa-building:before {
+ content: ""
+}
+
+.fa-child:before {
+ content: ""
+}
+
+.fa-paw:before {
+ content: ""
+}
+
+.fa-spoon:before {
+ content: ""
+}
+
+.fa-cube:before {
+ content: ""
+}
+
+.fa-cubes:before {
+ content: ""
+}
+
+.fa-behance:before {
+ content: ""
+}
+
+.fa-behance-square:before {
+ content: ""
+}
+
+.fa-steam:before {
+ content: ""
+}
+
+.fa-steam-square:before {
+ content: ""
+}
+
+.fa-recycle:before {
+ content: ""
+}
+
+.fa-automobile:before,
+.fa-car:before {
+ content: ""
+}
+
+.fa-cab:before,
+.fa-taxi:before {
+ content: ""
+}
+
+.fa-tree:before {
+ content: ""
+}
+
+.fa-spotify:before {
+ content: ""
+}
+
+.fa-deviantart:before {
+ content: ""
+}
+
+.fa-soundcloud:before {
+ content: ""
+}
+
+.fa-database:before {
+ content: ""
+}
+
+.fa-file-pdf-o:before {
+ content: ""
+}
+
+.fa-file-word-o:before {
+ content: ""
+}
+
+.fa-file-excel-o:before {
+ content: ""
+}
+
+.fa-file-powerpoint-o:before {
+ content: ""
+}
+
+.fa-file-image-o:before,
+.fa-file-photo-o:before,
+.fa-file-picture-o:before {
+ content: ""
+}
+
+.fa-file-archive-o:before,
+.fa-file-zip-o:before {
+ content: ""
+}
+
+.fa-file-audio-o:before,
+.fa-file-sound-o:before {
+ content: ""
+}
+
+.fa-file-movie-o:before,
+.fa-file-video-o:before {
+ content: ""
+}
+
+.fa-file-code-o:before {
+ content: ""
+}
+
+.fa-vine:before {
+ content: ""
+}
+
+.fa-codepen:before {
+ content: ""
+}
+
+.fa-jsfiddle:before {
+ content: ""
+}
+
+.fa-life-bouy:before,
+.fa-life-buoy:before,
+.fa-life-ring:before,
+.fa-life-saver:before,
+.fa-support:before {
+ content: ""
+}
+
+.fa-circle-o-notch:before {
+ content: ""
+}
+
+.fa-ra:before,
+.fa-rebel:before,
+.fa-resistance:before {
+ content: ""
+}
+
+.fa-empire:before,
+.fa-ge:before {
+ content: ""
+}
+
+.fa-git-square:before {
+ content: ""
+}
+
+.fa-git:before {
+ content: ""
+}
+
+.fa-hacker-news:before,
+.fa-y-combinator-square:before,
+.fa-yc-square:before {
+ content: ""
+}
+
+.fa-tencent-weibo:before {
+ content: ""
+}
+
+.fa-qq:before {
+ content: ""
+}
+
+.fa-wechat:before,
+.fa-weixin:before {
+ content: ""
+}
+
+.fa-paper-plane:before,
+.fa-send:before {
+ content: ""
+}
+
+.fa-paper-plane-o:before,
+.fa-send-o:before {
+ content: ""
+}
+
+.fa-history:before {
+ content: ""
+}
+
+.fa-circle-thin:before {
+ content: ""
+}
+
+.fa-header:before {
+ content: ""
+}
+
+.fa-paragraph:before {
+ content: ""
+}
+
+.fa-sliders:before {
+ content: ""
+}
+
+.fa-share-alt:before {
+ content: ""
+}
+
+.fa-share-alt-square:before {
+ content: ""
+}
+
+.fa-bomb:before {
+ content: ""
+}
+
+.fa-futbol-o:before,
+.fa-soccer-ball-o:before {
+ content: ""
+}
+
+.fa-tty:before {
+ content: ""
+}
+
+.fa-binoculars:before {
+ content: ""
+}
+
+.fa-plug:before {
+ content: ""
+}
+
+.fa-slideshare:before {
+ content: ""
+}
+
+.fa-twitch:before {
+ content: ""
+}
+
+.fa-yelp:before {
+ content: ""
+}
+
+.fa-newspaper-o:before {
+ content: ""
+}
+
+.fa-wifi:before {
+ content: ""
+}
+
+.fa-calculator:before {
+ content: ""
+}
+
+.fa-paypal:before {
+ content: ""
+}
+
+.fa-google-wallet:before {
+ content: ""
+}
+
+.fa-cc-visa:before {
+ content: ""
+}
+
+.fa-cc-mastercard:before {
+ content: ""
+}
+
+.fa-cc-discover:before {
+ content: ""
+}
+
+.fa-cc-amex:before {
+ content: ""
+}
+
+.fa-cc-paypal:before {
+ content: ""
+}
+
+.fa-cc-stripe:before {
+ content: ""
+}
+
+.fa-bell-slash:before {
+ content: ""
+}
+
+.fa-bell-slash-o:before {
+ content: ""
+}
+
+.fa-trash:before {
+ content: ""
+}
+
+.fa-copyright:before {
+ content: ""
+}
+
+.fa-at:before {
+ content: ""
+}
+
+.fa-eyedropper:before {
+ content: ""
+}
+
+.fa-paint-brush:before {
+ content: ""
+}
+
+.fa-birthday-cake:before {
+ content: ""
+}
+
+.fa-area-chart:before {
+ content: ""
+}
+
+.fa-pie-chart:before {
+ content: ""
+}
+
+.fa-line-chart:before {
+ content: ""
+}
+
+.fa-lastfm:before {
+ content: ""
+}
+
+.fa-lastfm-square:before {
+ content: ""
+}
+
+.fa-toggle-off:before {
+ content: ""
+}
+
+.fa-toggle-on:before {
+ content: ""
+}
+
+.fa-bicycle:before {
+ content: ""
+}
+
+.fa-bus:before {
+ content: ""
+}
+
+.fa-ioxhost:before {
+ content: ""
+}
+
+.fa-angellist:before {
+ content: ""
+}
+
+.fa-cc:before {
+ content: ""
+}
+
+.fa-ils:before,
+.fa-shekel:before,
+.fa-sheqel:before {
+ content: ""
+}
+
+.fa-meanpath:before {
+ content: ""
+}
+
+.fa-buysellads:before {
+ content: ""
+}
+
+.fa-connectdevelop:before {
+ content: ""
+}
+
+.fa-dashcube:before {
+ content: ""
+}
+
+.fa-forumbee:before {
+ content: ""
+}
+
+.fa-leanpub:before {
+ content: ""
+}
+
+.fa-sellsy:before {
+ content: ""
+}
+
+.fa-shirtsinbulk:before {
+ content: ""
+}
+
+.fa-simplybuilt:before {
+ content: ""
+}
+
+.fa-skyatlas:before {
+ content: ""
+}
+
+.fa-cart-plus:before {
+ content: ""
+}
+
+.fa-cart-arrow-down:before {
+ content: ""
+}
+
+.fa-diamond:before {
+ content: ""
+}
+
+.fa-ship:before {
+ content: ""
+}
+
+.fa-user-secret:before {
+ content: ""
+}
+
+.fa-motorcycle:before {
+ content: ""
+}
+
+.fa-street-view:before {
+ content: ""
+}
+
+.fa-heartbeat:before {
+ content: ""
+}
+
+.fa-venus:before {
+ content: ""
+}
+
+.fa-mars:before {
+ content: ""
+}
+
+.fa-mercury:before {
+ content: ""
+}
+
+.fa-intersex:before,
+.fa-transgender:before {
+ content: ""
+}
+
+.fa-transgender-alt:before {
+ content: ""
+}
+
+.fa-venus-double:before {
+ content: ""
+}
+
+.fa-mars-double:before {
+ content: ""
+}
+
+.fa-venus-mars:before {
+ content: ""
+}
+
+.fa-mars-stroke:before {
+ content: ""
+}
+
+.fa-mars-stroke-v:before {
+ content: ""
+}
+
+.fa-mars-stroke-h:before {
+ content: ""
+}
+
+.fa-neuter:before {
+ content: ""
+}
+
+.fa-genderless:before {
+ content: ""
+}
+
+.fa-facebook-official:before {
+ content: ""
+}
+
+.fa-pinterest-p:before {
+ content: ""
+}
+
+.fa-whatsapp:before {
+ content: ""
+}
+
+.fa-server:before {
+ content: ""
+}
+
+.fa-user-plus:before {
+ content: ""
+}
+
+.fa-user-times:before {
+ content: ""
+}
+
+.fa-bed:before,
+.fa-hotel:before {
+ content: ""
+}
+
+.fa-viacoin:before {
+ content: ""
+}
+
+.fa-train:before {
+ content: ""
+}
+
+.fa-subway:before {
+ content: ""
+}
+
+.fa-medium:before {
+ content: ""
+}
+
+.fa-y-combinator:before,
+.fa-yc:before {
+ content: ""
+}
+
+.fa-optin-monster:before {
+ content: ""
+}
+
+.fa-opencart:before {
+ content: ""
+}
+
+.fa-expeditedssl:before {
+ content: ""
+}
+
+.fa-battery-4:before,
+.fa-battery-full:before,
+.fa-battery:before {
+ content: ""
+}
+
+.fa-battery-3:before,
+.fa-battery-three-quarters:before {
+ content: ""
+}
+
+.fa-battery-2:before,
+.fa-battery-half:before {
+ content: ""
+}
+
+.fa-battery-1:before,
+.fa-battery-quarter:before {
+ content: ""
+}
+
+.fa-battery-0:before,
+.fa-battery-empty:before {
+ content: ""
+}
+
+.fa-mouse-pointer:before {
+ content: ""
+}
+
+.fa-i-cursor:before {
+ content: ""
+}
+
+.fa-object-group:before {
+ content: ""
+}
+
+.fa-object-ungroup:before {
+ content: ""
+}
+
+.fa-sticky-note:before {
+ content: ""
+}
+
+.fa-sticky-note-o:before {
+ content: ""
+}
+
+.fa-cc-jcb:before {
+ content: ""
+}
+
+.fa-cc-diners-club:before {
+ content: ""
+}
+
+.fa-clone:before {
+ content: ""
+}
+
+.fa-balance-scale:before {
+ content: ""
+}
+
+.fa-hourglass-o:before {
+ content: ""
+}
+
+.fa-hourglass-1:before,
+.fa-hourglass-start:before {
+ content: ""
+}
+
+.fa-hourglass-2:before,
+.fa-hourglass-half:before {
+ content: ""
+}
+
+.fa-hourglass-3:before,
+.fa-hourglass-end:before {
+ content: ""
+}
+
+.fa-hourglass:before {
+ content: ""
+}
+
+.fa-hand-grab-o:before,
+.fa-hand-rock-o:before {
+ content: ""
+}
+
+.fa-hand-paper-o:before,
+.fa-hand-stop-o:before {
+ content: ""
+}
+
+.fa-hand-scissors-o:before {
+ content: ""
+}
+
+.fa-hand-lizard-o:before {
+ content: ""
+}
+
+.fa-hand-spock-o:before {
+ content: ""
+}
+
+.fa-hand-pointer-o:before {
+ content: ""
+}
+
+.fa-hand-peace-o:before {
+ content: ""
+}
+
+.fa-trademark:before {
+ content: ""
+}
+
+.fa-registered:before {
+ content: ""
+}
+
+.fa-creative-commons:before {
+ content: ""
+}
+
+.fa-gg:before {
+ content: ""
+}
+
+.fa-gg-circle:before {
+ content: ""
+}
+
+.fa-tripadvisor:before {
+ content: ""
+}
+
+.fa-odnoklassniki:before {
+ content: ""
+}
+
+.fa-odnoklassniki-square:before {
+ content: ""
+}
+
+.fa-get-pocket:before {
+ content: ""
+}
+
+.fa-wikipedia-w:before {
+ content: ""
+}
+
+.fa-safari:before {
+ content: ""
+}
+
+.fa-chrome:before {
+ content: ""
+}
+
+.fa-firefox:before {
+ content: ""
+}
+
+.fa-opera:before {
+ content: ""
+}
+
+.fa-internet-explorer:before {
+ content: ""
+}
+
+.fa-television:before,
+.fa-tv:before {
+ content: ""
+}
+
+.fa-contao:before {
+ content: ""
+}
+
+.fa-500px:before {
+ content: ""
+}
+
+.fa-amazon:before {
+ content: ""
+}
+
+.fa-calendar-plus-o:before {
+ content: ""
+}
+
+.fa-calendar-minus-o:before {
+ content: ""
+}
+
+.fa-calendar-times-o:before {
+ content: ""
+}
+
+.fa-calendar-check-o:before {
+ content: ""
+}
+
+.fa-industry:before {
+ content: ""
+}
+
+.fa-map-pin:before {
+ content: ""
+}
+
+.fa-map-signs:before {
+ content: ""
+}
+
+.fa-map-o:before {
+ content: ""
+}
+
+.fa-map:before {
+ content: ""
+}
+
+.fa-commenting:before {
+ content: ""
+}
+
+.fa-commenting-o:before {
+ content: ""
+}
+
+.fa-houzz:before {
+ content: ""
+}
+
+.fa-vimeo:before {
+ content: ""
+}
+
+.fa-black-tie:before {
+ content: ""
+}
+
+.fa-fonticons:before {
+ content: ""
+}
+
+.fa-reddit-alien:before {
+ content: ""
+}
+
+.fa-edge:before {
+ content: ""
+}
+
+.fa-credit-card-alt:before {
+ content: ""
+}
+
+.fa-codiepie:before {
+ content: ""
+}
+
+.fa-modx:before {
+ content: ""
+}
+
+.fa-fort-awesome:before {
+ content: ""
+}
+
+.fa-usb:before {
+ content: ""
+}
+
+.fa-product-hunt:before {
+ content: ""
+}
+
+.fa-mixcloud:before {
+ content: ""
+}
+
+.fa-scribd:before {
+ content: ""
+}
+
+.fa-pause-circle:before {
+ content: ""
+}
+
+.fa-pause-circle-o:before {
+ content: ""
+}
+
+.fa-stop-circle:before {
+ content: ""
+}
+
+.fa-stop-circle-o:before {
+ content: ""
+}
+
+.fa-shopping-bag:before {
+ content: ""
+}
+
+.fa-shopping-basket:before {
+ content: ""
+}
+
+.fa-hashtag:before {
+ content: ""
+}
+
+.fa-bluetooth:before {
+ content: ""
+}
+
+.fa-bluetooth-b:before {
+ content: ""
+}
+
+.fa-percent:before {
+ content: ""
+}
+
+.fa-gitlab:before,
+.icon-gitlab:before {
+ content: ""
+}
+
+.fa-wpbeginner:before {
+ content: ""
+}
+
+.fa-wpforms:before {
+ content: ""
+}
+
+.fa-envira:before {
+ content: ""
+}
+
+.fa-universal-access:before {
+ content: ""
+}
+
+.fa-wheelchair-alt:before {
+ content: ""
+}
+
+.fa-question-circle-o:before {
+ content: ""
+}
+
+.fa-blind:before {
+ content: ""
+}
+
+.fa-audio-description:before {
+ content: ""
+}
+
+.fa-volume-control-phone:before {
+ content: ""
+}
+
+.fa-braille:before {
+ content: ""
+}
+
+.fa-assistive-listening-systems:before {
+ content: ""
+}
+
+.fa-american-sign-language-interpreting:before,
+.fa-asl-interpreting:before {
+ content: ""
+}
+
+.fa-deaf:before,
+.fa-deafness:before,
+.fa-hard-of-hearing:before {
+ content: ""
+}
+
+.fa-glide:before {
+ content: ""
+}
+
+.fa-glide-g:before {
+ content: ""
+}
+
+.fa-sign-language:before,
+.fa-signing:before {
+ content: ""
+}
+
+.fa-low-vision:before {
+ content: ""
+}
+
+.fa-viadeo:before {
+ content: ""
+}
+
+.fa-viadeo-square:before {
+ content: ""
+}
+
+.fa-snapchat:before {
+ content: ""
+}
+
+.fa-snapchat-ghost:before {
+ content: ""
+}
+
+.fa-snapchat-square:before {
+ content: ""
+}
+
+.fa-pied-piper:before {
+ content: ""
+}
+
+.fa-first-order:before {
+ content: ""
+}
+
+.fa-yoast:before {
+ content: ""
+}
+
+.fa-themeisle:before {
+ content: ""
+}
+
+.fa-google-plus-circle:before,
+.fa-google-plus-official:before {
+ content: ""
+}
+
+.fa-fa:before,
+.fa-font-awesome:before {
+ content: ""
+}
+
+.fa-handshake-o:before {
+ content: ""
+}
+
+.fa-envelope-open:before {
+ content: ""
+}
+
+.fa-envelope-open-o:before {
+ content: ""
+}
+
+.fa-linode:before {
+ content: ""
+}
+
+.fa-address-book:before {
+ content: ""
+}
+
+.fa-address-book-o:before {
+ content: ""
+}
+
+.fa-address-card:before,
+.fa-vcard:before {
+ content: ""
+}
+
+.fa-address-card-o:before,
+.fa-vcard-o:before {
+ content: ""
+}
+
+.fa-user-circle:before {
+ content: ""
+}
+
+.fa-user-circle-o:before {
+ content: ""
+}
+
+.fa-user-o:before {
+ content: ""
+}
+
+.fa-id-badge:before {
+ content: ""
+}
+
+.fa-drivers-license:before,
+.fa-id-card:before {
+ content: ""
+}
+
+.fa-drivers-license-o:before,
+.fa-id-card-o:before {
+ content: ""
+}
+
+.fa-quora:before {
+ content: ""
+}
+
+.fa-free-code-camp:before {
+ content: ""
+}
+
+.fa-telegram:before {
+ content: ""
+}
+
+.fa-thermometer-4:before,
+.fa-thermometer-full:before,
+.fa-thermometer:before {
+ content: ""
+}
+
+.fa-thermometer-3:before,
+.fa-thermometer-three-quarters:before {
+ content: ""
+}
+
+.fa-thermometer-2:before,
+.fa-thermometer-half:before {
+ content: ""
+}
+
+.fa-thermometer-1:before,
+.fa-thermometer-quarter:before {
+ content: ""
+}
+
+.fa-thermometer-0:before,
+.fa-thermometer-empty:before {
+ content: ""
+}
+
+.fa-shower:before {
+ content: ""
+}
+
+.fa-bath:before,
+.fa-bathtub:before,
+.fa-s15:before {
+ content: ""
+}
+
+.fa-podcast:before {
+ content: ""
+}
+
+.fa-window-maximize:before {
+ content: ""
+}
+
+.fa-window-minimize:before {
+ content: ""
+}
+
+.fa-window-restore:before {
+ content: ""
+}
+
+.fa-times-rectangle:before,
+.fa-window-close:before {
+ content: ""
+}
+
+.fa-times-rectangle-o:before,
+.fa-window-close-o:before {
+ content: ""
+}
+
+.fa-bandcamp:before {
+ content: ""
+}
+
+.fa-grav:before {
+ content: ""
+}
+
+.fa-etsy:before {
+ content: ""
+}
+
+.fa-imdb:before {
+ content: ""
+}
+
+.fa-ravelry:before {
+ content: ""
+}
+
+.fa-eercast:before {
+ content: ""
+}
+
+.fa-microchip:before {
+ content: ""
+}
+
+.fa-snowflake-o:before {
+ content: ""
+}
+
+.fa-superpowers:before {
+ content: ""
+}
+
+.fa-wpexplorer:before {
+ content: ""
+}
+
+.fa-meetup:before {
+ content: ""
+}
+
+.sr-only {
+ position: absolute;
+ width: 1px;
+ height: 1px;
+ padding: 0;
+ margin: -1px;
+ overflow: hidden;
+ clip: rect(0, 0, 0, 0);
+ border: 0
+}
+
+.sr-only-focusable:active,
+.sr-only-focusable:focus {
+ position: static;
+ width: auto;
+ height: auto;
+ margin: 0;
+ overflow: visible;
+ clip: auto
+}
+
+.fa,
+.icon,
+.rst-content .admonition-title,
+.rst-content .code-block-caption .headerlink,
+.rst-content .eqno .headerlink,
+.rst-content code.download span:first-child,
+.rst-content dl dt .headerlink,
+.rst-content h1 .headerlink,
+.rst-content h2 .headerlink,
+.rst-content h3 .headerlink,
+.rst-content h4 .headerlink,
+.rst-content h5 .headerlink,
+.rst-content h6 .headerlink,
+.rst-content p.caption .headerlink,
+.rst-content p .headerlink,
+.rst-content table>caption .headerlink,
+.rst-content tt.download span:first-child,
+.wy-dropdown .caret,
+.wy-inline-validate.wy-inline-validate-danger .wy-input-context,
+.wy-inline-validate.wy-inline-validate-info .wy-input-context,
+.wy-inline-validate.wy-inline-validate-success .wy-input-context,
+.wy-inline-validate.wy-inline-validate-warning .wy-input-context,
+.wy-menu-vertical li.current>a button.toctree-expand,
+.wy-menu-vertical li.on a button.toctree-expand,
+.wy-menu-vertical li button.toctree-expand {
+ font-family: inherit
+}
+
+.fa:before,
+.icon:before,
+.rst-content .admonition-title:before,
+.rst-content .code-block-caption .headerlink:before,
+.rst-content .eqno .headerlink:before,
+.rst-content code.download span:first-child:before,
+.rst-content dl dt .headerlink:before,
+.rst-content h1 .headerlink:before,
+.rst-content h2 .headerlink:before,
+.rst-content h3 .headerlink:before,
+.rst-content h4 .headerlink:before,
+.rst-content h5 .headerlink:before,
+.rst-content h6 .headerlink:before,
+.rst-content p.caption .headerlink:before,
+.rst-content p .headerlink:before,
+.rst-content table>caption .headerlink:before,
+.rst-content tt.download span:first-child:before,
+.wy-dropdown .caret:before,
+.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,
+.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,
+.wy-menu-vertical li.current>a button.toctree-expand:before,
+.wy-menu-vertical li.on a button.toctree-expand:before,
+.wy-menu-vertical li button.toctree-expand:before {
+ font-family: FontAwesome;
+ display: inline-block;
+ font-style: normal;
+ font-weight: 400;
+ line-height: 1;
+ text-decoration: inherit
+}
+
+.rst-content .code-block-caption a .headerlink,
+.rst-content .eqno a .headerlink,
+.rst-content a .admonition-title,
+.rst-content code.download a span:first-child,
+.rst-content dl dt a .headerlink,
+.rst-content h1 a .headerlink,
+.rst-content h2 a .headerlink,
+.rst-content h3 a .headerlink,
+.rst-content h4 a .headerlink,
+.rst-content h5 a .headerlink,
+.rst-content h6 a .headerlink,
+.rst-content p.caption a .headerlink,
+.rst-content p a .headerlink,
+.rst-content table>caption a .headerlink,
+.rst-content tt.download a span:first-child,
+.wy-menu-vertical li.current>a button.toctree-expand,
+.wy-menu-vertical li.on a button.toctree-expand,
+.wy-menu-vertical li a button.toctree-expand,
+a .fa,
+a .icon,
+a .rst-content .admonition-title,
+a .rst-content .code-block-caption .headerlink,
+a .rst-content .eqno .headerlink,
+a .rst-content code.download span:first-child,
+a .rst-content dl dt .headerlink,
+a .rst-content h1 .headerlink,
+a .rst-content h2 .headerlink,
+a .rst-content h3 .headerlink,
+a .rst-content h4 .headerlink,
+a .rst-content h5 .headerlink,
+a .rst-content h6 .headerlink,
+a .rst-content p.caption .headerlink,
+a .rst-content p .headerlink,
+a .rst-content table>caption .headerlink,
+a .rst-content tt.download span:first-child,
+a .wy-menu-vertical li button.toctree-expand {
+ display: inline-block;
+ text-decoration: inherit
+}
+
+.btn .fa,
+.btn .icon,
+.btn .rst-content .admonition-title,
+.btn .rst-content .code-block-caption .headerlink,
+.btn .rst-content .eqno .headerlink,
+.btn .rst-content code.download span:first-child,
+.btn .rst-content dl dt .headerlink,
+.btn .rst-content h1 .headerlink,
+.btn .rst-content h2 .headerlink,
+.btn .rst-content h3 .headerlink,
+.btn .rst-content h4 .headerlink,
+.btn .rst-content h5 .headerlink,
+.btn .rst-content h6 .headerlink,
+.btn .rst-content p .headerlink,
+.btn .rst-content table>caption .headerlink,
+.btn .rst-content tt.download span:first-child,
+.btn .wy-menu-vertical li.current>a button.toctree-expand,
+.btn .wy-menu-vertical li.on a button.toctree-expand,
+.btn .wy-menu-vertical li button.toctree-expand,
+.nav .fa,
+.nav .icon,
+.nav .rst-content .admonition-title,
+.nav .rst-content .code-block-caption .headerlink,
+.nav .rst-content .eqno .headerlink,
+.nav .rst-content code.download span:first-child,
+.nav .rst-content dl dt .headerlink,
+.nav .rst-content h1 .headerlink,
+.nav .rst-content h2 .headerlink,
+.nav .rst-content h3 .headerlink,
+.nav .rst-content h4 .headerlink,
+.nav .rst-content h5 .headerlink,
+.nav .rst-content h6 .headerlink,
+.nav .rst-content p .headerlink,
+.nav .rst-content table>caption .headerlink,
+.nav .rst-content tt.download span:first-child,
+.nav .wy-menu-vertical li.current>a button.toctree-expand,
+.nav .wy-menu-vertical li.on a button.toctree-expand,
+.nav .wy-menu-vertical li button.toctree-expand,
+.rst-content .btn .admonition-title,
+.rst-content .code-block-caption .btn .headerlink,
+.rst-content .code-block-caption .nav .headerlink,
+.rst-content .eqno .btn .headerlink,
+.rst-content .eqno .nav .headerlink,
+.rst-content .nav .admonition-title,
+.rst-content code.download .btn span:first-child,
+.rst-content code.download .nav span:first-child,
+.rst-content dl dt .btn .headerlink,
+.rst-content dl dt .nav .headerlink,
+.rst-content h1 .btn .headerlink,
+.rst-content h1 .nav .headerlink,
+.rst-content h2 .btn .headerlink,
+.rst-content h2 .nav .headerlink,
+.rst-content h3 .btn .headerlink,
+.rst-content h3 .nav .headerlink,
+.rst-content h4 .btn .headerlink,
+.rst-content h4 .nav .headerlink,
+.rst-content h5 .btn .headerlink,
+.rst-content h5 .nav .headerlink,
+.rst-content h6 .btn .headerlink,
+.rst-content h6 .nav .headerlink,
+.rst-content p .btn .headerlink,
+.rst-content p .nav .headerlink,
+.rst-content table>caption .btn .headerlink,
+.rst-content table>caption .nav .headerlink,
+.rst-content tt.download .btn span:first-child,
+.rst-content tt.download .nav span:first-child,
+.wy-menu-vertical li .btn button.toctree-expand,
+.wy-menu-vertical li.current>a .btn button.toctree-expand,
+.wy-menu-vertical li.current>a .nav button.toctree-expand,
+.wy-menu-vertical li .nav button.toctree-expand,
+.wy-menu-vertical li.on a .btn button.toctree-expand,
+.wy-menu-vertical li.on a .nav button.toctree-expand {
+ display: inline
+}
+
+.btn .fa-large.icon,
+.btn .fa.fa-large,
+.btn .rst-content .code-block-caption .fa-large.headerlink,
+.btn .rst-content .eqno .fa-large.headerlink,
+.btn .rst-content .fa-large.admonition-title,
+.btn .rst-content code.download span.fa-large:first-child,
+.btn .rst-content dl dt .fa-large.headerlink,
+.btn .rst-content h1 .fa-large.headerlink,
+.btn .rst-content h2 .fa-large.headerlink,
+.btn .rst-content h3 .fa-large.headerlink,
+.btn .rst-content h4 .fa-large.headerlink,
+.btn .rst-content h5 .fa-large.headerlink,
+.btn .rst-content h6 .fa-large.headerlink,
+.btn .rst-content p .fa-large.headerlink,
+.btn .rst-content table>caption .fa-large.headerlink,
+.btn .rst-content tt.download span.fa-large:first-child,
+.btn .wy-menu-vertical li button.fa-large.toctree-expand,
+.nav .fa-large.icon,
+.nav .fa.fa-large,
+.nav .rst-content .code-block-caption .fa-large.headerlink,
+.nav .rst-content .eqno .fa-large.headerlink,
+.nav .rst-content .fa-large.admonition-title,
+.nav .rst-content code.download span.fa-large:first-child,
+.nav .rst-content dl dt .fa-large.headerlink,
+.nav .rst-content h1 .fa-large.headerlink,
+.nav .rst-content h2 .fa-large.headerlink,
+.nav .rst-content h3 .fa-large.headerlink,
+.nav .rst-content h4 .fa-large.headerlink,
+.nav .rst-content h5 .fa-large.headerlink,
+.nav .rst-content h6 .fa-large.headerlink,
+.nav .rst-content p .fa-large.headerlink,
+.nav .rst-content table>caption .fa-large.headerlink,
+.nav .rst-content tt.download span.fa-large:first-child,
+.nav .wy-menu-vertical li button.fa-large.toctree-expand,
+.rst-content .btn .fa-large.admonition-title,
+.rst-content .code-block-caption .btn .fa-large.headerlink,
+.rst-content .code-block-caption .nav .fa-large.headerlink,
+.rst-content .eqno .btn .fa-large.headerlink,
+.rst-content .eqno .nav .fa-large.headerlink,
+.rst-content .nav .fa-large.admonition-title,
+.rst-content code.download .btn span.fa-large:first-child,
+.rst-content code.download .nav span.fa-large:first-child,
+.rst-content dl dt .btn .fa-large.headerlink,
+.rst-content dl dt .nav .fa-large.headerlink,
+.rst-content h1 .btn .fa-large.headerlink,
+.rst-content h1 .nav .fa-large.headerlink,
+.rst-content h2 .btn .fa-large.headerlink,
+.rst-content h2 .nav .fa-large.headerlink,
+.rst-content h3 .btn .fa-large.headerlink,
+.rst-content h3 .nav .fa-large.headerlink,
+.rst-content h4 .btn .fa-large.headerlink,
+.rst-content h4 .nav .fa-large.headerlink,
+.rst-content h5 .btn .fa-large.headerlink,
+.rst-content h5 .nav .fa-large.headerlink,
+.rst-content h6 .btn .fa-large.headerlink,
+.rst-content h6 .nav .fa-large.headerlink,
+.rst-content p .btn .fa-large.headerlink,
+.rst-content p .nav .fa-large.headerlink,
+.rst-content table>caption .btn .fa-large.headerlink,
+.rst-content table>caption .nav .fa-large.headerlink,
+.rst-content tt.download .btn span.fa-large:first-child,
+.rst-content tt.download .nav span.fa-large:first-child,
+.wy-menu-vertical li .btn button.fa-large.toctree-expand,
+.wy-menu-vertical li .nav button.fa-large.toctree-expand {
+ line-height: .9em
+}
+
+.btn .fa-spin.icon,
+.btn .fa.fa-spin,
+.btn .rst-content .code-block-caption .fa-spin.headerlink,
+.btn .rst-content .eqno .fa-spin.headerlink,
+.btn .rst-content .fa-spin.admonition-title,
+.btn .rst-content code.download span.fa-spin:first-child,
+.btn .rst-content dl dt .fa-spin.headerlink,
+.btn .rst-content h1 .fa-spin.headerlink,
+.btn .rst-content h2 .fa-spin.headerlink,
+.btn .rst-content h3 .fa-spin.headerlink,
+.btn .rst-content h4 .fa-spin.headerlink,
+.btn .rst-content h5 .fa-spin.headerlink,
+.btn .rst-content h6 .fa-spin.headerlink,
+.btn .rst-content p .fa-spin.headerlink,
+.btn .rst-content table>caption .fa-spin.headerlink,
+.btn .rst-content tt.download span.fa-spin:first-child,
+.btn .wy-menu-vertical li button.fa-spin.toctree-expand,
+.nav .fa-spin.icon,
+.nav .fa.fa-spin,
+.nav .rst-content .code-block-caption .fa-spin.headerlink,
+.nav .rst-content .eqno .fa-spin.headerlink,
+.nav .rst-content .fa-spin.admonition-title,
+.nav .rst-content code.download span.fa-spin:first-child,
+.nav .rst-content dl dt .fa-spin.headerlink,
+.nav .rst-content h1 .fa-spin.headerlink,
+.nav .rst-content h2 .fa-spin.headerlink,
+.nav .rst-content h3 .fa-spin.headerlink,
+.nav .rst-content h4 .fa-spin.headerlink,
+.nav .rst-content h5 .fa-spin.headerlink,
+.nav .rst-content h6 .fa-spin.headerlink,
+.nav .rst-content p .fa-spin.headerlink,
+.nav .rst-content table>caption .fa-spin.headerlink,
+.nav .rst-content tt.download span.fa-spin:first-child,
+.nav .wy-menu-vertical li button.fa-spin.toctree-expand,
+.rst-content .btn .fa-spin.admonition-title,
+.rst-content .code-block-caption .btn .fa-spin.headerlink,
+.rst-content .code-block-caption .nav .fa-spin.headerlink,
+.rst-content .eqno .btn .fa-spin.headerlink,
+.rst-content .eqno .nav .fa-spin.headerlink,
+.rst-content .nav .fa-spin.admonition-title,
+.rst-content code.download .btn span.fa-spin:first-child,
+.rst-content code.download .nav span.fa-spin:first-child,
+.rst-content dl dt .btn .fa-spin.headerlink,
+.rst-content dl dt .nav .fa-spin.headerlink,
+.rst-content h1 .btn .fa-spin.headerlink,
+.rst-content h1 .nav .fa-spin.headerlink,
+.rst-content h2 .btn .fa-spin.headerlink,
+.rst-content h2 .nav .fa-spin.headerlink,
+.rst-content h3 .btn .fa-spin.headerlink,
+.rst-content h3 .nav .fa-spin.headerlink,
+.rst-content h4 .btn .fa-spin.headerlink,
+.rst-content h4 .nav .fa-spin.headerlink,
+.rst-content h5 .btn .fa-spin.headerlink,
+.rst-content h5 .nav .fa-spin.headerlink,
+.rst-content h6 .btn .fa-spin.headerlink,
+.rst-content h6 .nav .fa-spin.headerlink,
+.rst-content p .btn .fa-spin.headerlink,
+.rst-content p .nav .fa-spin.headerlink,
+.rst-content table>caption .btn .fa-spin.headerlink,
+.rst-content table>caption .nav .fa-spin.headerlink,
+.rst-content tt.download .btn span.fa-spin:first-child,
+.rst-content tt.download .nav span.fa-spin:first-child,
+.wy-menu-vertical li .btn button.fa-spin.toctree-expand,
+.wy-menu-vertical li .nav button.fa-spin.toctree-expand {
+ display: inline-block
+}
+
+.btn.fa:before,
+.btn.icon:before,
+.rst-content .btn.admonition-title:before,
+.rst-content .code-block-caption .btn.headerlink:before,
+.rst-content .eqno .btn.headerlink:before,
+.rst-content code.download span.btn:first-child:before,
+.rst-content dl dt .btn.headerlink:before,
+.rst-content h1 .btn.headerlink:before,
+.rst-content h2 .btn.headerlink:before,
+.rst-content h3 .btn.headerlink:before,
+.rst-content h4 .btn.headerlink:before,
+.rst-content h5 .btn.headerlink:before,
+.rst-content h6 .btn.headerlink:before,
+.rst-content p .btn.headerlink:before,
+.rst-content table>caption .btn.headerlink:before,
+.rst-content tt.download span.btn:first-child:before,
+.wy-menu-vertical li button.btn.toctree-expand:before {
+ opacity: .5;
+ -webkit-transition: opacity .05s ease-in;
+ -moz-transition: opacity .05s ease-in;
+ transition: opacity .05s ease-in
+}
+
+.btn.fa:hover:before,
+.btn.icon:hover:before,
+.rst-content .btn.admonition-title:hover:before,
+.rst-content .code-block-caption .btn.headerlink:hover:before,
+.rst-content .eqno .btn.headerlink:hover:before,
+.rst-content code.download span.btn:first-child:hover:before,
+.rst-content dl dt .btn.headerlink:hover:before,
+.rst-content h1 .btn.headerlink:hover:before,
+.rst-content h2 .btn.headerlink:hover:before,
+.rst-content h3 .btn.headerlink:hover:before,
+.rst-content h4 .btn.headerlink:hover:before,
+.rst-content h5 .btn.headerlink:hover:before,
+.rst-content h6 .btn.headerlink:hover:before,
+.rst-content p .btn.headerlink:hover:before,
+.rst-content table>caption .btn.headerlink:hover:before,
+.rst-content tt.download span.btn:first-child:hover:before,
+.wy-menu-vertical li button.btn.toctree-expand:hover:before {
+ opacity: 1
+}
+
+.btn-mini .fa:before,
+.btn-mini .icon:before,
+.btn-mini .rst-content .admonition-title:before,
+.btn-mini .rst-content .code-block-caption .headerlink:before,
+.btn-mini .rst-content .eqno .headerlink:before,
+.btn-mini .rst-content code.download span:first-child:before,
+.btn-mini .rst-content dl dt .headerlink:before,
+.btn-mini .rst-content h1 .headerlink:before,
+.btn-mini .rst-content h2 .headerlink:before,
+.btn-mini .rst-content h3 .headerlink:before,
+.btn-mini .rst-content h4 .headerlink:before,
+.btn-mini .rst-content h5 .headerlink:before,
+.btn-mini .rst-content h6 .headerlink:before,
+.btn-mini .rst-content p .headerlink:before,
+.btn-mini .rst-content table>caption .headerlink:before,
+.btn-mini .rst-content tt.download span:first-child:before,
+.btn-mini .wy-menu-vertical li button.toctree-expand:before,
+.rst-content .btn-mini .admonition-title:before,
+.rst-content .code-block-caption .btn-mini .headerlink:before,
+.rst-content .eqno .btn-mini .headerlink:before,
+.rst-content code.download .btn-mini span:first-child:before,
+.rst-content dl dt .btn-mini .headerlink:before,
+.rst-content h1 .btn-mini .headerlink:before,
+.rst-content h2 .btn-mini .headerlink:before,
+.rst-content h3 .btn-mini .headerlink:before,
+.rst-content h4 .btn-mini .headerlink:before,
+.rst-content h5 .btn-mini .headerlink:before,
+.rst-content h6 .btn-mini .headerlink:before,
+.rst-content p .btn-mini .headerlink:before,
+.rst-content table>caption .btn-mini .headerlink:before,
+.rst-content tt.download .btn-mini span:first-child:before,
+.wy-menu-vertical li .btn-mini button.toctree-expand:before {
+ font-size: 14px;
+ vertical-align: -15%
+}
+
+.rst-content .admonition,
+.rst-content .admonition-todo,
+.rst-content .attention,
+.rst-content .caution,
+.rst-content .danger,
+.rst-content .error,
+.rst-content .hint,
+.rst-content .important,
+.rst-content .note,
+.rst-content .seealso,
+.rst-content .tip,
+.rst-content .warning,
+.wy-alert {
+ padding: 12px;
+ line-height: 24px;
+ margin-bottom: 24px;
+ background: #e7f2fa
+}
+
+.rst-content .admonition-title,
+.wy-alert-title {
+ font-weight: 700;
+ display: block;
+ color: #fff;
+ background: #6ab0de;
+ padding: 6px 12px;
+ margin: -12px -12px 12px
+}
+
+.rst-content .danger,
+.rst-content .error,
+.rst-content .wy-alert-danger.admonition,
+.rst-content .wy-alert-danger.admonition-todo,
+.rst-content .wy-alert-danger.attention,
+.rst-content .wy-alert-danger.caution,
+.rst-content .wy-alert-danger.hint,
+.rst-content .wy-alert-danger.important,
+.rst-content .wy-alert-danger.note,
+.rst-content .wy-alert-danger.seealso,
+.rst-content .wy-alert-danger.tip,
+.rst-content .wy-alert-danger.warning,
+.wy-alert.wy-alert-danger {
+ background: #fdf3f2
+}
+
+.rst-content .danger .admonition-title,
+.rst-content .danger .wy-alert-title,
+.rst-content .error .admonition-title,
+.rst-content .error .wy-alert-title,
+.rst-content .wy-alert-danger.admonition-todo .admonition-title,
+.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,
+.rst-content .wy-alert-danger.admonition .admonition-title,
+.rst-content .wy-alert-danger.admonition .wy-alert-title,
+.rst-content .wy-alert-danger.attention .admonition-title,
+.rst-content .wy-alert-danger.attention .wy-alert-title,
+.rst-content .wy-alert-danger.caution .admonition-title,
+.rst-content .wy-alert-danger.caution .wy-alert-title,
+.rst-content .wy-alert-danger.hint .admonition-title,
+.rst-content .wy-alert-danger.hint .wy-alert-title,
+.rst-content .wy-alert-danger.important .admonition-title,
+.rst-content .wy-alert-danger.important .wy-alert-title,
+.rst-content .wy-alert-danger.note .admonition-title,
+.rst-content .wy-alert-danger.note .wy-alert-title,
+.rst-content .wy-alert-danger.seealso .admonition-title,
+.rst-content .wy-alert-danger.seealso .wy-alert-title,
+.rst-content .wy-alert-danger.tip .admonition-title,
+.rst-content .wy-alert-danger.tip .wy-alert-title,
+.rst-content .wy-alert-danger.warning .admonition-title,
+.rst-content .wy-alert-danger.warning .wy-alert-title,
+.rst-content .wy-alert.wy-alert-danger .admonition-title,
+.wy-alert.wy-alert-danger .rst-content .admonition-title,
+.wy-alert.wy-alert-danger .wy-alert-title {
+ background: #f29f97
+}
+
+.rst-content .admonition-todo,
+.rst-content .attention,
+.rst-content .caution,
+.rst-content .warning,
+.rst-content .wy-alert-warning.admonition,
+.rst-content .wy-alert-warning.danger,
+.rst-content .wy-alert-warning.error,
+.rst-content .wy-alert-warning.hint,
+.rst-content .wy-alert-warning.important,
+.rst-content .wy-alert-warning.note,
+.rst-content .wy-alert-warning.seealso,
+.rst-content .wy-alert-warning.tip,
+.wy-alert.wy-alert-warning {
+ background: #ffedcc
+}
+
+.rst-content .admonition-todo .admonition-title,
+.rst-content .admonition-todo .wy-alert-title,
+.rst-content .attention .admonition-title,
+.rst-content .attention .wy-alert-title,
+.rst-content .caution .admonition-title,
+.rst-content .caution .wy-alert-title,
+.rst-content .warning .admonition-title,
+.rst-content .warning .wy-alert-title,
+.rst-content .wy-alert-warning.admonition .admonition-title,
+.rst-content .wy-alert-warning.admonition .wy-alert-title,
+.rst-content .wy-alert-warning.danger .admonition-title,
+.rst-content .wy-alert-warning.danger .wy-alert-title,
+.rst-content .wy-alert-warning.error .admonition-title,
+.rst-content .wy-alert-warning.error .wy-alert-title,
+.rst-content .wy-alert-warning.hint .admonition-title,
+.rst-content .wy-alert-warning.hint .wy-alert-title,
+.rst-content .wy-alert-warning.important .admonition-title,
+.rst-content .wy-alert-warning.important .wy-alert-title,
+.rst-content .wy-alert-warning.note .admonition-title,
+.rst-content .wy-alert-warning.note .wy-alert-title,
+.rst-content .wy-alert-warning.seealso .admonition-title,
+.rst-content .wy-alert-warning.seealso .wy-alert-title,
+.rst-content .wy-alert-warning.tip .admonition-title,
+.rst-content .wy-alert-warning.tip .wy-alert-title,
+.rst-content .wy-alert.wy-alert-warning .admonition-title,
+.wy-alert.wy-alert-warning .rst-content .admonition-title,
+.wy-alert.wy-alert-warning .wy-alert-title {
+ background: #f0b37e
+}
+
+.rst-content .note,
+.rst-content .seealso,
+.rst-content .wy-alert-info.admonition,
+.rst-content .wy-alert-info.admonition-todo,
+.rst-content .wy-alert-info.attention,
+.rst-content .wy-alert-info.caution,
+.rst-content .wy-alert-info.danger,
+.rst-content .wy-alert-info.error,
+.rst-content .wy-alert-info.hint,
+.rst-content .wy-alert-info.important,
+.rst-content .wy-alert-info.tip,
+.rst-content .wy-alert-info.warning,
+.wy-alert.wy-alert-info {
+ background: #e7f2fa
+}
+
+.rst-content .note .admonition-title,
+.rst-content .note .wy-alert-title,
+.rst-content .seealso .admonition-title,
+.rst-content .seealso .wy-alert-title,
+.rst-content .wy-alert-info.admonition-todo .admonition-title,
+.rst-content .wy-alert-info.admonition-todo .wy-alert-title,
+.rst-content .wy-alert-info.admonition .admonition-title,
+.rst-content .wy-alert-info.admonition .wy-alert-title,
+.rst-content .wy-alert-info.attention .admonition-title,
+.rst-content .wy-alert-info.attention .wy-alert-title,
+.rst-content .wy-alert-info.caution .admonition-title,
+.rst-content .wy-alert-info.caution .wy-alert-title,
+.rst-content .wy-alert-info.danger .admonition-title,
+.rst-content .wy-alert-info.danger .wy-alert-title,
+.rst-content .wy-alert-info.error .admonition-title,
+.rst-content .wy-alert-info.error .wy-alert-title,
+.rst-content .wy-alert-info.hint .admonition-title,
+.rst-content .wy-alert-info.hint .wy-alert-title,
+.rst-content .wy-alert-info.important .admonition-title,
+.rst-content .wy-alert-info.important .wy-alert-title,
+.rst-content .wy-alert-info.tip .admonition-title,
+.rst-content .wy-alert-info.tip .wy-alert-title,
+.rst-content .wy-alert-info.warning .admonition-title,
+.rst-content .wy-alert-info.warning .wy-alert-title,
+.rst-content .wy-alert.wy-alert-info .admonition-title,
+.wy-alert.wy-alert-info .rst-content .admonition-title,
+.wy-alert.wy-alert-info .wy-alert-title {
+ background: #6ab0de
+}
+
+.rst-content .hint,
+.rst-content .important,
+.rst-content .tip,
+.rst-content .wy-alert-success.admonition,
+.rst-content .wy-alert-success.admonition-todo,
+.rst-content .wy-alert-success.attention,
+.rst-content .wy-alert-success.caution,
+.rst-content .wy-alert-success.danger,
+.rst-content .wy-alert-success.error,
+.rst-content .wy-alert-success.note,
+.rst-content .wy-alert-success.seealso,
+.rst-content .wy-alert-success.warning,
+.wy-alert.wy-alert-success {
+ background: #dbfaf4
+}
+
+.rst-content .hint .admonition-title,
+.rst-content .hint .wy-alert-title,
+.rst-content .important .admonition-title,
+.rst-content .important .wy-alert-title,
+.rst-content .tip .admonition-title,
+.rst-content .tip .wy-alert-title,
+.rst-content .wy-alert-success.admonition-todo .admonition-title,
+.rst-content .wy-alert-success.admonition-todo .wy-alert-title,
+.rst-content .wy-alert-success.admonition .admonition-title,
+.rst-content .wy-alert-success.admonition .wy-alert-title,
+.rst-content .wy-alert-success.attention .admonition-title,
+.rst-content .wy-alert-success.attention .wy-alert-title,
+.rst-content .wy-alert-success.caution .admonition-title,
+.rst-content .wy-alert-success.caution .wy-alert-title,
+.rst-content .wy-alert-success.danger .admonition-title,
+.rst-content .wy-alert-success.danger .wy-alert-title,
+.rst-content .wy-alert-success.error .admonition-title,
+.rst-content .wy-alert-success.error .wy-alert-title,
+.rst-content .wy-alert-success.note .admonition-title,
+.rst-content .wy-alert-success.note .wy-alert-title,
+.rst-content .wy-alert-success.seealso .admonition-title,
+.rst-content .wy-alert-success.seealso .wy-alert-title,
+.rst-content .wy-alert-success.warning .admonition-title,
+.rst-content .wy-alert-success.warning .wy-alert-title,
+.rst-content .wy-alert.wy-alert-success .admonition-title,
+.wy-alert.wy-alert-success .rst-content .admonition-title,
+.wy-alert.wy-alert-success .wy-alert-title {
+ background: #1abc9c
+}
+
+.rst-content .wy-alert-neutral.admonition,
+.rst-content .wy-alert-neutral.admonition-todo,
+.rst-content .wy-alert-neutral.attention,
+.rst-content .wy-alert-neutral.caution,
+.rst-content .wy-alert-neutral.danger,
+.rst-content .wy-alert-neutral.error,
+.rst-content .wy-alert-neutral.hint,
+.rst-content .wy-alert-neutral.important,
+.rst-content .wy-alert-neutral.note,
+.rst-content .wy-alert-neutral.seealso,
+.rst-content .wy-alert-neutral.tip,
+.rst-content .wy-alert-neutral.warning,
+.wy-alert.wy-alert-neutral {
+ background: #f3f6f6
+}
+
+.rst-content .wy-alert-neutral.admonition-todo .admonition-title,
+.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,
+.rst-content .wy-alert-neutral.admonition .admonition-title,
+.rst-content .wy-alert-neutral.admonition .wy-alert-title,
+.rst-content .wy-alert-neutral.attention .admonition-title,
+.rst-content .wy-alert-neutral.attention .wy-alert-title,
+.rst-content .wy-alert-neutral.caution .admonition-title,
+.rst-content .wy-alert-neutral.caution .wy-alert-title,
+.rst-content .wy-alert-neutral.danger .admonition-title,
+.rst-content .wy-alert-neutral.danger .wy-alert-title,
+.rst-content .wy-alert-neutral.error .admonition-title,
+.rst-content .wy-alert-neutral.error .wy-alert-title,
+.rst-content .wy-alert-neutral.hint .admonition-title,
+.rst-content .wy-alert-neutral.hint .wy-alert-title,
+.rst-content .wy-alert-neutral.important .admonition-title,
+.rst-content .wy-alert-neutral.important .wy-alert-title,
+.rst-content .wy-alert-neutral.note .admonition-title,
+.rst-content .wy-alert-neutral.note .wy-alert-title,
+.rst-content .wy-alert-neutral.seealso .admonition-title,
+.rst-content .wy-alert-neutral.seealso .wy-alert-title,
+.rst-content .wy-alert-neutral.tip .admonition-title,
+.rst-content .wy-alert-neutral.tip .wy-alert-title,
+.rst-content .wy-alert-neutral.warning .admonition-title,
+.rst-content .wy-alert-neutral.warning .wy-alert-title,
+.rst-content .wy-alert.wy-alert-neutral .admonition-title,
+.wy-alert.wy-alert-neutral .rst-content .admonition-title,
+.wy-alert.wy-alert-neutral .wy-alert-title {
+ color: #404040;
+ background: #e1e4e5
+}
+
+.rst-content .wy-alert-neutral.admonition-todo a,
+.rst-content .wy-alert-neutral.admonition a,
+.rst-content .wy-alert-neutral.attention a,
+.rst-content .wy-alert-neutral.caution a,
+.rst-content .wy-alert-neutral.danger a,
+.rst-content .wy-alert-neutral.error a,
+.rst-content .wy-alert-neutral.hint a,
+.rst-content .wy-alert-neutral.important a,
+.rst-content .wy-alert-neutral.note a,
+.rst-content .wy-alert-neutral.seealso a,
+.rst-content .wy-alert-neutral.tip a,
+.rst-content .wy-alert-neutral.warning a,
+.wy-alert.wy-alert-neutral a {
+ color: #2980b9
+}
+
+.rst-content .admonition-todo p:last-child,
+.rst-content .admonition p:last-child,
+.rst-content .attention p:last-child,
+.rst-content .caution p:last-child,
+.rst-content .danger p:last-child,
+.rst-content .error p:last-child,
+.rst-content .hint p:last-child,
+.rst-content .important p:last-child,
+.rst-content .note p:last-child,
+.rst-content .seealso p:last-child,
+.rst-content .tip p:last-child,
+.rst-content .warning p:last-child,
+.wy-alert p:last-child {
+ margin-bottom: 0
+}
+
+.wy-tray-container {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ z-index: 600
+}
+
+.wy-tray-container li {
+ display: block;
+ width: 300px;
+ background: transparent;
+ color: #fff;
+ text-align: center;
+ box-shadow: 0 5px 5px 0 rgba(0, 0, 0, .1);
+ padding: 0 24px;
+ min-width: 20%;
+ opacity: 0;
+ height: 0;
+ line-height: 56px;
+ overflow: hidden;
+ -webkit-transition: all .3s ease-in;
+ -moz-transition: all .3s ease-in;
+ transition: all .3s ease-in
+}
+
+.wy-tray-container li.wy-tray-item-success {
+ background: #27ae60
+}
+
+.wy-tray-container li.wy-tray-item-info {
+ background: #2980b9
+}
+
+.wy-tray-container li.wy-tray-item-warning {
+ background: #e67e22
+}
+
+.wy-tray-container li.wy-tray-item-danger {
+ background: #e74c3c
+}
+
+.wy-tray-container li.on {
+ opacity: 1;
+ height: 56px
+}
+
+@media screen and (max-width:768px) {
+ .wy-tray-container {
+ bottom: auto;
+ top: 0;
+ width: 100%
+ }
+
+ .wy-tray-container li {
+ width: 100%
+ }
+}
+
+button {
+ font-size: 100%;
+ margin: 0;
+ vertical-align: baseline;
+ *vertical-align: middle;
+ cursor: pointer;
+ line-height: normal;
+ -webkit-appearance: button;
+ *overflow: visible
+}
+
+button::-moz-focus-inner,
+input::-moz-focus-inner {
+ border: 0;
+ padding: 0
+}
+
+button[disabled] {
+ cursor: default
+}
+
+.btn {
+ display: inline-block;
+ border-radius: 2px;
+ line-height: normal;
+ white-space: nowrap;
+ text-align: center;
+ cursor: pointer;
+ font-size: 100%;
+ padding: 6px 12px 8px;
+ color: #fff;
+ border: 1px solid rgba(0, 0, 0, .1);
+ background-color: #27ae60;
+ text-decoration: none;
+ font-weight: 400;
+ font-family: Lato, proxima-nova, Helvetica Neue, Arial, sans-serif;
+ box-shadow: inset 0 1px 2px -1px hsla(0, 0%, 100%, .5), inset 0 -2px 0 0 rgba(0, 0, 0, .1);
+ vertical-align: middle;
+ *display: inline;
+ zoom: 1;
+ -webkit-user-drag: none;
+ -webkit-user-select: none;
+ -moz-user-select: none;
+ -ms-user-select: none;
+ user-select: none;
+ -webkit-transition: all .1s linear;
+ -moz-transition: all .1s linear;
+ transition: all .1s linear
+}
+
+.btn-hover {
+ background: #2e8ece;
+ color: #fff
+}
+
+.btn:hover {
+ background: #2cc36b;
+ color: #fff
+}
+
+.btn:focus {
+ background: #2cc36b;
+ outline: 0
+}
+
+.btn:active {
+ box-shadow: inset 0 -1px 0 0 rgba(0, 0, 0, .05), inset 0 2px 0 0 rgba(0, 0, 0, .1);
+ padding: 8px 12px 6px
+}
+
+.btn:visited {
+ color: #fff
+}
+
+.btn-disabled,
+.btn-disabled:active,
+.btn-disabled:focus,
+.btn-disabled:hover,
+.btn:disabled {
+ background-image: none;
+ filter: progid:DXImageTransform.Microsoft.gradient(enabled=false);
+ filter: alpha(opacity=40);
+ opacity: .4;
+ cursor: not-allowed;
+ box-shadow: none
+}
+
+.btn::-moz-focus-inner {
+ padding: 0;
+ border: 0
+}
+
+.btn-small {
+ font-size: 80%
+}
+
+.btn-info {
+ background-color: #2980b9 !important
+}
+
+.btn-info:hover {
+ background-color: #2e8ece !important
+}
+
+.btn-neutral {
+ background-color: #f3f6f6 !important;
+ color: #404040 !important
+}
+
+.btn-neutral:hover {
+ background-color: #e5ebeb !important;
+ color: #404040
+}
+
+.btn-neutral:visited {
+ color: #404040 !important
+}
+
+.btn-success {
+ background-color: #27ae60 !important
+}
+
+.btn-success:hover {
+ background-color: #295 !important
+}
+
+.btn-danger {
+ background-color: #e74c3c !important
+}
+
+.btn-danger:hover {
+ background-color: #ea6153 !important
+}
+
+.btn-warning {
+ background-color: #e67e22 !important
+}
+
+.btn-warning:hover {
+ background-color: #e98b39 !important
+}
+
+.btn-invert {
+ background-color: #222
+}
+
+.btn-invert:hover {
+ background-color: #2f2f2f !important
+}
+
+.btn-link {
+ background-color: transparent !important;
+ color: #2980b9;
+ box-shadow: none;
+ border-color: transparent !important
+}
+
+.btn-link:active,
+.btn-link:hover {
+ background-color: transparent !important;
+ color: #409ad5 !important;
+ box-shadow: none
+}
+
+.btn-link:visited {
+ color: #9b59b6
+}
+
+.wy-btn-group .btn,
+.wy-control .btn {
+ vertical-align: middle
+}
+
+.wy-btn-group {
+ margin-bottom: 24px;
+ *zoom: 1
+}
+
+.wy-btn-group:after,
+.wy-btn-group:before {
+ display: table;
+ content: ""
+}
+
+.wy-btn-group:after {
+ clear: both
+}
+
+.wy-dropdown {
+ position: relative;
+ display: inline-block
+}
+
+.wy-dropdown-active .wy-dropdown-menu {
+ display: block
+}
+
+.wy-dropdown-menu {
+ position: absolute;
+ left: 0;
+ display: none;
+ float: left;
+ top: 100%;
+ min-width: 100%;
+ background: #fcfcfc;
+ z-index: 100;
+ border: 1px solid #cfd7dd;
+ box-shadow: 0 2px 2px 0 rgba(0, 0, 0, .1);
+ padding: 12px
+}
+
+.wy-dropdown-menu>dd>a {
+ display: block;
+ clear: both;
+ color: #404040;
+ white-space: nowrap;
+ font-size: 90%;
+ padding: 0 12px;
+ cursor: pointer
+}
+
+.wy-dropdown-menu>dd>a:hover {
+ background: #2980b9;
+ color: #fff
+}
+
+.wy-dropdown-menu>dd.divider {
+ border-top: 1px solid #cfd7dd;
+ margin: 6px 0
+}
+
+.wy-dropdown-menu>dd.search {
+ padding-bottom: 12px
+}
+
+.wy-dropdown-menu>dd.search input[type=search] {
+ width: 100%
+}
+
+.wy-dropdown-menu>dd.call-to-action {
+ background: #e3e3e3;
+ text-transform: uppercase;
+ font-weight: 500;
+ font-size: 80%
+}
+
+.wy-dropdown-menu>dd.call-to-action:hover {
+ background: #e3e3e3
+}
+
+.wy-dropdown-menu>dd.call-to-action .btn {
+ color: #fff
+}
+
+.wy-dropdown.wy-dropdown-up .wy-dropdown-menu {
+ bottom: 100%;
+ top: auto;
+ left: auto;
+ right: 0
+}
+
+.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu {
+ background: #fcfcfc;
+ margin-top: 2px
+}
+
+.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a {
+ padding: 6px 12px
+}
+
+.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover {
+ background: #2980b9;
+ color: #fff
+}
+
+.wy-dropdown.wy-dropdown-left .wy-dropdown-menu {
+ right: 0;
+ left: auto;
+ text-align: right
+}
+
+.wy-dropdown-arrow:before {
+ content: " ";
+ border-bottom: 5px solid #f5f5f5;
+ border-left: 5px solid transparent;
+ border-right: 5px solid transparent;
+ position: absolute;
+ display: block;
+ top: -4px;
+ left: 50%;
+ margin-left: -3px
+}
+
+.wy-dropdown-arrow.wy-dropdown-arrow-left:before {
+ left: 11px
+}
+
+.wy-form-stacked select {
+ display: block
+}
+
+.wy-form-aligned .wy-help-inline,
+.wy-form-aligned input,
+.wy-form-aligned label,
+.wy-form-aligned select,
+.wy-form-aligned textarea {
+ display: inline-block;
+ *display: inline;
+ *zoom: 1;
+ vertical-align: middle
+}
+
+.wy-form-aligned .wy-control-group>label {
+ display: inline-block;
+ vertical-align: middle;
+ width: 10em;
+ margin: 6px 12px 0 0;
+ float: left
+}
+
+.wy-form-aligned .wy-control {
+ float: left
+}
+
+.wy-form-aligned .wy-control label {
+ display: block
+}
+
+.wy-form-aligned .wy-control select {
+ margin-top: 6px
+}
+
+fieldset {
+ margin: 0
+}
+
+fieldset,
+legend {
+ border: 0;
+ padding: 0
+}
+
+legend {
+ width: 100%;
+ white-space: normal;
+ margin-bottom: 24px;
+ font-size: 150%;
+ *margin-left: -7px
+}
+
+label,
+legend {
+ display: block
+}
+
+label {
+ margin: 0 0 .3125em;
+ color: #333;
+ font-size: 90%
+}
+
+input,
+select,
+textarea {
+ font-size: 100%;
+ margin: 0;
+ vertical-align: baseline;
+ *vertical-align: middle
+}
+
+.wy-control-group {
+ margin-bottom: 24px;
+ max-width: 1200px;
+ margin-left: auto;
+ margin-right: auto;
+ *zoom: 1
+}
+
+.wy-control-group:after,
+.wy-control-group:before {
+ display: table;
+ content: ""
+}
+
+.wy-control-group:after {
+ clear: both
+}
+
+.wy-control-group.wy-control-group-required>label:after {
+ content: " *";
+ color: #e74c3c
+}
+
+.wy-control-group .wy-form-full,
+.wy-control-group .wy-form-halves,
+.wy-control-group .wy-form-thirds {
+ padding-bottom: 12px
+}
+
+.wy-control-group .wy-form-full input[type=color],
+.wy-control-group .wy-form-full input[type=date],
+.wy-control-group .wy-form-full input[type=datetime-local],
+.wy-control-group .wy-form-full input[type=datetime],
+.wy-control-group .wy-form-full input[type=email],
+.wy-control-group .wy-form-full input[type=month],
+.wy-control-group .wy-form-full input[type=number],
+.wy-control-group .wy-form-full input[type=password],
+.wy-control-group .wy-form-full input[type=search],
+.wy-control-group .wy-form-full input[type=tel],
+.wy-control-group .wy-form-full input[type=text],
+.wy-control-group .wy-form-full input[type=time],
+.wy-control-group .wy-form-full input[type=url],
+.wy-control-group .wy-form-full input[type=week],
+.wy-control-group .wy-form-full select,
+.wy-control-group .wy-form-halves input[type=color],
+.wy-control-group .wy-form-halves input[type=date],
+.wy-control-group .wy-form-halves input[type=datetime-local],
+.wy-control-group .wy-form-halves input[type=datetime],
+.wy-control-group .wy-form-halves input[type=email],
+.wy-control-group .wy-form-halves input[type=month],
+.wy-control-group .wy-form-halves input[type=number],
+.wy-control-group .wy-form-halves input[type=password],
+.wy-control-group .wy-form-halves input[type=search],
+.wy-control-group .wy-form-halves input[type=tel],
+.wy-control-group .wy-form-halves input[type=text],
+.wy-control-group .wy-form-halves input[type=time],
+.wy-control-group .wy-form-halves input[type=url],
+.wy-control-group .wy-form-halves input[type=week],
+.wy-control-group .wy-form-halves select,
+.wy-control-group .wy-form-thirds input[type=color],
+.wy-control-group .wy-form-thirds input[type=date],
+.wy-control-group .wy-form-thirds input[type=datetime-local],
+.wy-control-group .wy-form-thirds input[type=datetime],
+.wy-control-group .wy-form-thirds input[type=email],
+.wy-control-group .wy-form-thirds input[type=month],
+.wy-control-group .wy-form-thirds input[type=number],
+.wy-control-group .wy-form-thirds input[type=password],
+.wy-control-group .wy-form-thirds input[type=search],
+.wy-control-group .wy-form-thirds input[type=tel],
+.wy-control-group .wy-form-thirds input[type=text],
+.wy-control-group .wy-form-thirds input[type=time],
+.wy-control-group .wy-form-thirds input[type=url],
+.wy-control-group .wy-form-thirds input[type=week],
+.wy-control-group .wy-form-thirds select {
+ width: 100%
+}
+
+.wy-control-group .wy-form-full {
+ float: left;
+ display: block;
+ width: 100%;
+ margin-right: 0
+}
+
+.wy-control-group .wy-form-full:last-child {
+ margin-right: 0
+}
+
+.wy-control-group .wy-form-halves {
+ float: left;
+ display: block;
+ margin-right: 2.35765%;
+ width: 48.82117%
+}
+
+.wy-control-group .wy-form-halves:last-child,
+.wy-control-group .wy-form-halves:nth-of-type(2n) {
+ margin-right: 0
+}
+
+.wy-control-group .wy-form-halves:nth-of-type(odd) {
+ clear: left
+}
+
+.wy-control-group .wy-form-thirds {
+ float: left;
+ display: block;
+ margin-right: 2.35765%;
+ width: 31.76157%
+}
+
+.wy-control-group .wy-form-thirds:last-child,
+.wy-control-group .wy-form-thirds:nth-of-type(3n) {
+ margin-right: 0
+}
+
+.wy-control-group .wy-form-thirds:nth-of-type(3n+1) {
+ clear: left
+}
+
+.wy-control-group.wy-control-group-no-input .wy-control,
+.wy-control-no-input {
+ margin: 6px 0 0;
+ font-size: 90%
+}
+
+.wy-control-no-input {
+ display: inline-block
+}
+
+.wy-control-group.fluid-input input[type=color],
+.wy-control-group.fluid-input input[type=date],
+.wy-control-group.fluid-input input[type=datetime-local],
+.wy-control-group.fluid-input input[type=datetime],
+.wy-control-group.fluid-input input[type=email],
+.wy-control-group.fluid-input input[type=month],
+.wy-control-group.fluid-input input[type=number],
+.wy-control-group.fluid-input input[type=password],
+.wy-control-group.fluid-input input[type=search],
+.wy-control-group.fluid-input input[type=tel],
+.wy-control-group.fluid-input input[type=text],
+.wy-control-group.fluid-input input[type=time],
+.wy-control-group.fluid-input input[type=url],
+.wy-control-group.fluid-input input[type=week] {
+ width: 100%
+}
+
+.wy-form-message-inline {
+ padding-left: .3em;
+ color: #666;
+ font-size: 90%
+}
+
+.wy-form-message {
+ display: block;
+ color: #999;
+ font-size: 70%;
+ margin-top: .3125em;
+ font-style: italic
+}
+
+.wy-form-message p {
+ font-size: inherit;
+ font-style: italic;
+ margin-bottom: 6px
+}
+
+.wy-form-message p:last-child {
+ margin-bottom: 0
+}
+
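+/* Native input styling and validation states */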
+input {
+ line-height: normal
+}
+
+input[type=button],
+input[type=reset],
+input[type=submit] {
+ -webkit-appearance: button;
+ cursor: pointer;
+ font-family: Lato, proxima-nova, Helvetica Neue, Arial, sans-serif;
+ *overflow: visible
+}
+
+input[type=color],
+input[type=date],
+input[type=datetime-local],
+input[type=datetime],
+input[type=email],
+input[type=month],
+input[type=number],
+input[type=password],
+input[type=search],
+input[type=tel],
+input[type=text],
+input[type=time],
+input[type=url],
+input[type=week] {
+ -webkit-appearance: none;
+ padding: 6px;
+ display: inline-block;
+ border: 1px solid #ccc;
+ font-size: 80%;
+ font-family: Lato, proxima-nova, Helvetica Neue, Arial, sans-serif;
+ box-shadow: inset 0 1px 3px #ddd;
+ border-radius: 0;
+ -webkit-transition: border .3s linear;
+ -moz-transition: border .3s linear;
+ transition: border .3s linear
+}
+
+input[type=datetime-local] {
+ padding: .34375em .625em
+}
+
+input[disabled] {
+ cursor: default
+}
+
+input[type=checkbox],
+input[type=radio] {
+ padding: 0;
+ margin-right: .3125em;
+ *height: 13px;
+ *width: 13px
+}
+
+input[type=checkbox],
+input[type=radio],
+input[type=search] {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box
+}
+
+input[type=search]::-webkit-search-cancel-button,
+input[type=search]::-webkit-search-decoration {
+ -webkit-appearance: none
+}
+
+input[type=color]:focus,
+input[type=date]:focus,
+input[type=datetime-local]:focus,
+input[type=datetime]:focus,
+input[type=email]:focus,
+input[type=month]:focus,
+input[type=number]:focus,
+input[type=password]:focus,
+input[type=search]:focus,
+input[type=tel]:focus,
+input[type=text]:focus,
+input[type=time]:focus,
+input[type=url]:focus,
+input[type=week]:focus {
+ outline: 0;
+ outline: thin dotted\9;
+ border-color: #333
+}
+
+input.no-focus:focus {
+ border-color: #ccc !important
+}
+
+input[type=checkbox]:focus,
+input[type=file]:focus,
+input[type=radio]:focus {
+ outline: thin dotted #333;
+ outline: 1px auto #129fea
+}
+
+input[type=color][disabled],
+input[type=date][disabled],
+input[type=datetime-local][disabled],
+input[type=datetime][disabled],
+input[type=email][disabled],
+input[type=month][disabled],
+input[type=number][disabled],
+input[type=password][disabled],
+input[type=search][disabled],
+input[type=tel][disabled],
+input[type=text][disabled],
+input[type=time][disabled],
+input[type=url][disabled],
+input[type=week][disabled] {
+ cursor: not-allowed;
+ background-color: #fafafa
+}
+
+input:focus:invalid,
+select:focus:invalid,
+textarea:focus:invalid {
+ color: #e74c3c;
+ border: 1px solid #e74c3c
+}
+
+input:focus:invalid:focus,
+select:focus:invalid:focus,
+textarea:focus:invalid:focus {
+ border-color: #e74c3c
+}
+
+input[type=checkbox]:focus:invalid:focus,
+input[type=file]:focus:invalid:focus,
+input[type=radio]:focus:invalid:focus {
+ outline-color: #e74c3c
+}
+
+input.wy-input-large {
+ padding: 12px;
+ font-size: 100%
+}
+
+textarea {
+ overflow: auto;
+ vertical-align: top;
+ width: 100%;
+ font-family: Lato, proxima-nova, Helvetica Neue, Arial, sans-serif
+}
+
+select,
+textarea {
+ padding: .5em .625em;
+ display: inline-block;
+ border: 1px solid #ccc;
+ font-size: 80%;
+ box-shadow: inset 0 1px 3px #ddd;
+ -webkit-transition: border .3s linear;
+ -moz-transition: border .3s linear;
+ transition: border .3s linear
+}
+
+select {
+ border: 1px solid #ccc;
+ background-color: #fff
+}
+
+select[multiple] {
+ height: auto
+}
+
+select:focus,
+textarea:focus {
+ outline: 0
+}
+
+input[readonly],
+select[disabled],
+select[readonly],
+textarea[disabled],
+textarea[readonly] {
+ cursor: not-allowed;
+ background-color: #fafafa
+}
+
+input[type=checkbox][disabled],
+input[type=radio][disabled] {
+ cursor: not-allowed
+}
+
+.wy-checkbox,
+.wy-radio {
+ margin: 6px 0;
+ color: #404040;
+ display: block
+}
+
+.wy-checkbox input,
+.wy-radio input {
+ vertical-align: baseline
+}
+
+.wy-form-message-inline {
+ display: inline-block;
+ *display: inline;
+ *zoom: 1;
+ vertical-align: middle
+}
+
+.wy-input-prefix,
+.wy-input-suffix {
+ white-space: nowrap;
+ padding: 6px
+}
+
+.wy-input-prefix .wy-input-context,
+.wy-input-suffix .wy-input-context {
+ line-height: 27px;
+ padding: 0 8px;
+ display: inline-block;
+ font-size: 80%;
+ background-color: #f3f6f6;
+ border: 1px solid #ccc;
+ color: #999
+}
+
+.wy-input-suffix .wy-input-context {
+ border-left: 0
+}
+
+.wy-input-prefix .wy-input-context {
+ border-right: 0
+}
+
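+/* Toggle switch widget */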
+.wy-switch {
+ position: relative;
+ display: block;
+ height: 24px;
+ margin-top: 12px;
+ cursor: pointer
+}
+
+.wy-switch:before {
+ left: 0;
+ top: 0;
+ width: 36px;
+ height: 12px;
+ background: #ccc
+}
+
+.wy-switch:after,
+.wy-switch:before {
+ position: absolute;
+ content: "";
+ display: block;
+ border-radius: 4px;
+ -webkit-transition: all .2s ease-in-out;
+ -moz-transition: all .2s ease-in-out;
+ transition: all .2s ease-in-out
+}
+
+.wy-switch:after {
+ width: 18px;
+ height: 18px;
+ background: #999;
+ left: -3px;
+ top: -3px
+}
+
+.wy-switch span {
+ position: absolute;
+ left: 48px;
+ display: block;
+ font-size: 12px;
+ color: #ccc;
+ line-height: 1
+}
+
+.wy-switch.active:before {
+ background: #1e8449
+}
+
+.wy-switch.active:after {
+ left: 24px;
+ background: #27ae60
+}
+
+.wy-switch.disabled {
+ cursor: not-allowed;
+ opacity: .8
+}
+
+.wy-control-group.wy-control-group-error .wy-form-message,
+.wy-control-group.wy-control-group-error>label {
+ color: #e74c3c
+}
+
+.wy-control-group.wy-control-group-error input[type=color],
+.wy-control-group.wy-control-group-error input[type=date],
+.wy-control-group.wy-control-group-error input[type=datetime-local],
+.wy-control-group.wy-control-group-error input[type=datetime],
+.wy-control-group.wy-control-group-error input[type=email],
+.wy-control-group.wy-control-group-error input[type=month],
+.wy-control-group.wy-control-group-error input[type=number],
+.wy-control-group.wy-control-group-error input[type=password],
+.wy-control-group.wy-control-group-error input[type=search],
+.wy-control-group.wy-control-group-error input[type=tel],
+.wy-control-group.wy-control-group-error input[type=text],
+.wy-control-group.wy-control-group-error input[type=time],
+.wy-control-group.wy-control-group-error input[type=url],
+.wy-control-group.wy-control-group-error input[type=week],
+.wy-control-group.wy-control-group-error textarea {
+ border: 1px solid #e74c3c
+}
+
+.wy-inline-validate {
+ white-space: nowrap
+}
+
+.wy-inline-validate .wy-input-context {
+ padding: .5em .625em;
+ display: inline-block;
+ font-size: 80%
+}
+
+.wy-inline-validate.wy-inline-validate-success .wy-input-context {
+ color: #27ae60
+}
+
+.wy-inline-validate.wy-inline-validate-danger .wy-input-context {
+ color: #e74c3c
+}
+
+.wy-inline-validate.wy-inline-validate-warning .wy-input-context {
+ color: #e67e22
+}
+
+.wy-inline-validate.wy-inline-validate-info .wy-input-context {
+ color: #2980b9
+}
+
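+/* Rotation and mirroring utility classes */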
+.rotate-90 {
+ -webkit-transform: rotate(90deg);
+ -moz-transform: rotate(90deg);
+ -ms-transform: rotate(90deg);
+ -o-transform: rotate(90deg);
+ transform: rotate(90deg)
+}
+
+.rotate-180 {
+ -webkit-transform: rotate(180deg);
+ -moz-transform: rotate(180deg);
+ -ms-transform: rotate(180deg);
+ -o-transform: rotate(180deg);
+ transform: rotate(180deg)
+}
+
+.rotate-270 {
+ -webkit-transform: rotate(270deg);
+ -moz-transform: rotate(270deg);
+ -ms-transform: rotate(270deg);
+ -o-transform: rotate(270deg);
+ transform: rotate(270deg)
+}
+
+.mirror {
+ -webkit-transform: scaleX(-1);
+ -moz-transform: scaleX(-1);
+ -ms-transform: scaleX(-1);
+ -o-transform: scaleX(-1);
+ transform: scaleX(-1)
+}
+
+.mirror.rotate-90 {
+ -webkit-transform: scaleX(-1) rotate(90deg);
+ -moz-transform: scaleX(-1) rotate(90deg);
+ -ms-transform: scaleX(-1) rotate(90deg);
+ -o-transform: scaleX(-1) rotate(90deg);
+ transform: scaleX(-1) rotate(90deg)
+}
+
+.mirror.rotate-180 {
+ -webkit-transform: scaleX(-1) rotate(180deg);
+ -moz-transform: scaleX(-1) rotate(180deg);
+ -ms-transform: scaleX(-1) rotate(180deg);
+ -o-transform: scaleX(-1) rotate(180deg);
+ transform: scaleX(-1) rotate(180deg)
+}
+
+.mirror.rotate-270 {
+ -webkit-transform: scaleX(-1) rotate(270deg);
+ -moz-transform: scaleX(-1) rotate(270deg);
+ -ms-transform: scaleX(-1) rotate(270deg);
+ -o-transform: scaleX(-1) rotate(270deg);
+ transform: scaleX(-1) rotate(270deg)
+}
+
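+/* Small-screen form adjustments */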
+@media only screen and (max-width:480px) {
+ .wy-form button[type=submit] {
+ margin: .7em 0 0
+ }
+
+ .wy-form input[type=color],
+ .wy-form input[type=date],
+ .wy-form input[type=datetime-local],
+ .wy-form input[type=datetime],
+ .wy-form input[type=email],
+ .wy-form input[type=month],
+ .wy-form input[type=number],
+ .wy-form input[type=password],
+ .wy-form input[type=search],
+ .wy-form input[type=tel],
+ .wy-form input[type=text],
+ .wy-form input[type=time],
+ .wy-form input[type=url],
+ .wy-form input[type=week],
+ .wy-form label {
+ margin-bottom: .3em;
+ display: block
+ }
+
+ .wy-form input[type=color],
+ .wy-form input[type=date],
+ .wy-form input[type=datetime-local],
+ .wy-form input[type=datetime],
+ .wy-form input[type=email],
+ .wy-form input[type=month],
+ .wy-form input[type=number],
+ .wy-form input[type=password],
+ .wy-form input[type=search],
+ .wy-form input[type=tel],
+ .wy-form input[type=time],
+ .wy-form input[type=url],
+ .wy-form input[type=week] {
+ margin-bottom: 0
+ }
+
+ .wy-form-aligned .wy-control-group label {
+ margin-bottom: .3em;
+ text-align: left;
+ display: block;
+ width: 100%
+ }
+
+ .wy-form-aligned .wy-control {
+ margin: 1.5em 0 0
+ }
+
+ .wy-form-message,
+ .wy-form-message-inline,
+ .wy-form .wy-help-inline {
+ display: block;
+ font-size: 80%;
+ padding: 6px 0
+ }
+}
+
+@media screen and (max-width:768px) {
+ .tablet-hide {
+ display: none
+ }
+}
+
+@media screen and (max-width:480px) {
+ .mobile-hide {
+ display: none
+ }
+}
+
+.float-left {
+ float: left
+}
+
+.float-right {
+ float: right
+}
+
+.full-width {
+ width: 100%
+}
+
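+/* Tables */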
+.rst-content table.docutils,
+.rst-content table.field-list,
+.wy-table {
+ border-collapse: collapse;
+ border-spacing: 0;
+ empty-cells: show;
+ margin-bottom: 24px
+}
+
+.rst-content table.docutils caption,
+.rst-content table.field-list caption,
+.wy-table caption {
+ color: #000;
+ font: italic 85%/1 arial, sans-serif;
+ padding: 1em 0;
+ text-align: center
+}
+
+.rst-content table.docutils td,
+.rst-content table.docutils th,
+.rst-content table.field-list td,
+.rst-content table.field-list th,
+.wy-table td,
+.wy-table th {
+ font-size: 90%;
+ margin: 0;
+ overflow: visible;
+ padding: 8px 16px
+}
+
+.rst-content table.docutils td:first-child,
+.rst-content table.docutils th:first-child,
+.rst-content table.field-list td:first-child,
+.rst-content table.field-list th:first-child,
+.wy-table td:first-child,
+.wy-table th:first-child {
+ border-left-width: 0
+}
+
+.rst-content table.docutils thead,
+.rst-content table.field-list thead,
+.wy-table thead {
+ color: #000;
+ text-align: left;
+ vertical-align: bottom;
+ white-space: nowrap
+}
+
+.rst-content table.docutils thead th,
+.rst-content table.field-list thead th,
+.wy-table thead th {
+ font-weight: 700;
+ border-bottom: 2px solid #e1e4e5
+}
+
+.rst-content table.docutils td,
+.rst-content table.field-list td,
+.wy-table td {
+ background-color: transparent;
+ vertical-align: middle
+}
+
+.rst-content table.docutils td p,
+.rst-content table.field-list td p,
+.wy-table td p {
+ line-height: 18px
+}
+
+.rst-content table.docutils td p:last-child,
+.rst-content table.field-list td p:last-child,
+.wy-table td p:last-child {
+ margin-bottom: 0
+}
+
+.rst-content table.docutils .wy-table-cell-min,
+.rst-content table.field-list .wy-table-cell-min,
+.wy-table .wy-table-cell-min {
+ width: 1%;
+ padding-right: 0
+}
+
+.rst-content table.docutils .wy-table-cell-min input[type=checkbox],
+.rst-content table.field-list .wy-table-cell-min input[type=checkbox],
+.wy-table .wy-table-cell-min input[type=checkbox] {
+ margin: 0
+}
+
+.wy-table-secondary {
+ color: grey;
+ font-size: 90%
+}
+
+.wy-table-tertiary {
+ color: grey;
+ font-size: 80%
+}
+
+.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,
+.wy-table-backed,
+.wy-table-odd td,
+.wy-table-striped tr:nth-child(2n-1) td {
+ background-color: #f3f6f6
+}
+
+.rst-content table.docutils,
+.wy-table-bordered-all {
+ border: 1px solid #e1e4e5
+}
+
+.rst-content table.docutils td,
+.wy-table-bordered-all td {
+ border-bottom: 1px solid #e1e4e5;
+ border-left: 1px solid #e1e4e5
+}
+
+.rst-content table.docutils tbody>tr:last-child td,
+.wy-table-bordered-all tbody>tr:last-child td {
+ border-bottom-width: 0
+}
+
+.wy-table-bordered {
+ border: 1px solid #e1e4e5
+}
+
+.wy-table-bordered-rows td {
+ border-bottom: 1px solid #e1e4e5
+}
+
+.wy-table-bordered-rows tbody>tr:last-child td {
+ border-bottom-width: 0
+}
+
+.wy-table-horizontal td,
+.wy-table-horizontal th {
+ border-width: 0 0 1px;
+ border-bottom: 1px solid #e1e4e5
+}
+
+.wy-table-horizontal tbody>tr:last-child td {
+ border-bottom-width: 0
+}
+
+.wy-table-responsive {
+ margin-bottom: 24px;
+ max-width: 100%;
+ overflow: auto
+}
+
+.wy-table-responsive table {
+ margin-bottom: 0 !important
+}
+
+.wy-table-responsive table td,
+.wy-table-responsive table th {
+ white-space: nowrap
+}
+
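+/* Base typography and links */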
+a {
+ color: #2980b9;
+ text-decoration: none;
+ cursor: pointer
+}
+
+a:hover {
+ color: #3091d1
+}
+
+a:visited {
+ color: #9b59b6
+}
+
+html {
+ height: 100%
+}
+
+body,
+html {
+ overflow-x: hidden
+}
+
+body {
+ font-family: Lato, proxima-nova, Helvetica Neue, Arial, sans-serif;
+ font-weight: 400;
+ color: #404040;
+ min-height: 100%;
+ background: #edf0f2
+}
+
+.wy-text-left {
+ text-align: left
+}
+
+.wy-text-center {
+ text-align: center
+}
+
+.wy-text-right {
+ text-align: right
+}
+
+.wy-text-large {
+ font-size: 120%
+}
+
+.wy-text-normal {
+ font-size: 100%
+}
+
+.wy-text-small,
+small {
+ font-size: 80%
+}
+
+.wy-text-strike {
+ text-decoration: line-through
+}
+
+.wy-text-warning {
+ color: #e67e22 !important
+}
+
+a.wy-text-warning:hover {
+ color: #eb9950 !important
+}
+
+.wy-text-info {
+ color: #2980b9 !important
+}
+
+a.wy-text-info:hover {
+ color: #409ad5 !important
+}
+
+.wy-text-success {
+ color: #27ae60 !important
+}
+
+a.wy-text-success:hover {
+ color: #36d278 !important
+}
+
+.wy-text-danger {
+ color: #e74c3c !important
+}
+
+a.wy-text-danger:hover {
+ color: #ed7669 !important
+}
+
+.wy-text-neutral {
+ color: #404040 !important
+}
+
+a.wy-text-neutral:hover {
+ color: #595959 !important
+}
+
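+/* Headings */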
+.rst-content .toctree-wrapper>p.caption,
+h1,
+h2,
+h3,
+h4,
+h5,
+h6,
+legend {
+ margin-top: 0;
+ font-weight: 700;
+ font-family: Roboto Slab, ff-tisa-web-pro, Georgia, Arial, sans-serif
+}
+
+p {
+ line-height: 24px;
+ font-size: 16px;
+ margin: 0 0 24px
+}
+
+h1 {
+ font-size: 175%
+}
+
+.rst-content .toctree-wrapper>p.caption,
+h2 {
+ font-size: 150%
+}
+
+h3 {
+ font-size: 125%
+}
+
+h4 {
+ font-size: 115%
+}
+
+h5 {
+ font-size: 110%
+}
+
+h6 {
+ font-size: 100%
+}
+
+hr {
+ display: block;
+ height: 1px;
+ border: 0;
+ border-top: 1px solid #e1e4e5;
+ margin: 24px 0;
+ padding: 0
+}
+
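+/* Inline code literals */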
+.rst-content code,
+.rst-content tt,
+code {
+ white-space: nowrap;
+ max-width: 100%;
+ background: #fff;
+ border: 1px solid #e1e4e5;
+ font-size: 75%;
+ padding: 0 5px;
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ color: #e74c3c;
+ overflow-x: auto
+}
+
+.rst-content tt.code-large,
+code.code-large {
+ font-size: 90%
+}
+
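+/* Bulleted and numbered lists */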
+.rst-content .section ul,
+.rst-content .toctree-wrapper ul,
+.rst-content section ul,
+.wy-plain-list-disc,
+article ul {
+ list-style: disc;
+ line-height: 24px;
+ margin-bottom: 24px
+}
+
+.rst-content .section ul li,
+.rst-content .toctree-wrapper ul li,
+.rst-content section ul li,
+.wy-plain-list-disc li,
+article ul li {
+ list-style: disc;
+ margin-left: 24px
+}
+
+.rst-content .section ul li p:last-child,
+.rst-content .section ul li ul,
+.rst-content .toctree-wrapper ul li p:last-child,
+.rst-content .toctree-wrapper ul li ul,
+.rst-content section ul li p:last-child,
+.rst-content section ul li ul,
+.wy-plain-list-disc li p:last-child,
+.wy-plain-list-disc li ul,
+article ul li p:last-child,
+article ul li ul {
+ margin-bottom: 0
+}
+
+.rst-content .section ul li li,
+.rst-content .toctree-wrapper ul li li,
+.rst-content section ul li li,
+.wy-plain-list-disc li li,
+article ul li li {
+ list-style: circle
+}
+
+.rst-content .section ul li li li,
+.rst-content .toctree-wrapper ul li li li,
+.rst-content section ul li li li,
+.wy-plain-list-disc li li li,
+article ul li li li {
+ list-style: square
+}
+
+.rst-content .section ul li ol li,
+.rst-content .toctree-wrapper ul li ol li,
+.rst-content section ul li ol li,
+.wy-plain-list-disc li ol li,
+article ul li ol li {
+ list-style: decimal
+}
+
+.rst-content .section ol,
+.rst-content .section ol.arabic,
+.rst-content .toctree-wrapper ol,
+.rst-content .toctree-wrapper ol.arabic,
+.rst-content section ol,
+.rst-content section ol.arabic,
+.wy-plain-list-decimal,
+article ol {
+ list-style: decimal;
+ line-height: 24px;
+ margin-bottom: 24px
+}
+
+.rst-content .section ol.arabic li,
+.rst-content .section ol li,
+.rst-content .toctree-wrapper ol.arabic li,
+.rst-content .toctree-wrapper ol li,
+.rst-content section ol.arabic li,
+.rst-content section ol li,
+.wy-plain-list-decimal li,
+article ol li {
+ list-style: decimal;
+ margin-left: 24px
+}
+
+.rst-content .section ol.arabic li ul,
+.rst-content .section ol li p:last-child,
+.rst-content .section ol li ul,
+.rst-content .toctree-wrapper ol.arabic li ul,
+.rst-content .toctree-wrapper ol li p:last-child,
+.rst-content .toctree-wrapper ol li ul,
+.rst-content section ol.arabic li ul,
+.rst-content section ol li p:last-child,
+.rst-content section ol li ul,
+.wy-plain-list-decimal li p:last-child,
+.wy-plain-list-decimal li ul,
+article ol li p:last-child,
+article ol li ul {
+ margin-bottom: 0
+}
+
+.rst-content .section ol.arabic li ul li,
+.rst-content .section ol li ul li,
+.rst-content .toctree-wrapper ol.arabic li ul li,
+.rst-content .toctree-wrapper ol li ul li,
+.rst-content section ol.arabic li ul li,
+.rst-content section ol li ul li,
+.wy-plain-list-decimal li ul li,
+article ol li ul li {
+ list-style: disc
+}
+
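+/* Breadcrumbs */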
+.wy-breadcrumbs {
+ *zoom: 1
+}
+
+.wy-breadcrumbs:after,
+.wy-breadcrumbs:before {
+ display: table;
+ content: ""
+}
+
+.wy-breadcrumbs:after {
+ clear: both
+}
+
+.wy-breadcrumbs>li {
+ display: inline-block;
+ padding-top: 5px
+}
+
+.wy-breadcrumbs>li.wy-breadcrumbs-aside {
+ float: right
+}
+
+.rst-content .wy-breadcrumbs>li code,
+.rst-content .wy-breadcrumbs>li tt,
+.wy-breadcrumbs>li .rst-content tt,
+.wy-breadcrumbs>li code {
+ all: inherit;
+ color: inherit
+}
+
+.breadcrumb-item:before {
+ content: "/";
+ color: #bbb;
+ font-size: 13px;
+ padding: 0 6px 0 3px
+}
+
+.wy-breadcrumbs-extra {
+ margin-bottom: 0;
+ color: #b3b3b3;
+ font-size: 80%;
+ display: inline-block
+}
+
+@media screen and (max-width:480px) {
+
+ .wy-breadcrumbs-extra,
+ .wy-breadcrumbs li.wy-breadcrumbs-aside {
+ display: none
+ }
+}
+
+@media print {
+ .wy-breadcrumbs li.wy-breadcrumbs-aside {
+ display: none
+ }
+}
+
+html {
+ font-size: 16px
+}
+
+.wy-affix {
+ position: fixed;
+ top: 1.618em
+}
+
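+/* Navigation menus */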
+.wy-menu a:hover {
+ text-decoration: none
+}
+
+.wy-menu-horiz {
+ *zoom: 1
+}
+
+.wy-menu-horiz:after,
+.wy-menu-horiz:before {
+ display: table;
+ content: ""
+}
+
+.wy-menu-horiz:after {
+ clear: both
+}
+
+.wy-menu-horiz li,
+.wy-menu-horiz ul {
+ display: inline-block
+}
+
+.wy-menu-horiz li:hover {
+ background: hsla(0, 0%, 100%, .1)
+}
+
+.wy-menu-horiz li.divide-left {
+ border-left: 1px solid #404040
+}
+
+.wy-menu-horiz li.divide-right {
+ border-right: 1px solid #404040
+}
+
+.wy-menu-horiz a {
+ height: 32px;
+ display: inline-block;
+ line-height: 32px;
+ padding: 0 16px
+}
+
+.wy-menu-vertical {
+ width: 300px
+}
+
+.wy-menu-vertical header,
+.wy-menu-vertical p.caption {
+ color: #55a5d9;
+ height: 32px;
+ line-height: 32px;
+ padding: 0 1.618em;
+ margin: 12px 0 0;
+ display: block;
+ font-weight: 700;
+ text-transform: uppercase;
+ font-size: 85%;
+ white-space: nowrap
+}
+
+.wy-menu-vertical ul {
+ margin-bottom: 0
+}
+
+.wy-menu-vertical li.divide-top {
+ border-top: 1px solid #404040
+}
+
+.wy-menu-vertical li.divide-bottom {
+ border-bottom: 1px solid #404040
+}
+
+.wy-menu-vertical li.current {
+ background: #e3e3e3
+}
+
+.wy-menu-vertical li.current a {
+ color: grey;
+ border-right: 1px solid #c9c9c9;
+ padding: .4045em 2.427em
+}
+
+.wy-menu-vertical li.current a:hover {
+ background: #d6d6d6
+}
+
+.rst-content .wy-menu-vertical li tt,
+.wy-menu-vertical li .rst-content tt,
+.wy-menu-vertical li code {
+ border: none;
+ background: inherit;
+ color: inherit;
+ padding-left: 0;
+ padding-right: 0
+}
+
+.wy-menu-vertical li button.toctree-expand {
+ display: block;
+ float: left;
+ margin-left: -1.2em;
+ line-height: 18px;
+ color: #4d4d4d;
+ border: none;
+ background: none;
+ padding: 0
+}
+
+.wy-menu-vertical li.current>a,
+.wy-menu-vertical li.on a {
+ color: #404040;
+ font-weight: 700;
+ position: relative;
+ background: #fcfcfc;
+ border: none;
+ padding: .4045em 1.618em
+}
+
+.wy-menu-vertical li.current>a:hover,
+.wy-menu-vertical li.on a:hover {
+ background: #fcfcfc
+}
+
+.wy-menu-vertical li.current>a:hover button.toctree-expand,
+.wy-menu-vertical li.on a:hover button.toctree-expand {
+ color: grey
+}
+
+.wy-menu-vertical li.current>a button.toctree-expand,
+.wy-menu-vertical li.on a button.toctree-expand {
+ display: block;
+ line-height: 18px;
+ color: #333
+}
+
+.wy-menu-vertical li.toctree-l1.current>a {
+ border-bottom: 1px solid #c9c9c9;
+ border-top: 1px solid #c9c9c9
+}
+
+.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,
+.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,
+.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,
+.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,
+.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,
+.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,
+.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,
+.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,
+.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,
+.wy-menu-vertical .toctree-l10.current .toctree-l11>ul {
+ display: none
+}
+
+.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,
+.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,
+.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,
+.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,
+.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,
+.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,
+.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,
+.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,
+.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,
+.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul {
+ display: block
+}
+
+.wy-menu-vertical li.toctree-l3,
+.wy-menu-vertical li.toctree-l4 {
+ font-size: .9em
+}
+
+.wy-menu-vertical li.toctree-l2 a,
+.wy-menu-vertical li.toctree-l3 a,
+.wy-menu-vertical li.toctree-l4 a,
+.wy-menu-vertical li.toctree-l5 a,
+.wy-menu-vertical li.toctree-l6 a,
+.wy-menu-vertical li.toctree-l7 a,
+.wy-menu-vertical li.toctree-l8 a,
+.wy-menu-vertical li.toctree-l9 a,
+.wy-menu-vertical li.toctree-l10 a {
+ color: #404040
+}
+
+.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l5 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,
+.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand {
+ color: grey
+}
+
+.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,
+.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,
+.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,
+.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,
+.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,
+.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,
+.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,
+.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,
+.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a {
+ display: block
+}
+
+.wy-menu-vertical li.toctree-l2.current>a {
+ padding: .4045em 2.427em
+}
+
+.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
+ padding: .4045em 1.618em .4045em 4.045em
+}
+
+.wy-menu-vertical li.toctree-l3.current>a {
+ padding: .4045em 4.045em
+}
+
+.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a {
+ padding: .4045em 1.618em .4045em 5.663em
+}
+
+.wy-menu-vertical li.toctree-l4.current>a {
+ padding: .4045em 5.663em
+}
+
+.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a {
+ padding: .4045em 1.618em .4045em 7.281em
+}
+
+.wy-menu-vertical li.toctree-l5.current>a {
+ padding: .4045em 7.281em
+}
+
+.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a {
+ padding: .4045em 1.618em .4045em 8.899em
+}
+
+.wy-menu-vertical li.toctree-l6.current>a {
+ padding: .4045em 8.899em
+}
+
+.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a {
+ padding: .4045em 1.618em .4045em 10.517em
+}
+
+.wy-menu-vertical li.toctree-l7.current>a {
+ padding: .4045em 10.517em
+}
+
+.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a {
+ padding: .4045em 1.618em .4045em 12.135em
+}
+
+.wy-menu-vertical li.toctree-l8.current>a {
+ padding: .4045em 12.135em
+}
+
+.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a {
+ padding: .4045em 1.618em .4045em 13.753em
+}
+
+.wy-menu-vertical li.toctree-l9.current>a {
+ padding: .4045em 13.753em
+}
+
+.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a {
+ padding: .4045em 1.618em .4045em 15.371em
+}
+
+.wy-menu-vertical li.toctree-l10.current>a {
+ padding: .4045em 15.371em
+}
+
+.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a {
+ padding: .4045em 1.618em .4045em 16.989em
+}
+
+.wy-menu-vertical li.toctree-l2.current>a,
+.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a {
+ background: #c9c9c9
+}
+
+.wy-menu-vertical li.toctree-l2 button.toctree-expand {
+ color: #a3a3a3
+}
+
+.wy-menu-vertical li.toctree-l3.current>a,
+.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a {
+ background: #bdbdbd
+}
+
+.wy-menu-vertical li.toctree-l3 button.toctree-expand {
+ color: #969696
+}
+
+.wy-menu-vertical li.current ul {
+ display: block
+}
+
+.wy-menu-vertical li ul {
+ margin-bottom: 0;
+ display: none
+}
+
+.wy-menu-vertical li ul li a {
+ margin-bottom: 0;
+ color: #d9d9d9;
+ font-weight: 400
+}
+
+.wy-menu-vertical a {
+ line-height: 18px;
+ padding: .4045em 1.618em;
+ display: block;
+ position: relative;
+ font-size: 90%;
+ color: #d9d9d9
+}
+
+.wy-menu-vertical a:hover {
+ background-color: #4e4a4a;
+ cursor: pointer
+}
+
+.wy-menu-vertical a:hover button.toctree-expand {
+ color: #d9d9d9
+}
+
+.wy-menu-vertical a:active {
+ background-color: #2980b9;
+ cursor: pointer;
+ color: #fff
+}
+
+.wy-menu-vertical a:active button.toctree-expand {
+ color: #fff
+}
+
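+/* Sidebar search box */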
+.wy-side-nav-search {
+ display: block;
+ width: 300px;
+ padding: .809em;
+ margin-bottom: .809em;
+ z-index: 200;
+ background-color: #2980b9;
+ text-align: center;
+ color: #fcfcfc
+}
+
+.wy-side-nav-search input[type=text] {
+ width: 100%;
+ border-radius: 50px;
+ padding: 6px 12px;
+ border-color: #2472a4
+}
+
+.wy-side-nav-search img {
+ display: block;
+ margin: auto auto .809em;
+ height: 45px;
+ width: 45px;
+ background-color: #2980b9;
+ padding: 5px;
+ border-radius: 100%
+}
+
+.wy-side-nav-search .wy-dropdown>a,
+.wy-side-nav-search>a {
+ color: #fcfcfc;
+ font-size: 100%;
+ font-weight: 700;
+ display: inline-block;
+ padding: 4px 6px;
+ margin-bottom: .809em;
+ max-width: 100%
+}
+
+.wy-side-nav-search .wy-dropdown>a:hover,
+.wy-side-nav-search>a:hover {
+ background: hsla(0, 0%, 100%, .1)
+}
+
+.wy-side-nav-search .wy-dropdown>a img.logo,
+.wy-side-nav-search>a img.logo {
+ display: block;
+ margin: 0 auto;
+ height: auto;
+ width: auto;
+ border-radius: 0;
+ max-width: 100%;
+ background: transparent
+}
+
+.wy-side-nav-search .wy-dropdown>a.icon img.logo,
+.wy-side-nav-search>a.icon img.logo {
+ margin-top: .85em
+}
+
+.wy-side-nav-search>div.version {
+ margin-top: -.4045em;
+ margin-bottom: .809em;
+ font-weight: 400;
+ color: hsla(0, 0%, 100%, .3)
+}
+
+.wy-nav .wy-menu-vertical header {
+ color: #2980b9
+}
+
+.wy-nav .wy-menu-vertical a {
+ color: #b3b3b3
+}
+
+.wy-nav .wy-menu-vertical a:hover {
+ background-color: #2980b9;
+ color: #fff
+}
+
+[data-menu-wrap] {
+ -webkit-transition: all .2s ease-in;
+ -moz-transition: all .2s ease-in;
+ transition: all .2s ease-in;
+ position: absolute;
+ width: 100%;
+ opacity: 0
+}
+
+[data-menu-wrap].move-center {
+ left: 0;
+ right: auto;
+ opacity: 1
+}
+
+[data-menu-wrap].move-left {
+ right: auto;
+ left: -100%;
+ opacity: 0
+}
+
+[data-menu-wrap].move-right {
+ right: -100%;
+ left: auto;
+ opacity: 0
+}
+
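+/* Page layout: fixed sidebar and content wrapper */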
+.wy-body-for-nav {
+ background: #fcfcfc
+}
+
+.wy-grid-for-nav {
+ position: absolute;
+ width: 100%;
+ height: 100%
+}
+
+.wy-nav-side {
+ position: fixed;
+ top: 0;
+ bottom: 0;
+ left: 0;
+ padding-bottom: 2em;
+ width: 300px;
+ overflow-x: hidden;
+ overflow-y: hidden;
+ min-height: 100%;
+ color: #9b9b9b;
+ background: #343131;
+ z-index: 200
+}
+
+.wy-side-scroll {
+ width: 320px;
+ position: relative;
+ overflow-x: hidden;
+ overflow-y: scroll;
+ height: 100%
+}
+
+.wy-nav-top {
+ display: none;
+ background: #2980b9;
+ color: #fff;
+ padding: .4045em .809em;
+ position: relative;
+ line-height: 50px;
+ text-align: center;
+ font-size: 100%;
+ *zoom: 1
+}
+
+.wy-nav-top:after,
+.wy-nav-top:before {
+ display: table;
+ content: ""
+}
+
+.wy-nav-top:after {
+ clear: both
+}
+
+.wy-nav-top a {
+ color: #fff;
+ font-weight: 700
+}
+
+.wy-nav-top img {
+ margin-right: 12px;
+ height: 45px;
+ width: 45px;
+ background-color: #2980b9;
+ padding: 5px;
+ border-radius: 100%
+}
+
+.wy-nav-top i {
+ font-size: 30px;
+ float: left;
+ cursor: pointer;
+ padding-top: inherit
+}
+
+.wy-nav-content-wrap {
+ margin-left: 300px;
+ background: #fcfcfc;
+ min-height: 100%
+}
+
+.wy-nav-content {
+ padding: 1.618em 3.236em;
+ height: 100%;
+ max-width: 900px;
+ margin: auto
+}
+
+.wy-body-mask {
+ position: fixed;
+ width: 100%;
+ height: 100%;
+ background: rgba(0, 0, 0, .2);
+ display: none;
+ z-index: 499
+}
+
+.wy-body-mask.on {
+ display: block
+}
+
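+/* Footer */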
+footer {
+ color: grey
+}
+
+footer p {
+ margin-bottom: 12px
+}
+
+.rst-content footer span.commit tt,
+footer span.commit .rst-content tt,
+footer span.commit code {
+ padding: 0;
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ font-size: 1em;
+ background: none;
+ border: none;
+ color: grey
+}
+
+.rst-footer-buttons {
+ *zoom: 1
+}
+
+.rst-footer-buttons:after,
+.rst-footer-buttons:before {
+ width: 100%;
+ display: table;
+ content: ""
+}
+
+.rst-footer-buttons:after {
+ clear: both
+}
+
+.rst-breadcrumbs-buttons {
+ margin-top: 12px;
+ *zoom: 1
+}
+
+.rst-breadcrumbs-buttons:after,
+.rst-breadcrumbs-buttons:before {
+ display: table;
+ content: ""
+}
+
+.rst-breadcrumbs-buttons:after {
+ clear: both
+}
+
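+/* Search results */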
+#search-results .search li {
+ margin-bottom: 24px;
+ border-bottom: 1px solid #e1e4e5;
+ padding-bottom: 24px
+}
+
+#search-results .search li:first-child {
+ border-top: 1px solid #e1e4e5;
+ padding-top: 24px
+}
+
+#search-results .search li a {
+ font-size: 120%;
+ margin-bottom: 12px;
+ display: inline-block
+}
+
+#search-results .context {
+ color: grey;
+ font-size: 90%
+}
+
+.genindextable li>ul {
+ margin-left: 24px
+}
+
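+/* Responsive and print layout */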
+@media screen and (max-width:768px) {
+ .wy-body-for-nav {
+ background: #fcfcfc
+ }
+
+ .wy-nav-top {
+ display: block
+ }
+
+ .wy-nav-side {
+ left: -300px
+ }
+
+ .wy-nav-side.shift {
+ width: 85%;
+ left: 0
+ }
+
+ .wy-menu.wy-menu-vertical,
+ .wy-side-nav-search,
+ .wy-side-scroll {
+ width: auto
+ }
+
+ .wy-nav-content-wrap {
+ margin-left: 0
+ }
+
+ .wy-nav-content-wrap .wy-nav-content {
+ padding: 1.618em
+ }
+
+ .wy-nav-content-wrap.shift {
+ position: fixed;
+ min-width: 100%;
+ left: 85%;
+ top: 0;
+ height: 100%;
+ overflow: hidden
+ }
+}
+
+@media screen and (min-width:1100px) {
+ .wy-nav-content-wrap {
+ background: rgba(0, 0, 0, .05)
+ }
+
+ .wy-nav-content {
+ margin: 0;
+ background: #fcfcfc
+ }
+}
+
+@media print {
+
+ .rst-versions,
+ .wy-nav-side,
+ footer {
+ display: none
+ }
+
+ .wy-nav-content-wrap {
+ margin-left: 0
+ }
+}
+
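+/* Read the Docs version switcher */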
+.rst-versions {
+ position: fixed;
+ bottom: 0;
+ left: 0;
+ width: 300px;
+ color: #fcfcfc;
+ background: #1f1d1d;
+ font-family: Lato, proxima-nova, Helvetica Neue, Arial, sans-serif;
+ z-index: 400
+}
+
+.rst-versions a {
+ color: #2980b9;
+ text-decoration: none
+}
+
+.rst-versions .rst-badge-small {
+ display: none
+}
+
+.rst-versions .rst-current-version {
+ padding: 12px;
+ background-color: #272525;
+ display: block;
+ text-align: right;
+ font-size: 90%;
+ cursor: pointer;
+ color: #27ae60;
+ *zoom: 1
+}
+
+.rst-versions .rst-current-version:after,
+.rst-versions .rst-current-version:before {
+ display: table;
+ content: ""
+}
+
+.rst-versions .rst-current-version:after {
+ clear: both
+}
+
+.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,
+.rst-content .eqno .rst-versions .rst-current-version .headerlink,
+.rst-content .rst-versions .rst-current-version .admonition-title,
+.rst-content code.download .rst-versions .rst-current-version span:first-child,
+.rst-content dl dt .rst-versions .rst-current-version .headerlink,
+.rst-content h1 .rst-versions .rst-current-version .headerlink,
+.rst-content h2 .rst-versions .rst-current-version .headerlink,
+.rst-content h3 .rst-versions .rst-current-version .headerlink,
+.rst-content h4 .rst-versions .rst-current-version .headerlink,
+.rst-content h5 .rst-versions .rst-current-version .headerlink,
+.rst-content h6 .rst-versions .rst-current-version .headerlink,
+.rst-content p .rst-versions .rst-current-version .headerlink,
+.rst-content table>caption .rst-versions .rst-current-version .headerlink,
+.rst-content tt.download .rst-versions .rst-current-version span:first-child,
+.rst-versions .rst-current-version .fa,
+.rst-versions .rst-current-version .icon,
+.rst-versions .rst-current-version .rst-content .admonition-title,
+.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,
+.rst-versions .rst-current-version .rst-content .eqno .headerlink,
+.rst-versions .rst-current-version .rst-content code.download span:first-child,
+.rst-versions .rst-current-version .rst-content dl dt .headerlink,
+.rst-versions .rst-current-version .rst-content h1 .headerlink,
+.rst-versions .rst-current-version .rst-content h2 .headerlink,
+.rst-versions .rst-current-version .rst-content h3 .headerlink,
+.rst-versions .rst-current-version .rst-content h4 .headerlink,
+.rst-versions .rst-current-version .rst-content h5 .headerlink,
+.rst-versions .rst-current-version .rst-content h6 .headerlink,
+.rst-versions .rst-current-version .rst-content p .headerlink,
+.rst-versions .rst-current-version .rst-content table>caption .headerlink,
+.rst-versions .rst-current-version .rst-content tt.download span:first-child,
+.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,
+.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand {
+ color: #fcfcfc
+}
+
+.rst-versions .rst-current-version .fa-book,
+.rst-versions .rst-current-version .icon-book {
+ float: left
+}
+
+.rst-versions .rst-current-version.rst-out-of-date {
+ background-color: #e74c3c;
+ color: #fff
+}
+
+.rst-versions .rst-current-version.rst-active-old-version {
+ background-color: #f1c40f;
+ color: #000
+}
+
+.rst-versions.shift-up {
+ height: auto;
+ max-height: 100%;
+ overflow-y: scroll
+}
+
+.rst-versions.shift-up .rst-other-versions {
+ display: block
+}
+
+.rst-versions .rst-other-versions {
+ font-size: 90%;
+ padding: 12px;
+ color: grey;
+ display: none
+}
+
+.rst-versions .rst-other-versions hr {
+ display: block;
+ height: 1px;
+ border: 0;
+ margin: 20px 0;
+ padding: 0;
+ border-top: 1px solid #413d3d
+}
+
+.rst-versions .rst-other-versions dd {
+ display: inline-block;
+ margin: 0
+}
+
+.rst-versions .rst-other-versions dd a {
+ display: inline-block;
+ padding: 6px;
+ color: #fcfcfc
+}
+
+.rst-versions.rst-badge {
+ width: auto;
+ bottom: 20px;
+ right: 20px;
+ left: auto;
+ border: none;
+ max-width: 300px;
+ max-height: 90%
+}
+
+.rst-versions.rst-badge .fa-book,
+.rst-versions.rst-badge .icon-book {
+ float: none;
+ line-height: 30px
+}
+
+.rst-versions.rst-badge.shift-up .rst-current-version {
+ text-align: right
+}
+
+.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,
+.rst-versions.rst-badge.shift-up .rst-current-version .icon-book {
+ float: left
+}
+
+.rst-versions.rst-badge>.rst-current-version {
+ width: auto;
+ height: 30px;
+ line-height: 30px;
+ padding: 0 6px;
+ display: block;
+ text-align: center
+}
+
+@media screen and (max-width:768px) {
+ .rst-versions {
+ width: 85%;
+ display: none
+ }
+
+ .rst-versions.shift {
+ display: block
+ }
+}
+
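+/* Main document content (.rst-content) */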
+.rst-content .toctree-wrapper>p.caption,
+.rst-content h1,
+.rst-content h2,
+.rst-content h3,
+.rst-content h4,
+.rst-content h5,
+.rst-content h6 {
+ margin-bottom: 24px
+}
+
+.rst-content img {
+ max-width: 100%;
+ height: auto
+}
+
+.rst-content div.figure,
+.rst-content figure {
+ margin-bottom: 24px
+}
+
+.rst-content div.figure .caption-text,
+.rst-content figure .caption-text {
+ font-style: italic
+}
+
+.rst-content div.figure p:last-child.caption,
+.rst-content figure p:last-child.caption {
+ margin-bottom: 0
+}
+
+.rst-content div.figure.align-center,
+.rst-content figure.align-center {
+ text-align: center
+}
+
+.rst-content .section>a>img,
+.rst-content .section>img,
+.rst-content section>a>img,
+.rst-content section>img {
+ margin-bottom: 24px
+}
+
+.rst-content abbr[title] {
+ text-decoration: none
+}
+
+.rst-content.style-external-links a.reference.external:after {
+ font-family: FontAwesome;
+ content: "\f08e";
+ color: #b3b3b3;
+ vertical-align: super;
+ font-size: 60%;
+ margin: 0 .2em
+}
+
+.rst-content blockquote {
+ margin-left: 24px;
+ line-height: 24px;
+ margin-bottom: 24px
+}
+
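+/* Code blocks and syntax highlighting */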
+.rst-content pre.literal-block {
+ white-space: pre;
+ margin: 0;
+ padding: 12px;
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ display: block;
+ overflow: auto
+}
+
+.rst-content div[class^=highlight],
+.rst-content pre.literal-block {
+ border: 1px solid #e1e4e5;
+ overflow-x: auto;
+ margin: 1px 0 24px
+}
+
+.rst-content div[class^=highlight] div[class^=highlight],
+.rst-content pre.literal-block div[class^=highlight] {
+ padding: 0;
+ border: none;
+ margin: 0
+}
+
+.rst-content div[class^=highlight] td.code {
+ width: 100%
+}
+
+.rst-content .linenodiv pre {
+ border-right: 1px solid #e6e9ea;
+ margin: 0;
+ padding: 12px;
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ user-select: none;
+ pointer-events: none
+}
+
+.rst-content div[class^=highlight] pre {
+ white-space: pre;
+ margin: 0;
+ padding: 12px;
+ display: block;
+ overflow: auto
+}
+
+.rst-content div[class^=highlight] pre .hll {
+ display: block;
+ margin: 0 -12px;
+ padding: 0 12px
+}
+
+.rst-content .linenodiv pre,
+.rst-content div[class^=highlight] pre,
+.rst-content pre.literal-block {
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ font-size: 12px;
+ line-height: 1.4
+}
+
+.rst-content div.highlight .gp,
+.rst-content div.highlight span.linenos {
+ user-select: none;
+ pointer-events: none
+}
+
+.rst-content div.highlight span.linenos {
+ display: inline-block;
+ padding-left: 0;
+ padding-right: 12px;
+ margin-right: 12px;
+ border-right: 1px solid #e6e9ea
+}
+
+.rst-content .code-block-caption {
+ font-style: italic;
+ font-size: 85%;
+ line-height: 1;
+ padding: 1em 0;
+ text-align: center
+}
+
+@media print {
+
+ .rst-content .codeblock,
+ .rst-content div[class^=highlight],
+ .rst-content div[class^=highlight] pre {
+ white-space: pre-wrap
+ }
+}
+
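+/* Admonitions */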
+.rst-content .admonition,
+.rst-content .admonition-todo,
+.rst-content .attention,
+.rst-content .caution,
+.rst-content .danger,
+.rst-content .error,
+.rst-content .hint,
+.rst-content .important,
+.rst-content .note,
+.rst-content .seealso,
+.rst-content .tip,
+.rst-content .warning {
+ clear: both
+}
+
+.rst-content .admonition-todo .last,
+.rst-content .admonition-todo>:last-child,
+.rst-content .admonition .last,
+.rst-content .admonition>:last-child,
+.rst-content .attention .last,
+.rst-content .attention>:last-child,
+.rst-content .caution .last,
+.rst-content .caution>:last-child,
+.rst-content .danger .last,
+.rst-content .danger>:last-child,
+.rst-content .error .last,
+.rst-content .error>:last-child,
+.rst-content .hint .last,
+.rst-content .hint>:last-child,
+.rst-content .important .last,
+.rst-content .important>:last-child,
+.rst-content .note .last,
+.rst-content .note>:last-child,
+.rst-content .seealso .last,
+.rst-content .seealso>:last-child,
+.rst-content .tip .last,
+.rst-content .tip>:last-child,
+.rst-content .warning .last,
+.rst-content .warning>:last-child {
+ margin-bottom: 0
+}
+
+.rst-content .admonition-title:before {
+ margin-right: 4px
+}
+
+.rst-content .admonition table {
+ border-color: rgba(0, 0, 0, .1)
+}
+
+.rst-content .admonition table td,
+.rst-content .admonition table th {
+ background: transparent !important;
+ border-color: rgba(0, 0, 0, .1) !important
+}
+
+.rst-content .section ol.loweralpha,
+.rst-content .section ol.loweralpha>li,
+.rst-content .toctree-wrapper ol.loweralpha,
+.rst-content .toctree-wrapper ol.loweralpha>li,
+.rst-content section ol.loweralpha,
+.rst-content section ol.loweralpha>li {
+ list-style: lower-alpha
+}
+
+.rst-content .section ol.upperalpha,
+.rst-content .section ol.upperalpha>li,
+.rst-content .toctree-wrapper ol.upperalpha,
+.rst-content .toctree-wrapper ol.upperalpha>li,
+.rst-content section ol.upperalpha,
+.rst-content section ol.upperalpha>li {
+ list-style: upper-alpha
+}
+
+.rst-content .section ol li>*,
+.rst-content .section ul li>*,
+.rst-content .toctree-wrapper ol li>*,
+.rst-content .toctree-wrapper ul li>*,
+.rst-content section ol li>*,
+.rst-content section ul li>* {
+ margin-top: 12px;
+ margin-bottom: 12px
+}
+
+.rst-content .section ol li>:first-child,
+.rst-content .section ul li>:first-child,
+.rst-content .toctree-wrapper ol li>:first-child,
+.rst-content .toctree-wrapper ul li>:first-child,
+.rst-content section ol li>:first-child,
+.rst-content section ul li>:first-child {
+ margin-top: 0
+}
+
+.rst-content .section ol li>p,
+.rst-content .section ol li>p:last-child,
+.rst-content .section ul li>p,
+.rst-content .section ul li>p:last-child,
+.rst-content .toctree-wrapper ol li>p,
+.rst-content .toctree-wrapper ol li>p:last-child,
+.rst-content .toctree-wrapper ul li>p,
+.rst-content .toctree-wrapper ul li>p:last-child,
+.rst-content section ol li>p,
+.rst-content section ol li>p:last-child,
+.rst-content section ul li>p,
+.rst-content section ul li>p:last-child {
+ margin-bottom: 12px
+}
+
+.rst-content .section ol li>p:only-child,
+.rst-content .section ol li>p:only-child:last-child,
+.rst-content .section ul li>p:only-child,
+.rst-content .section ul li>p:only-child:last-child,
+.rst-content .toctree-wrapper ol li>p:only-child,
+.rst-content .toctree-wrapper ol li>p:only-child:last-child,
+.rst-content .toctree-wrapper ul li>p:only-child,
+.rst-content .toctree-wrapper ul li>p:only-child:last-child,
+.rst-content section ol li>p:only-child,
+.rst-content section ol li>p:only-child:last-child,
+.rst-content section ul li>p:only-child,
+.rst-content section ul li>p:only-child:last-child {
+ margin-bottom: 0
+}
+
+.rst-content .section ol li>ol,
+.rst-content .section ol li>ul,
+.rst-content .section ul li>ol,
+.rst-content .section ul li>ul,
+.rst-content .toctree-wrapper ol li>ol,
+.rst-content .toctree-wrapper ol li>ul,
+.rst-content .toctree-wrapper ul li>ol,
+.rst-content .toctree-wrapper ul li>ul,
+.rst-content section ol li>ol,
+.rst-content section ol li>ul,
+.rst-content section ul li>ol,
+.rst-content section ul li>ul {
+ margin-bottom: 12px
+}
+
+.rst-content .section ol.simple li>*,
+.rst-content .section ol.simple li ol,
+.rst-content .section ol.simple li ul,
+.rst-content .section ul.simple li>*,
+.rst-content .section ul.simple li ol,
+.rst-content .section ul.simple li ul,
+.rst-content .toctree-wrapper ol.simple li>*,
+.rst-content .toctree-wrapper ol.simple li ol,
+.rst-content .toctree-wrapper ol.simple li ul,
+.rst-content .toctree-wrapper ul.simple li>*,
+.rst-content .toctree-wrapper ul.simple li ol,
+.rst-content .toctree-wrapper ul.simple li ul,
+.rst-content section ol.simple li>*,
+.rst-content section ol.simple li ol,
+.rst-content section ol.simple li ul,
+.rst-content section ul.simple li>*,
+.rst-content section ul.simple li ol,
+.rst-content section ul.simple li ul {
+ margin-top: 0;
+ margin-bottom: 0
+}
+
+.rst-content .line-block {
+ margin-left: 0;
+ margin-bottom: 24px;
+ line-height: 24px
+}
+
+.rst-content .line-block .line-block {
+ margin-left: 24px;
+ margin-bottom: 0
+}
+
+.rst-content .topic-title {
+ font-weight: 700;
+ margin-bottom: 12px
+}
+
+.rst-content .toc-backref {
+ color: #404040
+}
+
+.rst-content .align-right {
+ float: right;
+ margin: 0 0 24px 24px
+}
+
+.rst-content .align-left {
+ float: left;
+ margin: 0 24px 24px 0
+}
+
+.rst-content .align-center {
+ margin: auto
+}
+
+.rst-content .align-center:not(table) {
+ display: block
+}
+
+.rst-content .code-block-caption .headerlink,
+.rst-content .eqno .headerlink,
+.rst-content .toctree-wrapper>p.caption .headerlink,
+.rst-content dl dt .headerlink,
+.rst-content h1 .headerlink,
+.rst-content h2 .headerlink,
+.rst-content h3 .headerlink,
+.rst-content h4 .headerlink,
+.rst-content h5 .headerlink,
+.rst-content h6 .headerlink,
+.rst-content p.caption .headerlink,
+.rst-content p .headerlink,
+.rst-content table>caption .headerlink {
+ opacity: 0;
+ font-size: 14px;
+ font-family: FontAwesome;
+ margin-left: .5em
+}
+
+.rst-content .code-block-caption .headerlink:focus,
+.rst-content .code-block-caption:hover .headerlink,
+.rst-content .eqno .headerlink:focus,
+.rst-content .eqno:hover .headerlink,
+.rst-content .toctree-wrapper>p.caption .headerlink:focus,
+.rst-content .toctree-wrapper>p.caption:hover .headerlink,
+.rst-content dl dt .headerlink:focus,
+.rst-content dl dt:hover .headerlink,
+.rst-content h1 .headerlink:focus,
+.rst-content h1:hover .headerlink,
+.rst-content h2 .headerlink:focus,
+.rst-content h2:hover .headerlink,
+.rst-content h3 .headerlink:focus,
+.rst-content h3:hover .headerlink,
+.rst-content h4 .headerlink:focus,
+.rst-content h4:hover .headerlink,
+.rst-content h5 .headerlink:focus,
+.rst-content h5:hover .headerlink,
+.rst-content h6 .headerlink:focus,
+.rst-content h6:hover .headerlink,
+.rst-content p.caption .headerlink:focus,
+.rst-content p.caption:hover .headerlink,
+.rst-content p .headerlink:focus,
+.rst-content p:hover .headerlink,
+.rst-content table>caption .headerlink:focus,
+.rst-content table>caption:hover .headerlink {
+ opacity: 1
+}
+
+.rst-content p a {
+ overflow-wrap: anywhere
+}
+
+.rst-content .wy-table td p,
+.rst-content .wy-table td ul,
+.rst-content .wy-table th p,
+.rst-content .wy-table th ul,
+.rst-content table.docutils td p,
+.rst-content table.docutils td ul,
+.rst-content table.docutils th p,
+.rst-content table.docutils th ul,
+.rst-content table.field-list td p,
+.rst-content table.field-list td ul,
+.rst-content table.field-list th p,
+.rst-content table.field-list th ul {
+ font-size: inherit
+}
+
+.rst-content .btn:focus {
+ outline: 2px solid
+}
+
+.rst-content table>caption .headerlink:after {
+ font-size: 12px
+}
+
+.rst-content .centered {
+ text-align: center
+}
+
+.rst-content .sidebar {
+ float: right;
+ width: 40%;
+ display: block;
+ margin: 0 0 24px 24px;
+ padding: 24px;
+ background: #f3f6f6;
+ border: 1px solid #e1e4e5
+}
+
+.rst-content .sidebar dl,
+.rst-content .sidebar p,
+.rst-content .sidebar ul {
+ font-size: 90%
+}
+
+.rst-content .sidebar .last,
+.rst-content .sidebar>:last-child {
+ margin-bottom: 0
+}
+
+.rst-content .sidebar .sidebar-title {
+ display: block;
+ font-family: Roboto Slab, ff-tisa-web-pro, Georgia, Arial, sans-serif;
+ font-weight: 700;
+ background: #e1e4e5;
+ padding: 6px 12px;
+ margin: -24px -24px 24px;
+ font-size: 100%
+}
+
+.rst-content .highlighted {
+ background: #f1c40f;
+ box-shadow: 0 0 0 2px #f1c40f;
+ display: inline;
+ font-weight: 700
+}
+
+.rst-content .citation-reference,
+.rst-content .footnote-reference {
+ vertical-align: baseline;
+ position: relative;
+ top: -.4em;
+ line-height: 0;
+ font-size: 90%
+}
+
+.rst-content .citation-reference>span.fn-bracket,
+.rst-content .footnote-reference>span.fn-bracket {
+ display: none
+}
+
+.rst-content .hlist {
+ width: 100%
+}
+
+.rst-content dl dt span.classifier:before {
+ content: " : "
+}
+
+.rst-content dl dt span.classifier-delimiter {
+ display: none !important
+}
+
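+/* Footnotes and citations */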
+html.writer-html4 .rst-content table.docutils.citation,
+html.writer-html4 .rst-content table.docutils.footnote {
+ background: none;
+ border: none
+}
+
+html.writer-html4 .rst-content table.docutils.citation td,
+html.writer-html4 .rst-content table.docutils.citation tr,
+html.writer-html4 .rst-content table.docutils.footnote td,
+html.writer-html4 .rst-content table.docutils.footnote tr {
+ border: none;
+ background-color: transparent !important;
+ white-space: normal
+}
+
+html.writer-html4 .rst-content table.docutils.citation td.label,
+html.writer-html4 .rst-content table.docutils.footnote td.label {
+ padding-left: 0;
+ padding-right: 0;
+ vertical-align: top
+}
+
+html.writer-html5 .rst-content dl.citation,
+html.writer-html5 .rst-content dl.field-list,
+html.writer-html5 .rst-content dl.footnote {
+ display: grid;
+ grid-template-columns: auto minmax(80%, 95%)
+}
+
+html.writer-html5 .rst-content dl.citation>dt,
+html.writer-html5 .rst-content dl.field-list>dt,
+html.writer-html5 .rst-content dl.footnote>dt {
+ display: inline-grid;
+ grid-template-columns: max-content auto
+}
+
+html.writer-html5 .rst-content aside.citation,
+html.writer-html5 .rst-content aside.footnote,
+html.writer-html5 .rst-content div.citation {
+ display: grid;
+ grid-template-columns: auto auto minmax(.65rem, auto) minmax(40%, 95%)
+}
+
+html.writer-html5 .rst-content aside.citation>span.label,
+html.writer-html5 .rst-content aside.footnote>span.label,
+html.writer-html5 .rst-content div.citation>span.label {
+ grid-column-start: 1;
+ grid-column-end: 2
+}
+
+html.writer-html5 .rst-content aside.citation>span.backrefs,
+html.writer-html5 .rst-content aside.footnote>span.backrefs,
+html.writer-html5 .rst-content div.citation>span.backrefs {
+ grid-column-start: 2;
+ grid-column-end: 3;
+ grid-row-start: 1;
+ grid-row-end: 3
+}
+
+html.writer-html5 .rst-content aside.citation>p,
+html.writer-html5 .rst-content aside.footnote>p,
+html.writer-html5 .rst-content div.citation>p {
+ grid-column-start: 4;
+ grid-column-end: 5
+}
+
+html.writer-html5 .rst-content dl.citation,
+html.writer-html5 .rst-content dl.field-list,
+html.writer-html5 .rst-content dl.footnote {
+ margin-bottom: 24px
+}
+
+html.writer-html5 .rst-content dl.citation>dt,
+html.writer-html5 .rst-content dl.field-list>dt,
+html.writer-html5 .rst-content dl.footnote>dt {
+ padding-left: 1rem
+}
+
+html.writer-html5 .rst-content dl.citation>dd,
+html.writer-html5 .rst-content dl.citation>dt,
+html.writer-html5 .rst-content dl.field-list>dd,
+html.writer-html5 .rst-content dl.field-list>dt,
+html.writer-html5 .rst-content dl.footnote>dd,
+html.writer-html5 .rst-content dl.footnote>dt {
+ margin-bottom: 0
+}
+
+html.writer-html5 .rst-content dl.citation,
+html.writer-html5 .rst-content dl.footnote {
+ font-size: .9rem
+}
+
+html.writer-html5 .rst-content dl.citation>dt,
+html.writer-html5 .rst-content dl.footnote>dt {
+ margin: 0 .5rem .5rem 0;
+ line-height: 1.2rem;
+ word-break: break-all;
+ font-weight: 400
+}
+
+html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,
+html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before {
+ content: "["
+}
+
+html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,
+html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after {
+ content: "]"
+}
+
+html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,
+html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref {
+ text-align: left;
+ font-style: italic;
+ margin-left: .65rem;
+ word-break: break-word;
+ word-spacing: -.1rem;
+ max-width: 5rem
+}
+
+html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,
+html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a {
+ word-break: keep-all
+}
+
+html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,
+html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before {
+ content: " "
+}
+
+html.writer-html5 .rst-content dl.citation>dd,
+html.writer-html5 .rst-content dl.footnote>dd {
+ margin: 0 0 .5rem;
+ line-height: 1.2rem
+}
+
+html.writer-html5 .rst-content dl.citation>dd p,
+html.writer-html5 .rst-content dl.footnote>dd p {
+ font-size: .9rem
+}
+
+html.writer-html5 .rst-content aside.citation,
+html.writer-html5 .rst-content aside.footnote,
+html.writer-html5 .rst-content div.citation {
+ padding-left: 1rem;
+ padding-right: 1rem;
+ font-size: .9rem;
+ line-height: 1.2rem
+}
+
+html.writer-html5 .rst-content aside.citation p,
+html.writer-html5 .rst-content aside.footnote p,
+html.writer-html5 .rst-content div.citation p {
+ font-size: .9rem;
+ line-height: 1.2rem;
+ margin-bottom: 12px
+}
+
+html.writer-html5 .rst-content aside.citation span.backrefs,
+html.writer-html5 .rst-content aside.footnote span.backrefs,
+html.writer-html5 .rst-content div.citation span.backrefs {
+ text-align: left;
+ font-style: italic;
+ margin-left: .65rem;
+ word-break: break-word;
+ word-spacing: -.1rem;
+ max-width: 5rem
+}
+
+html.writer-html5 .rst-content aside.citation span.backrefs>a,
+html.writer-html5 .rst-content aside.footnote span.backrefs>a,
+html.writer-html5 .rst-content div.citation span.backrefs>a {
+ word-break: keep-all
+}
+
+html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,
+html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,
+html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before {
+ content: " "
+}
+
+html.writer-html5 .rst-content aside.citation span.label,
+html.writer-html5 .rst-content aside.footnote span.label,
+html.writer-html5 .rst-content div.citation span.label {
+ line-height: 1.2rem
+}
+
+html.writer-html5 .rst-content aside.citation-list,
+html.writer-html5 .rst-content aside.footnote-list,
+html.writer-html5 .rst-content div.citation-list {
+ margin-bottom: 24px
+}
+
+html.writer-html5 .rst-content dl.option-list kbd {
+ font-size: .9rem
+}
+
+.rst-content table.docutils.footnote,
+html.writer-html4 .rst-content table.docutils.citation,
+html.writer-html5 .rst-content aside.footnote,
+html.writer-html5 .rst-content aside.footnote-list aside.footnote,
+html.writer-html5 .rst-content div.citation-list>div.citation,
+html.writer-html5 .rst-content dl.citation,
+html.writer-html5 .rst-content dl.footnote {
+ color: grey
+}
+
+.rst-content table.docutils.footnote code,
+.rst-content table.docutils.footnote tt,
+html.writer-html4 .rst-content table.docutils.citation code,
+html.writer-html4 .rst-content table.docutils.citation tt,
+html.writer-html5 .rst-content aside.footnote-list aside.footnote code,
+html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,
+html.writer-html5 .rst-content aside.footnote code,
+html.writer-html5 .rst-content aside.footnote tt,
+html.writer-html5 .rst-content div.citation-list>div.citation code,
+html.writer-html5 .rst-content div.citation-list>div.citation tt,
+html.writer-html5 .rst-content dl.citation code,
+html.writer-html5 .rst-content dl.citation tt,
+html.writer-html5 .rst-content dl.footnote code,
+html.writer-html5 .rst-content dl.footnote tt {
+ color: #555
+}
+
+.rst-content .wy-table-responsive.citation,
+.rst-content .wy-table-responsive.footnote {
+ margin-bottom: 0
+}
+
+.rst-content .wy-table-responsive.citation+:not(.citation),
+.rst-content .wy-table-responsive.footnote+:not(.footnote) {
+ margin-top: 24px
+}
+
+.rst-content .wy-table-responsive.citation:last-child,
+.rst-content .wy-table-responsive.footnote:last-child {
+ margin-bottom: 24px
+}
+
+.rst-content table.docutils th {
+ border-color: #e1e4e5
+}
+
+html.writer-html5 .rst-content table.docutils th {
+ border: 1px solid #e1e4e5
+}
+
+html.writer-html5 .rst-content table.docutils td>p,
+html.writer-html5 .rst-content table.docutils th>p {
+ line-height: 1rem;
+ margin-bottom: 0;
+ font-size: .9rem
+}
+
+.rst-content table.docutils td .last,
+.rst-content table.docutils td .last>:last-child {
+ margin-bottom: 0
+}
+
+.rst-content table.field-list,
+.rst-content table.field-list td {
+ border: none
+}
+
+.rst-content table.field-list td p {
+ line-height: inherit
+}
+
+.rst-content table.field-list td>strong {
+ display: inline-block
+}
+
+.rst-content table.field-list .field-name {
+ padding-right: 10px;
+ text-align: left;
+ white-space: nowrap
+}
+
+.rst-content table.field-list .field-body {
+ text-align: left
+}
+
+.rst-content code,
+.rst-content tt {
+ color: #000;
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ padding: 2px 5px
+}
+
+.rst-content code big,
+.rst-content code em,
+.rst-content tt big,
+.rst-content tt em {
+ font-size: 100% !important;
+ line-height: normal
+}
+
+.rst-content code.literal,
+.rst-content tt.literal {
+ color: #e74c3c;
+ white-space: normal
+}
+
+.rst-content code.xref,
+.rst-content tt.xref,
+a .rst-content code,
+a .rst-content tt {
+ font-weight: 700;
+ color: #404040;
+ overflow-wrap: normal
+}
+
+.rst-content kbd,
+.rst-content pre,
+.rst-content samp {
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace
+}
+
+.rst-content a code,
+.rst-content a tt {
+ color: #2980b9
+}
+
+.rst-content dl {
+ margin-bottom: 24px
+}
+
+.rst-content dl dt {
+ font-weight: 700;
+ margin-bottom: 12px
+}
+
+.rst-content dl ol,
+.rst-content dl p,
+.rst-content dl table,
+.rst-content dl ul {
+ margin-bottom: 12px
+}
+
+.rst-content dl dd {
+ margin: 0 0 12px 24px;
+ line-height: 24px
+}
+
+.rst-content dl dd>ol:last-child,
+.rst-content dl dd>p:last-child,
+.rst-content dl dd>table:last-child,
+.rst-content dl dd>ul:last-child {
+ margin-bottom: 0
+}
+
+html.writer-html4 .rst-content dl:not(.docutils),
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) {
+ margin-bottom: 24px
+}
+
+html.writer-html4 .rst-content dl:not(.docutils)>dt,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt {
+ display: table;
+ margin: 6px 0;
+ font-size: 90%;
+ line-height: normal;
+ background: #e7f2fa;
+ color: #2980b9;
+ border-top: 3px solid #6ab0de;
+ padding: 6px;
+ position: relative
+}
+
+html.writer-html4 .rst-content dl:not(.docutils)>dt:before,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before {
+ color: #6ab0de
+}
+
+html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink {
+ color: #404040;
+ font-size: 100% !important
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt {
+ margin-bottom: 6px;
+ border: none;
+ border-left: 3px solid #ccc;
+ background: #f0f0f0;
+ color: #555
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink {
+ color: #404040;
+ font-size: 100% !important
+}
+
+html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child {
+ margin-top: 0
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,
+html.writer-html4 .rst-content dl:not(.docutils) code.descname,
+html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,
+html.writer-html4 .rst-content dl:not(.docutils) tt.descname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname {
+ background-color: transparent;
+ border: none;
+ padding: 0;
+ font-size: 100% !important
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) code.descname,
+html.writer-html4 .rst-content dl:not(.docutils) tt.descname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname {
+ font-weight: 700
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) .optional,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional {
+ display: inline-block;
+ padding: 0 4px;
+ color: #000;
+ font-weight: 700
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) .property,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property {
+ display: inline-block;
+ padding-right: 8px;
+ max-width: 100%
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) .k,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k {
+ font-style: italic
+}
+
+html.writer-html4 .rst-content dl:not(.docutils) .descclassname,
+html.writer-html4 .rst-content dl:not(.docutils) .descname,
+html.writer-html4 .rst-content dl:not(.docutils) .sig-name,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,
+html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name {
+ font-family: SFMono-Regular, Menlo, Monaco, Consolas, Liberation Mono, Courier New, Courier, monospace;
+ color: #000
+}
+
+.rst-content .viewcode-back,
+.rst-content .viewcode-link {
+ display: inline-block;
+ color: #27ae60;
+ font-size: 80%;
+ padding-left: 24px
+}
+
+.rst-content .viewcode-back {
+ display: block;
+ float: right
+}
+
+.rst-content p.rubric {
+ margin-bottom: 12px;
+ font-weight: 700
+}
+
+.rst-content code.download,
+.rst-content tt.download {
+ background: inherit;
+ padding: inherit;
+ font-weight: 400;
+ font-family: inherit;
+ font-size: inherit;
+ color: inherit;
+ border: inherit;
+ white-space: inherit
+}
+
+.rst-content code.download span:first-child,
+.rst-content tt.download span:first-child {
+ -webkit-font-smoothing: subpixel-antialiased
+}
+
+.rst-content code.download span:first-child:before,
+.rst-content tt.download span:first-child:before {
+ margin-right: 4px
+}
+
+.rst-content .guilabel,
+.rst-content .menuselection {
+ font-size: 80%;
+ font-weight: 700;
+ border-radius: 4px;
+ padding: 2.4px 6px;
+ margin: auto 2px
+}
+
+.rst-content .guilabel,
+.rst-content .menuselection {
+ border: 1px solid #7fbbe3;
+ background: #e7f2fa
+}
+
+.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,
+.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd {
+ color: inherit;
+ font-size: 80%;
+ background-color: #fff;
+ border: 1px solid #a6a6a6;
+ border-radius: 4px;
+ box-shadow: 0 2px grey;
+ padding: 2.4px 6px;
+ margin: auto 0
+}
+
+.rst-content .versionmodified {
+ font-style: italic
+}
+
+@media screen and (max-width:480px) {
+ .rst-content .sidebar {
+ width: 100%
+ }
+}
+
+span[id*=MathJax-Span] {
+ color: #404040
+}
+
+.math {
+ text-align: center
+}
+
+@font-face {
+ font-family: Lato;
+ src: url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"), url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");
+ font-weight: 400;
+ font-style: normal;
+ font-display: block
+}
+
+@font-face {
+ font-family: Lato;
+ src: url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"), url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");
+ font-weight: 700;
+ font-style: normal;
+ font-display: block
+}
+
+@font-face {
+ font-family: Lato;
+ src: url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"), url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");
+ font-weight: 700;
+ font-style: italic;
+ font-display: block
+}
+
+@font-face {
+ font-family: Lato;
+ src: url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"), url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");
+ font-weight: 400;
+ font-style: italic;
+ font-display: block
+}
+
+@font-face {
+ font-family: Roboto Slab;
+ font-style: normal;
+ font-weight: 400;
+ src: url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"), url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");
+ font-display: block
+}
+
+@font-face {
+ font-family: Roboto Slab;
+ font-style: normal;
+ font-weight: 700;
+ src: url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"), url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");
+ font-display: block
+}
\ No newline at end of file
diff --git a/docs/source/_static/custom.css b/docs/source/_static/custom.css
new file mode 100644
index 00000000..2a924f1d
--- /dev/null
+++ b/docs/source/_static/custom.css
@@ -0,0 +1 @@
+/* This file intentionally left blank. */
diff --git a/docs/source/_static/doctools.js b/docs/source/_static/doctools.js
new file mode 100644
index 00000000..d06a71d7
--- /dev/null
+++ b/docs/source/_static/doctools.js
@@ -0,0 +1,156 @@
+/*
+ * doctools.js
+ * ~~~~~~~~~~~
+ *
+ * Base JavaScript utilities for all Sphinx HTML documentation.
+ *
+ * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :license: BSD, see LICENSE for details.
+ *
+ */
+"use strict";
+
+const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([
+ "TEXTAREA",
+ "INPUT",
+ "SELECT",
+ "BUTTON",
+]);
+
+const _ready = (callback) => {
+ if (document.readyState !== "loading") {
+ callback();
+ } else {
+ document.addEventListener("DOMContentLoaded", callback);
+ }
+};
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+const Documentation = {
+ init: () => {
+ Documentation.initDomainIndexTable();
+ Documentation.initOnKeyListeners();
+ },
+
+ /**
+ * i18n support
+ */
+ TRANSLATIONS: {},
+ PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
+ LOCALE: "unknown",
+
+  // gettext and ngettext don't access this so that the functions
+  // can safely be bound to a different name (_ = Documentation.gettext)
+ gettext: (string) => {
+ const translated = Documentation.TRANSLATIONS[string];
+ switch (typeof translated) {
+ case "undefined":
+ return string; // no translation
+ case "string":
+ return translated; // translation exists
+ default:
+ return translated[0]; // (singular, plural) translation tuple exists
+ }
+ },
+
+ ngettext: (singular, plural, n) => {
+ const translated = Documentation.TRANSLATIONS[singular];
+ if (typeof translated !== "undefined")
+ return translated[Documentation.PLURAL_EXPR(n)];
+ return n === 1 ? singular : plural;
+ },
+
+ addTranslations: (catalog) => {
+ Object.assign(Documentation.TRANSLATIONS, catalog.messages);
+ Documentation.PLURAL_EXPR = new Function(
+ "n",
+ `return (${catalog.plural_expr})`
+ );
+ Documentation.LOCALE = catalog.locale;
+ },
+
+ /**
+ * helper function to focus on search bar
+ */
+ focusSearchBar: () => {
+ document.querySelectorAll("input[name=q]")[0]?.focus();
+ },
+
+ /**
+ * Initialise the domain index toggle buttons
+ */
+ initDomainIndexTable: () => {
+ const toggler = (el) => {
+ const idNumber = el.id.substr(7);
+ const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
+ if (el.src.substr(-9) === "minus.png") {
+ el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
+ toggledRows.forEach((el) => (el.style.display = "none"));
+ } else {
+ el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
+ toggledRows.forEach((el) => (el.style.display = ""));
+ }
+ };
+
+ const togglerElements = document.querySelectorAll("img.toggler");
+ togglerElements.forEach((el) =>
+ el.addEventListener("click", (event) => toggler(event.currentTarget))
+ );
+ togglerElements.forEach((el) => (el.style.display = ""));
+ if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
+ },
+
+ initOnKeyListeners: () => {
+ // only install a listener if it is really needed
+ if (
+ !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
+ !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
+ )
+ return;
+
+ document.addEventListener("keydown", (event) => {
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.altKey || event.ctrlKey || event.metaKey) return;
+
+ if (!event.shiftKey) {
+ switch (event.key) {
+ case "ArrowLeft":
+ if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+ const prevLink = document.querySelector('link[rel="prev"]');
+ if (prevLink && prevLink.href) {
+ window.location.href = prevLink.href;
+ event.preventDefault();
+ }
+ break;
+ case "ArrowRight":
+ if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+ const nextLink = document.querySelector('link[rel="next"]');
+ if (nextLink && nextLink.href) {
+ window.location.href = nextLink.href;
+ event.preventDefault();
+ }
+ break;
+ }
+ }
+
+ // some keyboard layouts may need Shift to get /
+ switch (event.key) {
+ case "/":
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
+ Documentation.focusSearchBar();
+ event.preventDefault();
+ }
+ });
+ },
+};
+
+// quick alias for translations
+const _ = Documentation.gettext;
+
+_ready(Documentation.init);
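For reference, the i18n helpers above are driven by a catalog that Sphinx normally injects through a generated translations.js. A minimal sketch of that interaction, using a hypothetical German catalog (the message contents are illustrative, not part of this patch):

    // Hypothetical catalog in the shape addTranslations() expects.
    Documentation.addTranslations({
      messages: {
        "Contents": "Inhalt",          // plain string translation
        "page": ["Seite", "Seiten"],   // (singular, plural) tuple
      },
      plural_expr: "n == 1 ? 0 : 1",   // compiled into PLURAL_EXPR above
      locale: "de",
    });
    Documentation.gettext("Contents");          // -> "Inhalt"
    Documentation.gettext("missing key");       // -> "missing key" (pass-through)
    Documentation.ngettext("page", "pages", 2); // -> "Seiten"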
diff --git a/docs/source/_static/documentation_options.js b/docs/source/_static/documentation_options.js
new file mode 100644
index 00000000..995f333f
--- /dev/null
+++ b/docs/source/_static/documentation_options.js
@@ -0,0 +1,14 @@
+var DOCUMENTATION_OPTIONS = {
+ URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
+ VERSION: '1.0.0',
+ LANGUAGE: 'en',
+ COLLAPSE_INDEX: false,
+ BUILDER: 'html',
+ FILE_SUFFIX: '.html',
+ LINK_SUFFIX: '.html',
+ HAS_SOURCE: true,
+ SOURCELINK_SUFFIX: '.txt',
+ NAVIGATION_WITH_KEYS: false,
+ SHOW_SEARCH_SUMMARY: true,
+ ENABLE_SEARCH_SHORTCUTS: true,
+};
\ No newline at end of file
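These flags are what initOnKeyListeners in doctools.js above consults: with the values shown, pressing / focuses the search bar while arrow-key page navigation stays off. A minimal sketch of what flipping the navigation flag enables (a hypothetical override; in practice the value comes from the theme configuration rather than hand-editing this file):

    // With this flag true, the keydown handler in doctools.js follows the
    // <link rel="prev"> / <link rel="next"> elements in each page's <head>.
    DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS = true;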
diff --git a/docs/source/_static/images/logo_binder.svg b/docs/source/_static/images/logo_binder.svg
new file mode 100644
index 00000000..45fecf75
--- /dev/null
+++ b/docs/source/_static/images/logo_binder.svg
@@ -0,0 +1,19 @@
+
+
+
diff --git a/docs/source/_static/images/logo_deepnote.svg b/docs/source/_static/images/logo_deepnote.svg
new file mode 100644
index 00000000..fa77ebfc
--- /dev/null
+++ b/docs/source/_static/images/logo_deepnote.svg
@@ -0,0 +1 @@
+
diff --git a/docs/source/_static/images/logo_jupyterhub.svg b/docs/source/_static/images/logo_jupyterhub.svg
new file mode 100644
index 00000000..60cfe9f2
--- /dev/null
+++ b/docs/source/_static/images/logo_jupyterhub.svg
@@ -0,0 +1 @@
+
diff --git a/docs/source/_static/jquery.js b/docs/source/_static/jquery.js
new file mode 100644
index 00000000..c4c6022f
--- /dev/null
+++ b/docs/source/_static/jquery.js
@@ -0,0 +1,2 @@
+/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */
+!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return 
g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var 
t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof 
e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,""],col:[2,""],tr:[2,""],td:[3,""],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return 
o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 
0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var 
p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document);
\ No newline at end of file
diff --git a/docs/source/_static/js/html5shiv.min.js b/docs/source/_static/js/html5shiv.min.js
new file mode 100644
index 00000000..cd1c674f
--- /dev/null
+++ b/docs/source/_static/js/html5shiv.min.js
@@ -0,0 +1,4 @@
+/**
+* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed
+*/
+!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document);
\ No newline at end of file
diff --git a/docs/source/_static/js/theme.js b/docs/source/_static/js/theme.js
new file mode 100644
index 00000000..1fddb6ee
--- /dev/null
+++ b/docs/source/_static/js/theme.js
@@ -0,0 +1 @@
+!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap(""),n("table.docutils.footnote").wrap(""),n("table.docutils.citation").wrap(""),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var 
n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0
+ var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
+ var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
+ var s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ this.stemWord = function (w) {
+ var stem;
+ var suffix;
+ var firstch;
+ var origword = w;
+
+ if (w.length < 3)
+ return w;
+
+ var re;
+ var re2;
+ var re3;
+ var re4;
+
+ firstch = w.substr(0,1);
+ if (firstch == "y")
+ w = firstch.toUpperCase() + w.substr(1);
+
+ // Step 1a
+ re = /^(.+?)(ss|i)es$/;
+ re2 = /^(.+?)([^s])s$/;
+
+ if (re.test(w))
+ w = w.replace(re,"$1$2");
+ else if (re2.test(w))
+ w = w.replace(re2,"$1$2");
+
+ // Step 1b
+ re = /^(.+?)eed$/;
+ re2 = /^(.+?)(ed|ing)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = new RegExp(mgr0);
+ if (re.test(fp[1])) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = new RegExp(s_v);
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = /(at|bl|iz)$/;
+ re3 = new RegExp("([^aeiouylsz])\\1$");
+ re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re2.test(w))
+ w = w + "e";
+ else if (re3.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ else if (re4.test(w))
+ w = w + "e";
+ }
+ }
+
+ // Step 1c
+ re = /^(.+?)y$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(s_v);
+ if (re.test(stem))
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step2list[suffix];
+ }
+
+ // Step 3
+ re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step3list[suffix];
+ }
+
+ // Step 4
+ re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ re2 = /^(.+?)(s|t)(ion)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ if (re.test(stem))
+ w = stem;
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = new RegExp(mgr1);
+ if (re2.test(stem))
+ w = stem;
+ }
+
+ // Step 5
+ re = /^(.+?)e$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ re2 = new RegExp(meq1);
+ re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
+ w = stem;
+ }
+ re = /ll$/;
+ re2 = new RegExp(mgr1);
+ if (re.test(w) && re2.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+
+ // and turn initial Y back to y
+ if (firstch == "y")
+ w = firstch.toLowerCase() + w.substr(1);
+ return w;
+ }
+}
+
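The block above is the tail of the Porter suffix-stripping stemmer that Sphinx ships for client-side search; in stock Sphinx it lives in language_data.js inside a constructor conventionally named Stemmer, and that name is an assumption here since the opening of the file is not visible in this hunk. A minimal usage sketch:

    // Assumes the enclosing constructor is `Stemmer`, as in stock Sphinx.
    var stemmer = new Stemmer();
    stemmer.stemWord("caresses");   // -> "caress" (Step 1a: "sses" -> "ss")
    stemmer.stemWord("ponies");     // -> "poni"   (Step 1a: "ies" -> "i")
    stemmer.stemWord("relational"); // -> "relat"  (Step 2: "ational" -> "ate"; Step 5 drops the "e")
    stemmer.stemWord("sky");        // -> "sky"    (no vowel in the stem, so Step 1c keeps the "y")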
diff --git a/docs/source/_static/locales/ar/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ar/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..2e8d6820
--- /dev/null
+++ b/docs/source/_static/locales/ar/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ar\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "أقترح تحرير"
+
+msgid "Last updated on"
+msgstr "آخر تحديث في"
+
+msgid "Edit this page"
+msgstr "قم بتحرير هذه الصفحة"
+
+msgid "Launch"
+msgstr "إطلاق"
+
+msgid "Print to PDF"
+msgstr "طباعة إلى PDF"
+
+msgid "open issue"
+msgstr "قضية مفتوحة"
+
+msgid "Download notebook file"
+msgstr "تنزيل ملف دفتر الملاحظات"
+
+msgid "Toggle navigation"
+msgstr "تبديل التنقل"
+
+msgid "Source repository"
+msgstr "مستودع المصدر"
+
+msgid "By the"
+msgstr "بواسطة"
+
+msgid "next page"
+msgstr "الصفحة التالية"
+
+msgid "repository"
+msgstr "مخزن"
+
+msgid "Sphinx Book Theme"
+msgstr "موضوع كتاب أبو الهول"
+
+msgid "Download source file"
+msgstr "تنزيل ملف المصدر"
+
+msgid "Contents"
+msgstr "محتويات"
+
+msgid "By"
+msgstr "بواسطة"
+
+msgid "Copyright"
+msgstr "حقوق النشر"
+
+msgid "Fullscreen mode"
+msgstr "وضع ملء الشاشة"
+
+msgid "Open an issue"
+msgstr "افتح قضية"
+
+msgid "previous page"
+msgstr "الصفحة السابقة"
+
+msgid "Download this page"
+msgstr "قم بتنزيل هذه الصفحة"
+
+msgid "Theme by the"
+msgstr "موضوع بواسطة"
diff --git a/docs/source/_static/locales/bg/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/bg/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..56ef0ebd
--- /dev/null
+++ b/docs/source/_static/locales/bg/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: bg\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "предложи редактиране"
+
+msgid "Last updated on"
+msgstr "Последна актуализация на"
+
+msgid "Edit this page"
+msgstr "Редактирайте тази страница"
+
+msgid "Launch"
+msgstr "Стартиране"
+
+msgid "Print to PDF"
+msgstr "Печат в PDF"
+
+msgid "open issue"
+msgstr "отворен брой"
+
+msgid "Download notebook file"
+msgstr "Изтеглете файла на бележника"
+
+msgid "Toggle navigation"
+msgstr "Превключване на навигацията"
+
+msgid "Source repository"
+msgstr "Хранилище на източника"
+
+msgid "By the"
+msgstr "По"
+
+msgid "next page"
+msgstr "Следваща страница"
+
+msgid "repository"
+msgstr "хранилище"
+
+msgid "Sphinx Book Theme"
+msgstr "Тема на книгата Sphinx"
+
+msgid "Download source file"
+msgstr "Изтеглете изходния файл"
+
+msgid "Contents"
+msgstr "Съдържание"
+
+msgid "By"
+msgstr "От"
+
+msgid "Copyright"
+msgstr "Авторско право"
+
+msgid "Fullscreen mode"
+msgstr "Режим на цял екран"
+
+msgid "Open an issue"
+msgstr "Отворете проблем"
+
+msgid "previous page"
+msgstr "предишна страница"
+
+msgid "Download this page"
+msgstr "Изтеглете тази страница"
+
+msgid "Theme by the"
+msgstr "Тема от"
diff --git a/docs/source/_static/locales/bn/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/bn/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..243ca31f
--- /dev/null
+++ b/docs/source/_static/locales/bn/LC_MESSAGES/booktheme.po
@@ -0,0 +1,63 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: bn\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "Last updated on"
+msgstr "সর্বশেষ আপডেট"
+
+msgid "Edit this page"
+msgstr "এই পৃষ্ঠাটি সম্পাদনা করুন"
+
+msgid "Launch"
+msgstr "শুরু করা"
+
+msgid "Print to PDF"
+msgstr "পিডিএফ প্রিন্ট করুন"
+
+msgid "open issue"
+msgstr "খোলা সমস্যা"
+
+msgid "Download notebook file"
+msgstr "নোটবুক ফাইল ডাউনলোড করুন"
+
+msgid "Toggle navigation"
+msgstr "নেভিগেশন টগল করুন"
+
+msgid "Source repository"
+msgstr "উত্স সংগ্রহস্থল"
+
+msgid "By the"
+msgstr "দ্বারা"
+
+msgid "next page"
+msgstr "পরবর্তী পৃষ্ঠা"
+
+msgid "Sphinx Book Theme"
+msgstr "স্পিনিক্স বুক থিম"
+
+msgid "Download source file"
+msgstr "উত্স ফাইল ডাউনলোড করুন"
+
+msgid "By"
+msgstr "দ্বারা"
+
+msgid "Copyright"
+msgstr "কপিরাইট"
+
+msgid "Open an issue"
+msgstr "একটি সমস্যা খুলুন"
+
+msgid "previous page"
+msgstr "আগের পৃষ্ঠা"
+
+msgid "Download this page"
+msgstr "এই পৃষ্ঠাটি ডাউনলোড করুন"
+
+msgid "Theme by the"
+msgstr "থিম দ্বারা"
diff --git a/docs/source/_static/locales/ca/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ca/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..b27a13db
--- /dev/null
+++ b/docs/source/_static/locales/ca/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ca\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "suggerir edició"
+
+msgid "Last updated on"
+msgstr "Darrera actualització el"
+
+msgid "Edit this page"
+msgstr "Editeu aquesta pàgina"
+
+msgid "Launch"
+msgstr "Llançament"
+
+msgid "Print to PDF"
+msgstr "Imprimeix a PDF"
+
+msgid "open issue"
+msgstr "número obert"
+
+msgid "Download notebook file"
+msgstr "Descarregar fitxer de quadern"
+
+msgid "Toggle navigation"
+msgstr "Commuta la navegació"
+
+msgid "Source repository"
+msgstr "Dipòsit de fonts"
+
+msgid "By the"
+msgstr "Per la"
+
+msgid "next page"
+msgstr "pàgina següent"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema del llibre Esfinx"
+
+msgid "Download source file"
+msgstr "Baixeu el fitxer font"
+
+msgid "By"
+msgstr "Per"
+
+msgid "Copyright"
+msgstr "Copyright"
+
+msgid "Open an issue"
+msgstr "Obriu un número"
+
+msgid "previous page"
+msgstr "Pàgina anterior"
+
+msgid "Download this page"
+msgstr "Descarregueu aquesta pàgina"
+
+msgid "Theme by the"
+msgstr "Tema del"
diff --git a/docs/source/_static/locales/cs/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/cs/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..3818df97
--- /dev/null
+++ b/docs/source/_static/locales/cs/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: cs\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "navrhnout úpravy"
+
+msgid "Last updated on"
+msgstr "Naposledy aktualizováno"
+
+msgid "Edit this page"
+msgstr "Upravit tuto stránku"
+
+msgid "Launch"
+msgstr "Zahájení"
+
+msgid "Print to PDF"
+msgstr "Tisk do PDF"
+
+msgid "open issue"
+msgstr "otevřené číslo"
+
+msgid "Download notebook file"
+msgstr "Stáhnout soubor poznámkového bloku"
+
+msgid "Toggle navigation"
+msgstr "Přepnout navigaci"
+
+msgid "Source repository"
+msgstr "Zdrojové úložiště"
+
+msgid "By the"
+msgstr "Podle"
+
+msgid "next page"
+msgstr "další strana"
+
+msgid "repository"
+msgstr "úložiště"
+
+msgid "Sphinx Book Theme"
+msgstr "Téma knihy Sfinga"
+
+msgid "Download source file"
+msgstr "Stáhněte si zdrojový soubor"
+
+msgid "Contents"
+msgstr "Obsah"
+
+msgid "By"
+msgstr "Podle"
+
+msgid "Copyright"
+msgstr "autorská práva"
+
+msgid "Fullscreen mode"
+msgstr "Režim celé obrazovky"
+
+msgid "Open an issue"
+msgstr "Otevřete problém"
+
+msgid "previous page"
+msgstr "předchozí stránka"
+
+msgid "Download this page"
+msgstr "Stáhněte si tuto stránku"
+
+msgid "Theme by the"
+msgstr "Téma od"
diff --git a/docs/source/_static/locales/da/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/da/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..7f20a3bd
--- /dev/null
+++ b/docs/source/_static/locales/da/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: da\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "foreslå redigering"
+
+msgid "Last updated on"
+msgstr "Sidst opdateret den"
+
+msgid "Edit this page"
+msgstr "Rediger denne side"
+
+msgid "Launch"
+msgstr "Start"
+
+msgid "Print to PDF"
+msgstr "Udskriv til PDF"
+
+msgid "open issue"
+msgstr "åbent nummer"
+
+msgid "Download notebook file"
+msgstr "Download notesbog-fil"
+
+msgid "Toggle navigation"
+msgstr "Skift navigation"
+
+msgid "Source repository"
+msgstr "Kildelager"
+
+msgid "By the"
+msgstr "Ved"
+
+msgid "next page"
+msgstr "Næste side"
+
+msgid "repository"
+msgstr "lager"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx bogtema"
+
+msgid "Download source file"
+msgstr "Download kildefil"
+
+msgid "Contents"
+msgstr "Indhold"
+
+msgid "By"
+msgstr "Ved"
+
+msgid "Copyright"
+msgstr "ophavsret"
+
+msgid "Fullscreen mode"
+msgstr "Fuldskærmstilstand"
+
+msgid "Open an issue"
+msgstr "Åbn et problem"
+
+msgid "previous page"
+msgstr "forrige side"
+
+msgid "Download this page"
+msgstr "Download denne side"
+
+msgid "Theme by the"
+msgstr "Tema af"
diff --git a/docs/source/_static/locales/de/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/de/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..c0027d3a
--- /dev/null
+++ b/docs/source/_static/locales/de/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: de\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "vorschlagen zu bearbeiten"
+
+msgid "Last updated on"
+msgstr "Zuletzt aktualisiert am"
+
+msgid "Edit this page"
+msgstr "Bearbeite diese Seite"
+
+msgid "Launch"
+msgstr "Starten"
+
+msgid "Print to PDF"
+msgstr "In PDF drucken"
+
+msgid "open issue"
+msgstr "offenes Thema"
+
+msgid "Download notebook file"
+msgstr "Notebook-Datei herunterladen"
+
+msgid "Toggle navigation"
+msgstr "Navigation umschalten"
+
+msgid "Source repository"
+msgstr "Quell-Repository"
+
+msgid "By the"
+msgstr "Bis zum"
+
+msgid "next page"
+msgstr "Nächste Seite"
+
+msgid "repository"
+msgstr "Repository"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx-Buch-Thema"
+
+msgid "Download source file"
+msgstr "Quelldatei herunterladen"
+
+msgid "Contents"
+msgstr "Inhalt"
+
+msgid "By"
+msgstr "Durch"
+
+msgid "Copyright"
+msgstr "Urheberrechte ©"
+
+msgid "Fullscreen mode"
+msgstr "Vollbildmodus"
+
+msgid "Open an issue"
+msgstr "Öffnen Sie ein Problem"
+
+msgid "previous page"
+msgstr "vorherige Seite"
+
+msgid "Download this page"
+msgstr "Laden Sie diese Seite herunter"
+
+msgid "Theme by the"
+msgstr "Thema von der"
diff --git a/docs/source/_static/locales/el/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/el/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..bdeb3270
--- /dev/null
+++ b/docs/source/_static/locales/el/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: el\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "προτείνω επεξεργασία"
+
+msgid "Last updated on"
+msgstr "Τελευταία ενημέρωση στις"
+
+msgid "Edit this page"
+msgstr "Επεξεργαστείτε αυτήν τη σελίδα"
+
+msgid "Launch"
+msgstr "Εκτόξευση"
+
+msgid "Print to PDF"
+msgstr "Εκτύπωση σε PDF"
+
+msgid "open issue"
+msgstr "ανοιχτό ζήτημα"
+
+msgid "Download notebook file"
+msgstr "Λήψη αρχείου σημειωματάριου"
+
+msgid "Toggle navigation"
+msgstr "Εναλλαγή πλοήγησης"
+
+msgid "Source repository"
+msgstr "Αποθήκη πηγής"
+
+msgid "By the"
+msgstr "Από το"
+
+msgid "next page"
+msgstr "επόμενη σελίδα"
+
+msgid "repository"
+msgstr "αποθήκη"
+
+msgid "Sphinx Book Theme"
+msgstr "Θέμα βιβλίου Sphinx"
+
+msgid "Download source file"
+msgstr "Λήψη αρχείου προέλευσης"
+
+msgid "Contents"
+msgstr "Περιεχόμενα"
+
+msgid "By"
+msgstr "Με"
+
+msgid "Copyright"
+msgstr "Πνευματική ιδιοκτησία"
+
+msgid "Fullscreen mode"
+msgstr "ΛΕΙΤΟΥΡΓΙΑ ΠΛΗΡΟΥΣ ΟΘΟΝΗΣ"
+
+msgid "Open an issue"
+msgstr "Ανοίξτε ένα ζήτημα"
+
+msgid "previous page"
+msgstr "προηγούμενη σελίδα"
+
+msgid "Download this page"
+msgstr "Λήψη αυτής της σελίδας"
+
+msgid "Theme by the"
+msgstr "Θέμα από το"
diff --git a/docs/source/_static/locales/eo/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/eo/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..6749f3a3
--- /dev/null
+++ b/docs/source/_static/locales/eo/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: eo\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "sugesti redaktadon"
+
+msgid "Last updated on"
+msgstr "Laste ĝisdatigita la"
+
+msgid "Edit this page"
+msgstr "Redaktu ĉi tiun paĝon"
+
+msgid "Launch"
+msgstr "Lanĉo"
+
+msgid "Print to PDF"
+msgstr "Presi al PDF"
+
+msgid "open issue"
+msgstr "malferma numero"
+
+msgid "Download notebook file"
+msgstr "Elŝutu kajeran dosieron"
+
+msgid "Toggle navigation"
+msgstr "Ŝalti navigadon"
+
+msgid "Source repository"
+msgstr "Fonto-deponejo"
+
+msgid "By the"
+msgstr "Per la"
+
+msgid "next page"
+msgstr "sekva paĝo"
+
+msgid "repository"
+msgstr "deponejo"
+
+msgid "Sphinx Book Theme"
+msgstr "Sfinksa Libro-Temo"
+
+msgid "Download source file"
+msgstr "Elŝutu fontodosieron"
+
+msgid "Contents"
+msgstr "Enhavo"
+
+msgid "By"
+msgstr "De"
+
+msgid "Copyright"
+msgstr "Kopirajto"
+
+msgid "Fullscreen mode"
+msgstr "Plenekrana reĝimo"
+
+msgid "Open an issue"
+msgstr "Malfermu numeron"
+
+msgid "previous page"
+msgstr "antaŭa paĝo"
+
+msgid "Download this page"
+msgstr "Elŝutu ĉi tiun paĝon"
+
+msgid "Theme by the"
+msgstr "Temo de la"
diff --git a/docs/source/_static/locales/es/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/es/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..71dde37f
--- /dev/null
+++ b/docs/source/_static/locales/es/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: es\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "sugerir editar"
+
+msgid "Last updated on"
+msgstr "Ultima actualización en"
+
+msgid "Edit this page"
+msgstr "Edita esta página"
+
+msgid "Launch"
+msgstr "Lanzamiento"
+
+msgid "Print to PDF"
+msgstr "Imprimir en PDF"
+
+msgid "open issue"
+msgstr "Tema abierto"
+
+msgid "Download notebook file"
+msgstr "Descargar archivo de cuaderno"
+
+msgid "Toggle navigation"
+msgstr "Navegación de palanca"
+
+msgid "Source repository"
+msgstr "Repositorio de origen"
+
+msgid "By the"
+msgstr "Por el"
+
+msgid "next page"
+msgstr "siguiente página"
+
+msgid "repository"
+msgstr "repositorio"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema del libro de la esfinge"
+
+msgid "Download source file"
+msgstr "Descargar archivo fuente"
+
+msgid "Contents"
+msgstr "Contenido"
+
+msgid "By"
+msgstr "Por"
+
+msgid "Copyright"
+msgstr "Derechos de autor"
+
+msgid "Fullscreen mode"
+msgstr "Modo de pantalla completa"
+
+msgid "Open an issue"
+msgstr "Abrir un problema"
+
+msgid "previous page"
+msgstr "pagina anterior"
+
+msgid "Download this page"
+msgstr "Descarga esta pagina"
+
+msgid "Theme by the"
+msgstr "Tema por el"
diff --git a/docs/source/_static/locales/et/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/et/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..cdcd07c7
--- /dev/null
+++ b/docs/source/_static/locales/et/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: et\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "soovita muuta"
+
+msgid "Last updated on"
+msgstr "Viimati uuendatud"
+
+msgid "Edit this page"
+msgstr "Muutke seda lehte"
+
+msgid "Launch"
+msgstr "Käivitage"
+
+msgid "Print to PDF"
+msgstr "Prindi PDF-i"
+
+msgid "open issue"
+msgstr "avatud küsimus"
+
+msgid "Download notebook file"
+msgstr "Laadige sülearvuti fail alla"
+
+msgid "Toggle navigation"
+msgstr "Lülita navigeerimine sisse"
+
+msgid "Source repository"
+msgstr "Allikahoidla"
+
+msgid "By the"
+msgstr "Autor"
+
+msgid "next page"
+msgstr "järgmine leht"
+
+msgid "repository"
+msgstr "hoidla"
+
+msgid "Sphinx Book Theme"
+msgstr "Sfinksiraamatu teema"
+
+msgid "Download source file"
+msgstr "Laadige alla lähtefail"
+
+msgid "Contents"
+msgstr "Sisu"
+
+msgid "By"
+msgstr "Kõrval"
+
+msgid "Copyright"
+msgstr "Autoriõigus"
+
+msgid "Fullscreen mode"
+msgstr "Täisekraanirežiim"
+
+msgid "Open an issue"
+msgstr "Avage probleem"
+
+msgid "previous page"
+msgstr "eelmine leht"
+
+msgid "Download this page"
+msgstr "Laadige see leht alla"
+
+msgid "Theme by the"
+msgstr "Teema"
diff --git a/docs/source/_static/locales/fi/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/fi/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..3c3dd089
--- /dev/null
+++ b/docs/source/_static/locales/fi/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: fi\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "ehdottaa muokkausta"
+
+msgid "Last updated on"
+msgstr "Viimeksi päivitetty"
+
+msgid "Edit this page"
+msgstr "Muokkaa tätä sivua"
+
+msgid "Launch"
+msgstr "Tuoda markkinoille"
+
+msgid "Print to PDF"
+msgstr "Tulosta PDF-tiedostoon"
+
+msgid "open issue"
+msgstr "avoin ongelma"
+
+msgid "Download notebook file"
+msgstr "Lataa muistikirjatiedosto"
+
+msgid "Toggle navigation"
+msgstr "Vaihda navigointia"
+
+msgid "Source repository"
+msgstr "Lähteen arkisto"
+
+msgid "By the"
+msgstr "Mukaan"
+
+msgid "next page"
+msgstr "seuraava sivu"
+
+msgid "repository"
+msgstr "arkisto"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx-kirjan teema"
+
+msgid "Download source file"
+msgstr "Lataa lähdetiedosto"
+
+msgid "Contents"
+msgstr "Sisällys"
+
+msgid "By"
+msgstr "Tekijä"
+
+msgid "Copyright"
+msgstr "Tekijänoikeus"
+
+msgid "Fullscreen mode"
+msgstr "Koko näytön tila"
+
+msgid "Open an issue"
+msgstr "Avaa ongelma"
+
+msgid "previous page"
+msgstr "Edellinen sivu"
+
+msgid "Download this page"
+msgstr "Lataa tämä sivu"
+
+msgid "Theme by the"
+msgstr "Teeman tekijä"
diff --git a/docs/source/_static/locales/fr/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/fr/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..b57d2fe7
--- /dev/null
+++ b/docs/source/_static/locales/fr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: fr\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "suggestion de modification"
+
+msgid "Last updated on"
+msgstr "Dernière mise à jour le"
+
+msgid "Edit this page"
+msgstr "Modifier cette page"
+
+msgid "Launch"
+msgstr "lancement"
+
+msgid "Print to PDF"
+msgstr "Imprimer au format PDF"
+
+msgid "open issue"
+msgstr "signaler un problème"
+
+msgid "Download notebook file"
+msgstr "Télécharger le fichier notebook"
+
+msgid "Toggle navigation"
+msgstr "Basculer la navigation"
+
+msgid "Source repository"
+msgstr "Dépôt source"
+
+msgid "By the"
+msgstr "Par le"
+
+msgid "next page"
+msgstr "page suivante"
+
+msgid "repository"
+msgstr "dépôt"
+
+msgid "Sphinx Book Theme"
+msgstr "Thème du livre Sphinx"
+
+msgid "Download source file"
+msgstr "Télécharger le fichier source"
+
+msgid "Contents"
+msgstr "Contenu"
+
+msgid "By"
+msgstr "Par"
+
+msgid "Copyright"
+msgstr "droits d'auteur"
+
+msgid "Fullscreen mode"
+msgstr "Mode plein écran"
+
+msgid "Open an issue"
+msgstr "Ouvrez un problème"
+
+msgid "previous page"
+msgstr "page précédente"
+
+msgid "Download this page"
+msgstr "Téléchargez cette page"
+
+msgid "Theme by the"
+msgstr "Thème par le"
diff --git a/docs/source/_static/locales/hr/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/hr/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..4c425e89
--- /dev/null
+++ b/docs/source/_static/locales/hr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: hr\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "predloži uređivanje"
+
+msgid "Last updated on"
+msgstr "Posljednje ažuriranje:"
+
+msgid "Edit this page"
+msgstr "Uredite ovu stranicu"
+
+msgid "Launch"
+msgstr "Pokrenite"
+
+msgid "Print to PDF"
+msgstr "Ispis u PDF"
+
+msgid "open issue"
+msgstr "otvoreno izdanje"
+
+msgid "Download notebook file"
+msgstr "Preuzmi datoteku bilježnice"
+
+msgid "Toggle navigation"
+msgstr "Uključi / isključi navigaciju"
+
+msgid "Source repository"
+msgstr "Izvorno spremište"
+
+msgid "By the"
+msgstr "Od strane"
+
+msgid "next page"
+msgstr "sljedeća stranica"
+
+msgid "repository"
+msgstr "spremište"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema knjige Sphinx"
+
+msgid "Download source file"
+msgstr "Preuzmi izvornu datoteku"
+
+msgid "Contents"
+msgstr "Sadržaj"
+
+msgid "By"
+msgstr "Po"
+
+msgid "Copyright"
+msgstr "Autorska prava"
+
+msgid "Fullscreen mode"
+msgstr "Način preko cijelog zaslona"
+
+msgid "Open an issue"
+msgstr "Otvorite izdanje"
+
+msgid "previous page"
+msgstr "Prethodna stranica"
+
+msgid "Download this page"
+msgstr "Preuzmite ovu stranicu"
+
+msgid "Theme by the"
+msgstr "Tema autora"
diff --git a/docs/source/_static/locales/id/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/id/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..5db2ae14
--- /dev/null
+++ b/docs/source/_static/locales/id/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: id\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "menyarankan edit"
+
+msgid "Last updated on"
+msgstr "Terakhir diperbarui saat"
+
+msgid "Edit this page"
+msgstr "Edit halaman ini"
+
+msgid "Launch"
+msgstr "Meluncurkan"
+
+msgid "Print to PDF"
+msgstr "Cetak ke PDF"
+
+msgid "open issue"
+msgstr "masalah terbuka"
+
+msgid "Download notebook file"
+msgstr "Unduh file notebook"
+
+msgid "Toggle navigation"
+msgstr "Alihkan navigasi"
+
+msgid "Source repository"
+msgstr "Repositori sumber"
+
+msgid "By the"
+msgstr "Oleh"
+
+msgid "next page"
+msgstr "halaman selanjutnya"
+
+msgid "repository"
+msgstr "gudang"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema Buku Sphinx"
+
+msgid "Download source file"
+msgstr "Unduh file sumber"
+
+msgid "Contents"
+msgstr "Isi"
+
+msgid "By"
+msgstr "Oleh"
+
+msgid "Copyright"
+msgstr "hak cipta"
+
+msgid "Fullscreen mode"
+msgstr "Mode layar penuh"
+
+msgid "Open an issue"
+msgstr "Buka masalah"
+
+msgid "previous page"
+msgstr "halaman sebelumnya"
+
+msgid "Download this page"
+msgstr "Unduh halaman ini"
+
+msgid "Theme by the"
+msgstr "Tema oleh"
diff --git a/docs/source/_static/locales/it/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/it/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..7d54fdef
--- /dev/null
+++ b/docs/source/_static/locales/it/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: it\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "suggerisci modifica"
+
+msgid "Last updated on"
+msgstr "Ultimo aggiornamento il"
+
+msgid "Edit this page"
+msgstr "Modifica questa pagina"
+
+msgid "Launch"
+msgstr "Lanciare"
+
+msgid "Print to PDF"
+msgstr "Stampa in PDF"
+
+msgid "open issue"
+msgstr "questione aperta"
+
+msgid "Download notebook file"
+msgstr "Scarica il file del taccuino"
+
+msgid "Toggle navigation"
+msgstr "Attiva / disattiva la navigazione"
+
+msgid "Source repository"
+msgstr "Repository di origine"
+
+msgid "By the"
+msgstr "Dal"
+
+msgid "next page"
+msgstr "pagina successiva"
+
+msgid "repository"
+msgstr "repository"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema del libro della Sfinge"
+
+msgid "Download source file"
+msgstr "Scarica il file sorgente"
+
+msgid "Contents"
+msgstr "Contenuti"
+
+msgid "By"
+msgstr "Di"
+
+msgid "Copyright"
+msgstr "Diritto d'autore"
+
+msgid "Fullscreen mode"
+msgstr "Modalità schermo intero"
+
+msgid "Open an issue"
+msgstr "Apri un problema"
+
+msgid "previous page"
+msgstr "pagina precedente"
+
+msgid "Download this page"
+msgstr "Scarica questa pagina"
+
+msgid "Theme by the"
+msgstr "Tema di"
diff --git a/docs/source/_static/locales/iw/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/iw/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..32b017cf
--- /dev/null
+++ b/docs/source/_static/locales/iw/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: iw\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "מציע לערוך"
+
+msgid "Last updated on"
+msgstr "עודכן לאחרונה ב"
+
+msgid "Edit this page"
+msgstr "ערוך דף זה"
+
+msgid "Launch"
+msgstr "לְהַשִׁיק"
+
+msgid "Print to PDF"
+msgstr "הדפס לקובץ PDF"
+
+msgid "open issue"
+msgstr "בעיה פתוחה"
+
+msgid "Download notebook file"
+msgstr "הורד קובץ מחברת"
+
+msgid "Toggle navigation"
+msgstr "החלף ניווט"
+
+msgid "Source repository"
+msgstr "מאגר המקורות"
+
+msgid "By the"
+msgstr "דרך"
+
+msgid "next page"
+msgstr "עמוד הבא"
+
+msgid "repository"
+msgstr "מאגר"
+
+msgid "Sphinx Book Theme"
+msgstr "נושא ספר ספינקס"
+
+msgid "Download source file"
+msgstr "הורד את קובץ המקור"
+
+msgid "Contents"
+msgstr "תוכן"
+
+msgid "By"
+msgstr "על ידי"
+
+msgid "Copyright"
+msgstr "זכויות יוצרים"
+
+msgid "Fullscreen mode"
+msgstr "מצב מסך מלא"
+
+msgid "Open an issue"
+msgstr "פתח גיליון"
+
+msgid "previous page"
+msgstr "עמוד קודם"
+
+msgid "Download this page"
+msgstr "הורד דף זה"
+
+msgid "Theme by the"
+msgstr "נושא מאת"
diff --git a/docs/source/_static/locales/ja/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ja/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..16924e19
--- /dev/null
+++ b/docs/source/_static/locales/ja/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ja\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "編集を提案する"
+
+msgid "Last updated on"
+msgstr "最終更新日"
+
+msgid "Edit this page"
+msgstr "このページを編集"
+
+msgid "Launch"
+msgstr "起動"
+
+msgid "Print to PDF"
+msgstr "PDFに印刷"
+
+msgid "open issue"
+msgstr "未解決の問題"
+
+msgid "Download notebook file"
+msgstr "ノートブックファイルをダウンロード"
+
+msgid "Toggle navigation"
+msgstr "ナビゲーションを切り替え"
+
+msgid "Source repository"
+msgstr "ソースリポジトリ"
+
+msgid "By the"
+msgstr "によって"
+
+msgid "next page"
+msgstr "次のページ"
+
+msgid "repository"
+msgstr "リポジトリ"
+
+msgid "Sphinx Book Theme"
+msgstr "スフィンクスの本のテーマ"
+
+msgid "Download source file"
+msgstr "ソースファイルをダウンロード"
+
+msgid "Contents"
+msgstr "目次"
+
+msgid "By"
+msgstr "著者"
+
+msgid "Copyright"
+msgstr "Copyright"
+
+msgid "Fullscreen mode"
+msgstr "全画面モード"
+
+msgid "Open an issue"
+msgstr "問題を報告"
+
+msgid "previous page"
+msgstr "前のページ"
+
+msgid "Download this page"
+msgstr "このページをダウンロード"
+
+msgid "Theme by the"
+msgstr "のテーマ"
diff --git a/docs/source/_static/locales/ko/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ko/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..69dd18f7
--- /dev/null
+++ b/docs/source/_static/locales/ko/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ko\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "편집 제안"
+
+msgid "Last updated on"
+msgstr "마지막 업데이트"
+
+msgid "Edit this page"
+msgstr "이 페이지 편집"
+
+msgid "Launch"
+msgstr "시작하다"
+
+msgid "Print to PDF"
+msgstr "PDF로 인쇄"
+
+msgid "open issue"
+msgstr "열린 문제"
+
+msgid "Download notebook file"
+msgstr "노트북 파일 다운로드"
+
+msgid "Toggle navigation"
+msgstr "탐색 전환"
+
+msgid "Source repository"
+msgstr "소스 저장소"
+
+msgid "By the"
+msgstr "에 의해"
+
+msgid "next page"
+msgstr "다음 페이지"
+
+msgid "repository"
+msgstr "저장소"
+
+msgid "Sphinx Book Theme"
+msgstr "스핑크스 도서 테마"
+
+msgid "Download source file"
+msgstr "소스 파일 다운로드"
+
+msgid "Contents"
+msgstr "내용"
+
+msgid "By"
+msgstr "으로"
+
+msgid "Copyright"
+msgstr "저작권"
+
+msgid "Fullscreen mode"
+msgstr "전체 화면으로보기"
+
+msgid "Open an issue"
+msgstr "이슈 열기"
+
+msgid "previous page"
+msgstr "이전 페이지"
+
+msgid "Download this page"
+msgstr "이 페이지 다운로드"
+
+msgid "Theme by the"
+msgstr "테마별"
diff --git a/docs/source/_static/locales/lt/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/lt/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..9f037752
--- /dev/null
+++ b/docs/source/_static/locales/lt/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: lt\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "pasiūlyti redaguoti"
+
+msgid "Last updated on"
+msgstr "Paskutinį kartą atnaujinta"
+
+msgid "Edit this page"
+msgstr "Redaguoti šį puslapį"
+
+msgid "Launch"
+msgstr "Paleiskite"
+
+msgid "Print to PDF"
+msgstr "Spausdinti į PDF"
+
+msgid "open issue"
+msgstr "atviras klausimas"
+
+msgid "Download notebook file"
+msgstr "Atsisiųsti nešiojamojo kompiuterio failą"
+
+msgid "Toggle navigation"
+msgstr "Perjungti naršymą"
+
+msgid "Source repository"
+msgstr "Šaltinio saugykla"
+
+msgid "By the"
+msgstr "Prie"
+
+msgid "next page"
+msgstr "Kitas puslapis"
+
+msgid "repository"
+msgstr "saugykla"
+
+msgid "Sphinx Book Theme"
+msgstr "Sfinkso knygos tema"
+
+msgid "Download source file"
+msgstr "Atsisiųsti šaltinio failą"
+
+msgid "Contents"
+msgstr "Turinys"
+
+msgid "By"
+msgstr "Iki"
+
+msgid "Copyright"
+msgstr "Autorių teisės"
+
+msgid "Fullscreen mode"
+msgstr "Pilno ekrano režimas"
+
+msgid "Open an issue"
+msgstr "Atidarykite problemą"
+
+msgid "previous page"
+msgstr "Ankstesnis puslapis"
+
+msgid "Download this page"
+msgstr "Atsisiųskite šį puslapį"
+
+msgid "Theme by the"
+msgstr "Tema"
diff --git a/docs/source/_static/locales/lv/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/lv/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..c9633b54
--- /dev/null
+++ b/docs/source/_static/locales/lv/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: lv\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "ieteikt rediģēt"
+
+msgid "Last updated on"
+msgstr "Pēdējoreiz atjaunināts"
+
+msgid "Edit this page"
+msgstr "Rediģēt šo lapu"
+
+msgid "Launch"
+msgstr "Uzsākt"
+
+msgid "Print to PDF"
+msgstr "Drukāt PDF formātā"
+
+msgid "open issue"
+msgstr "atklāts jautājums"
+
+msgid "Download notebook file"
+msgstr "Lejupielādēt piezīmju grāmatiņu"
+
+msgid "Toggle navigation"
+msgstr "Pārslēgt navigāciju"
+
+msgid "Source repository"
+msgstr "Avota krātuve"
+
+msgid "By the"
+msgstr "Ar"
+
+msgid "next page"
+msgstr "nākamā lapaspuse"
+
+msgid "repository"
+msgstr "krātuve"
+
+msgid "Sphinx Book Theme"
+msgstr "Sfinksa grāmatas tēma"
+
+msgid "Download source file"
+msgstr "Lejupielādēt avota failu"
+
+msgid "Contents"
+msgstr "Saturs"
+
+msgid "By"
+msgstr "Autors"
+
+msgid "Copyright"
+msgstr "Autortiesības"
+
+msgid "Fullscreen mode"
+msgstr "Pilnekrāna režīms"
+
+msgid "Open an issue"
+msgstr "Atveriet problēmu"
+
+msgid "previous page"
+msgstr "iepriekšējā lapa"
+
+msgid "Download this page"
+msgstr "Lejupielādējiet šo lapu"
+
+msgid "Theme by the"
+msgstr "Autora tēma"
diff --git a/docs/source/_static/locales/ml/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ml/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..9a6a41e8
--- /dev/null
+++ b/docs/source/_static/locales/ml/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ml\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "എഡിറ്റുചെയ്യാൻ നിർദ്ദേശിക്കുക"
+
+msgid "Last updated on"
+msgstr "അവസാനം അപ്ഡേറ്റുചെയ്തത്"
+
+msgid "Edit this page"
+msgstr "ഈ പേജ് എഡിറ്റുചെയ്യുക"
+
+msgid "Launch"
+msgstr "സമാരംഭിക്കുക"
+
+msgid "Print to PDF"
+msgstr "PDF- ലേക്ക് പ്രിന്റുചെയ്യുക"
+
+msgid "open issue"
+msgstr "തുറന്ന പ്രശ്നം"
+
+msgid "Download notebook file"
+msgstr "നോട്ട്ബുക്ക് ഫയൽ ഡൺലോഡ് ചെയ്യുക"
+
+msgid "Toggle navigation"
+msgstr "നാവിഗേഷൻ ടോഗിൾ ചെയ്യുക"
+
+msgid "Source repository"
+msgstr "ഉറവിട ശേഖരം"
+
+msgid "By the"
+msgstr "എഴുതിയത്"
+
+msgid "next page"
+msgstr "അടുത്ത പേജ്"
+
+msgid "Sphinx Book Theme"
+msgstr "സ്ഫിങ്ക്സ് പുസ്തക തീം"
+
+msgid "Download source file"
+msgstr "ഉറവിട ഫയൽ ഡൗൺലോഡുചെയ്യുക"
+
+msgid "By"
+msgstr "എഴുതിയത്"
+
+msgid "Copyright"
+msgstr "പകർപ്പവകാശം"
+
+msgid "Open an issue"
+msgstr "ഒരു പ്രശ്നം തുറക്കുക"
+
+msgid "previous page"
+msgstr "മുൻപത്തെ താൾ"
+
+msgid "Download this page"
+msgstr "ഈ പേജ് ഡൗൺലോഡുചെയ്യുക"
+
+msgid "Theme by the"
+msgstr "പ്രമേയം"
diff --git a/docs/source/_static/locales/mr/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/mr/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..ef72d8c6
--- /dev/null
+++ b/docs/source/_static/locales/mr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: mr\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "संपादन सुचवा"
+
+msgid "Last updated on"
+msgstr "अखेरचे अद्यतनित"
+
+msgid "Edit this page"
+msgstr "हे पृष्ठ संपादित करा"
+
+msgid "Launch"
+msgstr "लाँच करा"
+
+msgid "Print to PDF"
+msgstr "पीडीएफवर मुद्रित करा"
+
+msgid "open issue"
+msgstr "खुला मुद्दा"
+
+msgid "Download notebook file"
+msgstr "नोटबुक फाईल डाउनलोड करा"
+
+msgid "Toggle navigation"
+msgstr "नेव्हिगेशन टॉगल करा"
+
+msgid "Source repository"
+msgstr "स्त्रोत भांडार"
+
+msgid "By the"
+msgstr "द्वारा"
+
+msgid "next page"
+msgstr "पुढील पृष्ठ"
+
+msgid "Sphinx Book Theme"
+msgstr "स्फिंक्स बुक थीम"
+
+msgid "Download source file"
+msgstr "स्त्रोत फाइल डाउनलोड करा"
+
+msgid "By"
+msgstr "द्वारा"
+
+msgid "Copyright"
+msgstr "कॉपीराइट"
+
+msgid "Open an issue"
+msgstr "एक मुद्दा उघडा"
+
+msgid "previous page"
+msgstr "मागील पान"
+
+msgid "Download this page"
+msgstr "हे पृष्ठ डाउनलोड करा"
+
+msgid "Theme by the"
+msgstr "द्वारा थीम"
diff --git a/docs/source/_static/locales/ms/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ms/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..e29cbe2e
--- /dev/null
+++ b/docs/source/_static/locales/ms/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ms\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "cadangkan edit"
+
+msgid "Last updated on"
+msgstr "Terakhir dikemas kini pada"
+
+msgid "Edit this page"
+msgstr "Edit halaman ini"
+
+msgid "Launch"
+msgstr "Lancarkan"
+
+msgid "Print to PDF"
+msgstr "Cetak ke PDF"
+
+msgid "open issue"
+msgstr "isu terbuka"
+
+msgid "Download notebook file"
+msgstr "Muat turun fail buku nota"
+
+msgid "Toggle navigation"
+msgstr "Togol navigasi"
+
+msgid "Source repository"
+msgstr "Repositori sumber"
+
+msgid "By the"
+msgstr "Oleh"
+
+msgid "next page"
+msgstr "muka surat seterusnya"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema Buku Sphinx"
+
+msgid "Download source file"
+msgstr "Muat turun fail sumber"
+
+msgid "By"
+msgstr "Oleh"
+
+msgid "Copyright"
+msgstr "hak cipta"
+
+msgid "Open an issue"
+msgstr "Buka masalah"
+
+msgid "previous page"
+msgstr "halaman sebelumnya"
+
+msgid "Download this page"
+msgstr "Muat turun halaman ini"
+
+msgid "Theme by the"
+msgstr "Tema oleh"
diff --git a/docs/source/_static/locales/nl/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/nl/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..e4844d7c
--- /dev/null
+++ b/docs/source/_static/locales/nl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: nl\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "suggereren bewerken"
+
+msgid "Last updated on"
+msgstr "Laatst geupdate op"
+
+msgid "Edit this page"
+msgstr "bewerk deze pagina"
+
+msgid "Launch"
+msgstr "Lancering"
+
+msgid "Print to PDF"
+msgstr "Afdrukken naar pdf"
+
+msgid "open issue"
+msgstr "open probleem"
+
+msgid "Download notebook file"
+msgstr "Download notebookbestand"
+
+msgid "Toggle navigation"
+msgstr "Schakel navigatie"
+
+msgid "Source repository"
+msgstr "Bronopslagplaats"
+
+msgid "By the"
+msgstr "Door de"
+
+msgid "next page"
+msgstr "volgende bladzijde"
+
+msgid "repository"
+msgstr "repository"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx-boekthema"
+
+msgid "Download source file"
+msgstr "Download het bronbestand"
+
+msgid "Contents"
+msgstr "Inhoud"
+
+msgid "By"
+msgstr "Door"
+
+msgid "Copyright"
+msgstr "auteursrechten"
+
+msgid "Fullscreen mode"
+msgstr "Volledig scherm"
+
+msgid "Open an issue"
+msgstr "Open een probleem"
+
+msgid "previous page"
+msgstr "vorige pagina"
+
+msgid "Download this page"
+msgstr "Download deze pagina"
+
+msgid "Theme by the"
+msgstr "Thema door de"
diff --git a/docs/source/_static/locales/no/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/no/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..d079dd9b
--- /dev/null
+++ b/docs/source/_static/locales/no/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: no\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "foreslå redigering"
+
+msgid "Last updated on"
+msgstr "Sist oppdatert den"
+
+msgid "Edit this page"
+msgstr "Rediger denne siden"
+
+msgid "Launch"
+msgstr "Start"
+
+msgid "Print to PDF"
+msgstr "Skriv ut til PDF"
+
+msgid "open issue"
+msgstr "åpent nummer"
+
+msgid "Download notebook file"
+msgstr "Last ned notatbokfilen"
+
+msgid "Toggle navigation"
+msgstr "Bytt navigasjon"
+
+msgid "Source repository"
+msgstr "Kildedepot"
+
+msgid "By the"
+msgstr "Ved"
+
+msgid "next page"
+msgstr "neste side"
+
+msgid "repository"
+msgstr "oppbevaringssted"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx boktema"
+
+msgid "Download source file"
+msgstr "Last ned kildefilen"
+
+msgid "Contents"
+msgstr "Innhold"
+
+msgid "By"
+msgstr "Av"
+
+msgid "Copyright"
+msgstr "opphavsrett"
+
+msgid "Fullscreen mode"
+msgstr "Fullskjerm-modus"
+
+msgid "Open an issue"
+msgstr "Åpne et problem"
+
+msgid "previous page"
+msgstr "forrige side"
+
+msgid "Download this page"
+msgstr "Last ned denne siden"
+
+msgid "Theme by the"
+msgstr "Tema av"
diff --git a/docs/source/_static/locales/pl/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/pl/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..fcac51d3
--- /dev/null
+++ b/docs/source/_static/locales/pl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: pl\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "zaproponuj edycję"
+
+msgid "Last updated on"
+msgstr "Ostatnia aktualizacja"
+
+msgid "Edit this page"
+msgstr "Edytuj tę strone"
+
+msgid "Launch"
+msgstr "Uruchomić"
+
+msgid "Print to PDF"
+msgstr "Drukuj do PDF"
+
+msgid "open issue"
+msgstr "otwarty problem"
+
+msgid "Download notebook file"
+msgstr "Pobierz plik notatnika"
+
+msgid "Toggle navigation"
+msgstr "Przełącz nawigację"
+
+msgid "Source repository"
+msgstr "Repozytorium źródłowe"
+
+msgid "By the"
+msgstr "Przez"
+
+msgid "next page"
+msgstr "Następna strona"
+
+msgid "repository"
+msgstr "magazyn"
+
+msgid "Sphinx Book Theme"
+msgstr "Motyw książki Sphinx"
+
+msgid "Download source file"
+msgstr "Pobierz plik źródłowy"
+
+msgid "Contents"
+msgstr "Zawartość"
+
+msgid "By"
+msgstr "Przez"
+
+msgid "Copyright"
+msgstr "prawa autorskie"
+
+msgid "Fullscreen mode"
+msgstr "Pełny ekran"
+
+msgid "Open an issue"
+msgstr "Otwórz problem"
+
+msgid "previous page"
+msgstr "Poprzednia strona"
+
+msgid "Download this page"
+msgstr "Pobierz tę stronę"
+
+msgid "Theme by the"
+msgstr "Motyw autorstwa"
diff --git a/docs/source/_static/locales/pt/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/pt/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..1761db08
--- /dev/null
+++ b/docs/source/_static/locales/pt/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: pt\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "sugerir edição"
+
+msgid "Last updated on"
+msgstr "Última atualização em"
+
+msgid "Edit this page"
+msgstr "Edite essa página"
+
+msgid "Launch"
+msgstr "Lançamento"
+
+msgid "Print to PDF"
+msgstr "Imprimir em PDF"
+
+msgid "open issue"
+msgstr "questão aberta"
+
+msgid "Download notebook file"
+msgstr "Baixar arquivo de notebook"
+
+msgid "Toggle navigation"
+msgstr "Alternar de navegação"
+
+msgid "Source repository"
+msgstr "Repositório fonte"
+
+msgid "By the"
+msgstr "Pelo"
+
+msgid "next page"
+msgstr "próxima página"
+
+msgid "repository"
+msgstr "repositório"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema do livro Sphinx"
+
+msgid "Download source file"
+msgstr "Baixar arquivo fonte"
+
+msgid "Contents"
+msgstr "Conteúdo"
+
+msgid "By"
+msgstr "De"
+
+msgid "Copyright"
+msgstr "direito autoral"
+
+msgid "Fullscreen mode"
+msgstr "Modo tela cheia"
+
+msgid "Open an issue"
+msgstr "Abra um problema"
+
+msgid "previous page"
+msgstr "página anterior"
+
+msgid "Download this page"
+msgstr "Baixe esta página"
+
+msgid "Theme by the"
+msgstr "Tema por"
diff --git a/docs/source/_static/locales/ro/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ro/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..db865c8f
--- /dev/null
+++ b/docs/source/_static/locales/ro/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ro\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "sugerează editare"
+
+msgid "Last updated on"
+msgstr "Ultima actualizare la"
+
+msgid "Edit this page"
+msgstr "Editați această pagină"
+
+msgid "Launch"
+msgstr "Lansa"
+
+msgid "Print to PDF"
+msgstr "Imprimați în PDF"
+
+msgid "open issue"
+msgstr "problema deschisă"
+
+msgid "Download notebook file"
+msgstr "Descărcați fișierul notebook"
+
+msgid "Toggle navigation"
+msgstr "Comutare navigare"
+
+msgid "Source repository"
+msgstr "Depozit sursă"
+
+msgid "By the"
+msgstr "Langa"
+
+msgid "next page"
+msgstr "pagina următoare"
+
+msgid "repository"
+msgstr "repertoriu"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema Sphinx Book"
+
+msgid "Download source file"
+msgstr "Descărcați fișierul sursă"
+
+msgid "Contents"
+msgstr "Cuprins"
+
+msgid "By"
+msgstr "De"
+
+msgid "Copyright"
+msgstr "Drepturi de autor"
+
+msgid "Fullscreen mode"
+msgstr "Modul ecran întreg"
+
+msgid "Open an issue"
+msgstr "Deschideți o problemă"
+
+msgid "previous page"
+msgstr "pagina anterioară"
+
+msgid "Download this page"
+msgstr "Descarcă această pagină"
+
+msgid "Theme by the"
+msgstr "Tema de"
diff --git a/docs/source/_static/locales/ru/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ru/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..84ab6eb5
--- /dev/null
+++ b/docs/source/_static/locales/ru/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ru\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "предложить редактировать"
+
+msgid "Last updated on"
+msgstr "Последнее обновление"
+
+msgid "Edit this page"
+msgstr "Редактировать эту страницу"
+
+msgid "Launch"
+msgstr "Запуск"
+
+msgid "Print to PDF"
+msgstr "Распечатать в PDF"
+
+msgid "open issue"
+msgstr "открытый вопрос"
+
+msgid "Download notebook file"
+msgstr "Скачать файл записной книжки"
+
+msgid "Toggle navigation"
+msgstr "Переключить навигацию"
+
+msgid "Source repository"
+msgstr "Исходный репозиторий"
+
+msgid "By the"
+msgstr "Посредством"
+
+msgid "next page"
+msgstr "Следующая страница"
+
+msgid "repository"
+msgstr "хранилище"
+
+msgid "Sphinx Book Theme"
+msgstr "Тема книги Сфинкс"
+
+msgid "Download source file"
+msgstr "Скачать исходный файл"
+
+msgid "Contents"
+msgstr "Содержание"
+
+msgid "By"
+msgstr "По"
+
+msgid "Copyright"
+msgstr "авторское право"
+
+msgid "Fullscreen mode"
+msgstr "Полноэкранный режим"
+
+msgid "Open an issue"
+msgstr "Открыть вопрос"
+
+msgid "previous page"
+msgstr "Предыдущая страница"
+
+msgid "Download this page"
+msgstr "Загрузите эту страницу"
+
+msgid "Theme by the"
+msgstr "Тема от"
diff --git a/docs/source/_static/locales/sk/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/sk/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..e44878b5
--- /dev/null
+++ b/docs/source/_static/locales/sk/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: sk\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "navrhnúť úpravu"
+
+msgid "Last updated on"
+msgstr "Posledná aktualizácia dňa"
+
+msgid "Edit this page"
+msgstr "Upraviť túto stránku"
+
+msgid "Launch"
+msgstr "Spustiť"
+
+msgid "Print to PDF"
+msgstr "Tlač do PDF"
+
+msgid "open issue"
+msgstr "otvorené vydanie"
+
+msgid "Download notebook file"
+msgstr "Stiahnite si zošit"
+
+msgid "Toggle navigation"
+msgstr "Prepnúť navigáciu"
+
+msgid "Source repository"
+msgstr "Zdrojové úložisko"
+
+msgid "By the"
+msgstr "Podľa"
+
+msgid "next page"
+msgstr "ďalšia strana"
+
+msgid "repository"
+msgstr "Úložisko"
+
+msgid "Sphinx Book Theme"
+msgstr "Téma knihy Sfinga"
+
+msgid "Download source file"
+msgstr "Stiahnite si zdrojový súbor"
+
+msgid "Contents"
+msgstr "Obsah"
+
+msgid "By"
+msgstr "Autor:"
+
+msgid "Copyright"
+msgstr "Autorské práva"
+
+msgid "Fullscreen mode"
+msgstr "Režim celej obrazovky"
+
+msgid "Open an issue"
+msgstr "Otvorte problém"
+
+msgid "previous page"
+msgstr "predchádzajúca strana"
+
+msgid "Download this page"
+msgstr "Stiahnite si túto stránku"
+
+msgid "Theme by the"
+msgstr "Téma od"
diff --git a/docs/source/_static/locales/sl/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/sl/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..228939bc
--- /dev/null
+++ b/docs/source/_static/locales/sl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: sl\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "predlagajte urejanje"
+
+msgid "Last updated on"
+msgstr "Nazadnje posodobljeno dne"
+
+msgid "Edit this page"
+msgstr "Uredite to stran"
+
+msgid "Launch"
+msgstr "Kosilo"
+
+msgid "Print to PDF"
+msgstr "Natisni v PDF"
+
+msgid "open issue"
+msgstr "odprto vprašanje"
+
+msgid "Download notebook file"
+msgstr "Prenesite datoteko zvezka"
+
+msgid "Toggle navigation"
+msgstr "Preklopi navigacijo"
+
+msgid "Source repository"
+msgstr "Izvorno skladišče"
+
+msgid "By the"
+msgstr "Avtor"
+
+msgid "next page"
+msgstr "Naslednja stran"
+
+msgid "repository"
+msgstr "odlagališče"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema knjige Sphinx"
+
+msgid "Download source file"
+msgstr "Prenesite izvorno datoteko"
+
+msgid "Contents"
+msgstr "Vsebina"
+
+msgid "By"
+msgstr "Avtor"
+
+msgid "Copyright"
+msgstr "avtorske pravice"
+
+msgid "Fullscreen mode"
+msgstr "Celozaslonski način"
+
+msgid "Open an issue"
+msgstr "Odprite številko"
+
+msgid "previous page"
+msgstr "Prejšnja stran"
+
+msgid "Download this page"
+msgstr "Prenesite to stran"
+
+msgid "Theme by the"
+msgstr "Tema avtorja"
diff --git a/docs/source/_static/locales/sr/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/sr/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..1a712a18
--- /dev/null
+++ b/docs/source/_static/locales/sr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: sr\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "предложи уређивање"
+
+msgid "Last updated on"
+msgstr "Последње ажурирање"
+
+msgid "Edit this page"
+msgstr "Уредите ову страницу"
+
+msgid "Launch"
+msgstr "Лансирање"
+
+msgid "Print to PDF"
+msgstr "Испис у ПДФ"
+
+msgid "open issue"
+msgstr "отворено издање"
+
+msgid "Download notebook file"
+msgstr "Преузмите датотеку бележнице"
+
+msgid "Toggle navigation"
+msgstr "Укључи / искључи навигацију"
+
+msgid "Source repository"
+msgstr "Изворно спремиште"
+
+msgid "By the"
+msgstr "Од"
+
+msgid "next page"
+msgstr "Следећа страна"
+
+msgid "repository"
+msgstr "спремиште"
+
+msgid "Sphinx Book Theme"
+msgstr "Тема књиге Спхинк"
+
+msgid "Download source file"
+msgstr "Преузми изворну датотеку"
+
+msgid "Contents"
+msgstr "Садржај"
+
+msgid "By"
+msgstr "Од стране"
+
+msgid "Copyright"
+msgstr "Ауторско право"
+
+msgid "Fullscreen mode"
+msgstr "Режим целог екрана"
+
+msgid "Open an issue"
+msgstr "Отворите издање"
+
+msgid "previous page"
+msgstr "Претходна страница"
+
+msgid "Download this page"
+msgstr "Преузмите ову страницу"
+
+msgid "Theme by the"
+msgstr "Тхеме би"
diff --git a/docs/source/_static/locales/sv/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/sv/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..7d2b56d9
--- /dev/null
+++ b/docs/source/_static/locales/sv/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: sv\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "föreslå redigering"
+
+msgid "Last updated on"
+msgstr "Senast uppdaterad den"
+
+msgid "Edit this page"
+msgstr "Redigera den här sidan"
+
+msgid "Launch"
+msgstr "Lansera"
+
+msgid "Print to PDF"
+msgstr "Skriv ut till PDF"
+
+msgid "open issue"
+msgstr "öppet problem"
+
+msgid "Download notebook file"
+msgstr "Ladda ner anteckningsbokfilen"
+
+msgid "Toggle navigation"
+msgstr "Växla navigering"
+
+msgid "Source repository"
+msgstr "Källförvar"
+
+msgid "By the"
+msgstr "Vid"
+
+msgid "next page"
+msgstr "nästa sida"
+
+msgid "repository"
+msgstr "förvar"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx boktema"
+
+msgid "Download source file"
+msgstr "Ladda ner källfil"
+
+msgid "Contents"
+msgstr "Innehåll"
+
+msgid "By"
+msgstr "Förbi"
+
+msgid "Copyright"
+msgstr "upphovsrätt"
+
+msgid "Fullscreen mode"
+msgstr "Fullskärmsläge"
+
+msgid "Open an issue"
+msgstr "Öppna ett problem"
+
+msgid "previous page"
+msgstr "föregående sida"
+
+msgid "Download this page"
+msgstr "Ladda ner den här sidan"
+
+msgid "Theme by the"
+msgstr "Tema av"
diff --git a/docs/source/_static/locales/ta/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ta/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..c75ffe19
--- /dev/null
+++ b/docs/source/_static/locales/ta/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ta\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "திருத்த பரிந்துரைக்கவும்"
+
+msgid "Last updated on"
+msgstr "கடைசியாக புதுப்பிக்கப்பட்டது"
+
+msgid "Edit this page"
+msgstr "இந்தப் பக்கத்தைத் திருத்தவும்"
+
+msgid "Launch"
+msgstr "தொடங்க"
+
+msgid "Print to PDF"
+msgstr "PDF இல் அச்சிடுக"
+
+msgid "open issue"
+msgstr "திறந்த பிரச்சினை"
+
+msgid "Download notebook file"
+msgstr "நோட்புக் கோப்பைப் பதிவிறக்கவும்"
+
+msgid "Toggle navigation"
+msgstr "வழிசெலுத்தலை நிலைமாற்று"
+
+msgid "Source repository"
+msgstr "மூல களஞ்சியம்"
+
+msgid "By the"
+msgstr "மூலம்"
+
+msgid "next page"
+msgstr "அடுத்த பக்கம்"
+
+msgid "Sphinx Book Theme"
+msgstr "ஸ்பிங்க்ஸ் புத்தக தீம்"
+
+msgid "Download source file"
+msgstr "மூல கோப்பைப் பதிவிறக்குக"
+
+msgid "By"
+msgstr "வழங்கியவர்"
+
+msgid "Copyright"
+msgstr "பதிப்புரிமை"
+
+msgid "Open an issue"
+msgstr "சிக்கலைத் திறக்கவும்"
+
+msgid "previous page"
+msgstr "முந்தைய பக்கம்"
+
+msgid "Download this page"
+msgstr "இந்தப் பக்கத்தைப் பதிவிறக்கவும்"
+
+msgid "Theme by the"
+msgstr "வழங்கிய தீம்"
diff --git a/docs/source/_static/locales/te/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/te/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..2595c035
--- /dev/null
+++ b/docs/source/_static/locales/te/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: te\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "సవరించమని సూచించండి"
+
+msgid "Last updated on"
+msgstr "చివరిగా నవీకరించబడింది"
+
+msgid "Edit this page"
+msgstr "ఈ పేజీని సవరించండి"
+
+msgid "Launch"
+msgstr "ప్రారంభించండి"
+
+msgid "Print to PDF"
+msgstr "PDF కి ముద్రించండి"
+
+msgid "open issue"
+msgstr "ఓపెన్ ఇష్యూ"
+
+msgid "Download notebook file"
+msgstr "నోట్బుక్ ఫైల్ను డౌన్లోడ్ చేయండి"
+
+msgid "Toggle navigation"
+msgstr "నావిగేషన్ను టోగుల్ చేయండి"
+
+msgid "Source repository"
+msgstr "మూల రిపోజిటరీ"
+
+msgid "By the"
+msgstr "ద్వారా"
+
+msgid "next page"
+msgstr "తరువాతి పేజీ"
+
+msgid "Sphinx Book Theme"
+msgstr "సింహిక పుస్తక థీమ్"
+
+msgid "Download source file"
+msgstr "మూల ఫైల్ను డౌన్లోడ్ చేయండి"
+
+msgid "By"
+msgstr "ద్వారా"
+
+msgid "Copyright"
+msgstr "కాపీరైట్"
+
+msgid "Open an issue"
+msgstr "సమస్యను తెరవండి"
+
+msgid "previous page"
+msgstr "ముందు పేజి"
+
+msgid "Download this page"
+msgstr "ఈ పేజీని డౌన్లోడ్ చేయండి"
+
+msgid "Theme by the"
+msgstr "ద్వారా థీమ్"
diff --git a/docs/source/_static/locales/tg/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/tg/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..73cd30ea
--- /dev/null
+++ b/docs/source/_static/locales/tg/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: tg\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "пешниҳод вироиш"
+
+msgid "Last updated on"
+msgstr "Last навсозӣ дар"
+
+msgid "Edit this page"
+msgstr "Ин саҳифаро таҳрир кунед"
+
+msgid "Launch"
+msgstr "Оғоз"
+
+msgid "Print to PDF"
+msgstr "Чоп ба PDF"
+
+msgid "open issue"
+msgstr "барориши кушод"
+
+msgid "Download notebook file"
+msgstr "Файли дафтарро зеркашӣ кунед"
+
+msgid "Toggle navigation"
+msgstr "Гузаришро иваз кунед"
+
+msgid "Source repository"
+msgstr "Анбори манбаъ"
+
+msgid "By the"
+msgstr "Бо"
+
+msgid "next page"
+msgstr "саҳифаи оянда"
+
+msgid "repository"
+msgstr "анбор"
+
+msgid "Sphinx Book Theme"
+msgstr "Сфинкс Мавзӯи китоб"
+
+msgid "Download source file"
+msgstr "Файли манбаъро зеркашӣ кунед"
+
+msgid "Contents"
+msgstr "Мундариҷа"
+
+msgid "By"
+msgstr "Бо"
+
+msgid "Copyright"
+msgstr "Ҳуқуқи муаллиф"
+
+msgid "Fullscreen mode"
+msgstr "Ҳолати экрани пурра"
+
+msgid "Open an issue"
+msgstr "Масъаларо кушоед"
+
+msgid "previous page"
+msgstr "саҳифаи қаблӣ"
+
+msgid "Download this page"
+msgstr "Ин саҳифаро зеркашӣ кунед"
+
+msgid "Theme by the"
+msgstr "Мавзӯъи аз"
diff --git a/docs/source/_static/locales/th/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/th/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..0392b4ad
--- /dev/null
+++ b/docs/source/_static/locales/th/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: th\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "แนะนำแก้ไข"
+
+msgid "Last updated on"
+msgstr "ปรับปรุงล่าสุดเมื่อ"
+
+msgid "Edit this page"
+msgstr "แก้ไขหน้านี้"
+
+msgid "Launch"
+msgstr "เปิด"
+
+msgid "Print to PDF"
+msgstr "พิมพ์เป็น PDF"
+
+msgid "open issue"
+msgstr "เปิดปัญหา"
+
+msgid "Download notebook file"
+msgstr "ดาวน์โหลดไฟล์สมุดบันทึก"
+
+msgid "Toggle navigation"
+msgstr "ไม่ต้องสลับช่องทาง"
+
+msgid "Source repository"
+msgstr "ที่เก็บซอร์ส"
+
+msgid "By the"
+msgstr "โดย"
+
+msgid "next page"
+msgstr "หน้าต่อไป"
+
+msgid "repository"
+msgstr "ที่เก็บ"
+
+msgid "Sphinx Book Theme"
+msgstr "ธีมหนังสือสฟิงซ์"
+
+msgid "Download source file"
+msgstr "ดาวน์โหลดไฟล์ต้นฉบับ"
+
+msgid "Contents"
+msgstr "สารบัญ"
+
+msgid "By"
+msgstr "โดย"
+
+msgid "Copyright"
+msgstr "ลิขสิทธิ์"
+
+msgid "Fullscreen mode"
+msgstr "โหมดเต็มหน้าจอ"
+
+msgid "Open an issue"
+msgstr "เปิดปัญหา"
+
+msgid "previous page"
+msgstr "หน้าที่แล้ว"
+
+msgid "Download this page"
+msgstr "ดาวน์โหลดหน้านี้"
+
+msgid "Theme by the"
+msgstr "ธีมโดย"
diff --git a/docs/source/_static/locales/tl/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/tl/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..c8375b54
--- /dev/null
+++ b/docs/source/_static/locales/tl/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: tl\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "iminumungkahi i-edit"
+
+msgid "Last updated on"
+msgstr "Huling na-update noong"
+
+msgid "Edit this page"
+msgstr "I-edit ang pahinang ito"
+
+msgid "Launch"
+msgstr "Ilunsad"
+
+msgid "Print to PDF"
+msgstr "I-print sa PDF"
+
+msgid "open issue"
+msgstr "bukas na isyu"
+
+msgid "Download notebook file"
+msgstr "Mag-download ng file ng notebook"
+
+msgid "Toggle navigation"
+msgstr "I-toggle ang pag-navigate"
+
+msgid "Source repository"
+msgstr "Pinagmulan ng imbakan"
+
+msgid "By the"
+msgstr "Sa pamamagitan ng"
+
+msgid "next page"
+msgstr "Susunod na pahina"
+
+msgid "Sphinx Book Theme"
+msgstr "Tema ng Sphinx Book"
+
+msgid "Download source file"
+msgstr "Mag-download ng file ng pinagmulan"
+
+msgid "By"
+msgstr "Ni"
+
+msgid "Copyright"
+msgstr "Copyright"
+
+msgid "Open an issue"
+msgstr "Magbukas ng isyu"
+
+msgid "previous page"
+msgstr "Nakaraang pahina"
+
+msgid "Download this page"
+msgstr "I-download ang pahinang ito"
+
+msgid "Theme by the"
+msgstr "Tema ng"
diff --git a/docs/source/_static/locales/tr/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/tr/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..47d7bdf7
--- /dev/null
+++ b/docs/source/_static/locales/tr/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: tr\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "düzenleme öner"
+
+msgid "Last updated on"
+msgstr "Son güncelleme tarihi"
+
+msgid "Edit this page"
+msgstr "Bu sayfayı düzenle"
+
+msgid "Launch"
+msgstr "Başlatmak"
+
+msgid "Print to PDF"
+msgstr "PDF olarak yazdır"
+
+msgid "open issue"
+msgstr "Açık konu"
+
+msgid "Download notebook file"
+msgstr "Defter dosyasını indirin"
+
+msgid "Toggle navigation"
+msgstr "Gezinmeyi değiştir"
+
+msgid "Source repository"
+msgstr "Kaynak kod deposu"
+
+msgid "By the"
+msgstr "Tarafından"
+
+msgid "next page"
+msgstr "sonraki Sayfa"
+
+msgid "repository"
+msgstr "depo"
+
+msgid "Sphinx Book Theme"
+msgstr "Sfenks Kitap Teması"
+
+msgid "Download source file"
+msgstr "Kaynak dosyayı indirin"
+
+msgid "Contents"
+msgstr "İçindekiler"
+
+msgid "By"
+msgstr "Tarafından"
+
+msgid "Copyright"
+msgstr "Telif hakkı"
+
+msgid "Fullscreen mode"
+msgstr "Tam ekran modu"
+
+msgid "Open an issue"
+msgstr "Bir sorunu açın"
+
+msgid "previous page"
+msgstr "önceki sayfa"
+
+msgid "Download this page"
+msgstr "Bu sayfayı indirin"
+
+msgid "Theme by the"
+msgstr "Tarafından tema"
diff --git a/docs/source/_static/locales/uk/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/uk/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..e85f6f16
--- /dev/null
+++ b/docs/source/_static/locales/uk/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: uk\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "запропонувати редагувати"
+
+msgid "Last updated on"
+msgstr "Останнє оновлення:"
+
+msgid "Edit this page"
+msgstr "Редагувати цю сторінку"
+
+msgid "Launch"
+msgstr "Запуск"
+
+msgid "Print to PDF"
+msgstr "Друк у форматі PDF"
+
+msgid "open issue"
+msgstr "відкритий випуск"
+
+msgid "Download notebook file"
+msgstr "Завантажте файл блокнота"
+
+msgid "Toggle navigation"
+msgstr "Переключити навігацію"
+
+msgid "Source repository"
+msgstr "Джерело сховища"
+
+msgid "By the"
+msgstr "По"
+
+msgid "next page"
+msgstr "Наступна сторінка"
+
+msgid "repository"
+msgstr "сховище"
+
+msgid "Sphinx Book Theme"
+msgstr "Тема книги \"Сфінкс\""
+
+msgid "Download source file"
+msgstr "Завантажити вихідний файл"
+
+msgid "Contents"
+msgstr "Зміст"
+
+msgid "By"
+msgstr "Автор"
+
+msgid "Copyright"
+msgstr "Авторське право"
+
+msgid "Fullscreen mode"
+msgstr "Повноекранний режим"
+
+msgid "Open an issue"
+msgstr "Відкрийте випуск"
+
+msgid "previous page"
+msgstr "Попередня сторінка"
+
+msgid "Download this page"
+msgstr "Завантажте цю сторінку"
+
+msgid "Theme by the"
+msgstr "Тема від"
diff --git a/docs/source/_static/locales/ur/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/ur/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..0f90726c
--- /dev/null
+++ b/docs/source/_static/locales/ur/LC_MESSAGES/booktheme.po
@@ -0,0 +1,66 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: ur\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "ترمیم کی تجویز کریں"
+
+msgid "Last updated on"
+msgstr "آخری بار تازہ کاری ہوئی"
+
+msgid "Edit this page"
+msgstr "اس صفحے میں ترمیم کریں"
+
+msgid "Launch"
+msgstr "لانچ کریں"
+
+msgid "Print to PDF"
+msgstr "پی ڈی ایف پرنٹ کریں"
+
+msgid "open issue"
+msgstr "کھلا مسئلہ"
+
+msgid "Download notebook file"
+msgstr "نوٹ بک فائل ڈاؤن لوڈ کریں"
+
+msgid "Toggle navigation"
+msgstr "نیویگیشن ٹوگل کریں"
+
+msgid "Source repository"
+msgstr "ماخذ ذخیرہ"
+
+msgid "By the"
+msgstr "کی طرف"
+
+msgid "next page"
+msgstr "اگلا صفحہ"
+
+msgid "Sphinx Book Theme"
+msgstr "سپنکس بک تھیم"
+
+msgid "Download source file"
+msgstr "سورس فائل ڈاؤن لوڈ کریں"
+
+msgid "By"
+msgstr "بذریعہ"
+
+msgid "Copyright"
+msgstr "کاپی رائٹ"
+
+msgid "Open an issue"
+msgstr "ایک مسئلہ کھولیں"
+
+msgid "previous page"
+msgstr "سابقہ صفحہ"
+
+msgid "Download this page"
+msgstr "اس صفحے کو ڈاؤن لوڈ کریں"
+
+msgid "Theme by the"
+msgstr "کے ذریعہ تھیم"
diff --git a/docs/source/_static/locales/vi/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/vi/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..2cb5cf3b
--- /dev/null
+++ b/docs/source/_static/locales/vi/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: vi\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "đề nghị chỉnh sửa"
+
+msgid "Last updated on"
+msgstr "Cập nhật lần cuối vào"
+
+msgid "Edit this page"
+msgstr "chỉnh sửa trang này"
+
+msgid "Launch"
+msgstr "Phóng"
+
+msgid "Print to PDF"
+msgstr "In sang PDF"
+
+msgid "open issue"
+msgstr "vấn đề mở"
+
+msgid "Download notebook file"
+msgstr "Tải xuống tệp sổ tay"
+
+msgid "Toggle navigation"
+msgstr "Chuyển đổi điều hướng thành"
+
+msgid "Source repository"
+msgstr "Kho nguồn"
+
+msgid "By the"
+msgstr "Bằng"
+
+msgid "next page"
+msgstr "Trang tiếp theo"
+
+msgid "repository"
+msgstr "kho"
+
+msgid "Sphinx Book Theme"
+msgstr "Chủ đề sách nhân sư"
+
+msgid "Download source file"
+msgstr "Tải xuống tệp nguồn"
+
+msgid "Contents"
+msgstr "Nội dung"
+
+msgid "By"
+msgstr "Bởi"
+
+msgid "Copyright"
+msgstr "Bản quyền"
+
+msgid "Fullscreen mode"
+msgstr "Chế độ toàn màn hình"
+
+msgid "Open an issue"
+msgstr "Mở một vấn đề"
+
+msgid "previous page"
+msgstr "trang trước"
+
+msgid "Download this page"
+msgstr "Tải xuống trang này"
+
+msgid "Theme by the"
+msgstr "Chủ đề của"
diff --git a/docs/source/_static/locales/zh_CN/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/zh_CN/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..f91f3ba0
--- /dev/null
+++ b/docs/source/_static/locales/zh_CN/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_CN\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "提出修改建议"
+
+msgid "Last updated on"
+msgstr "上次更新时间:"
+
+msgid "Edit this page"
+msgstr "编辑此页面"
+
+msgid "Launch"
+msgstr "启动"
+
+msgid "Print to PDF"
+msgstr "列印成 PDF"
+
+msgid "open issue"
+msgstr "创建议题"
+
+msgid "Download notebook file"
+msgstr "下载笔记本文件"
+
+msgid "Toggle navigation"
+msgstr "显示或隐藏导航栏"
+
+msgid "Source repository"
+msgstr "源码库"
+
+msgid "By the"
+msgstr "作者:"
+
+msgid "next page"
+msgstr "下一页"
+
+msgid "repository"
+msgstr "仓库"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx Book 主题"
+
+msgid "Download source file"
+msgstr "下载源文件"
+
+msgid "Contents"
+msgstr "目录"
+
+msgid "By"
+msgstr "作者:"
+
+msgid "Copyright"
+msgstr "版权"
+
+msgid "Fullscreen mode"
+msgstr "全屏模式"
+
+msgid "Open an issue"
+msgstr "创建议题"
+
+msgid "previous page"
+msgstr "上一页"
+
+msgid "Download this page"
+msgstr "下载此页面"
+
+msgid "Theme by the"
+msgstr "主题作者:"
diff --git a/docs/source/_static/locales/zh_TW/LC_MESSAGES/booktheme.po b/docs/source/_static/locales/zh_TW/LC_MESSAGES/booktheme.po
new file mode 100644
index 00000000..7833d904
--- /dev/null
+++ b/docs/source/_static/locales/zh_TW/LC_MESSAGES/booktheme.po
@@ -0,0 +1,75 @@
+
+msgid ""
+msgstr ""
+"Project-Id-Version: Sphinx-Book-Theme\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Language: zh_TW\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+msgid "suggest edit"
+msgstr "提出修改建議"
+
+msgid "Last updated on"
+msgstr "最後更新時間:"
+
+msgid "Edit this page"
+msgstr "編輯此頁面"
+
+msgid "Launch"
+msgstr "啟動"
+
+msgid "Print to PDF"
+msgstr "列印成 PDF"
+
+msgid "open issue"
+msgstr "公開的問題"
+
+msgid "Download notebook file"
+msgstr "下載 Notebook 檔案"
+
+msgid "Toggle navigation"
+msgstr "顯示或隱藏導覽列"
+
+msgid "Source repository"
+msgstr "來源儲存庫"
+
+msgid "By the"
+msgstr "作者:"
+
+msgid "next page"
+msgstr "下一頁"
+
+msgid "repository"
+msgstr "儲存庫"
+
+msgid "Sphinx Book Theme"
+msgstr "Sphinx Book 佈景主題"
+
+msgid "Download source file"
+msgstr "下載原始檔"
+
+msgid "Contents"
+msgstr "目錄"
+
+msgid "By"
+msgstr "作者:"
+
+msgid "Copyright"
+msgstr "Copyright"
+
+msgid "Fullscreen mode"
+msgstr "全螢幕模式"
+
+msgid "Open an issue"
+msgstr "開啟議題"
+
+msgid "previous page"
+msgstr "上一頁"
+
+msgid "Download this page"
+msgstr "下載此頁面"
+
+msgid "Theme by the"
+msgstr "佈景主題作者:"
diff --git a/docs/source/_static/pygments.css b/docs/source/_static/pygments.css
new file mode 100644
index 00000000..84ab3030
--- /dev/null
+++ b/docs/source/_static/pygments.css
@@ -0,0 +1,75 @@
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+.highlight .hll { background-color: #ffffcc }
+.highlight { background: #f8f8f8; }
+.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */
+.highlight .err { border: 1px solid #FF0000 } /* Error */
+.highlight .k { color: #008000; font-weight: bold } /* Keyword */
+.highlight .o { color: #666666 } /* Operator */
+.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #9C6500 } /* Comment.Preproc */
+.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.highlight .gd { color: #A00000 } /* Generic.Deleted */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */
+.highlight .gr { color: #E40000 } /* Generic.Error */
+.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #008400 } /* Generic.Inserted */
+.highlight .go { color: #717171 } /* Generic.Output */
+.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.highlight .gt { color: #0044DD } /* Generic.Traceback */
+.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { color: #008000 } /* Keyword.Pseudo */
+.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #B00040 } /* Keyword.Type */
+.highlight .m { color: #666666 } /* Literal.Number */
+.highlight .s { color: #BA2121 } /* Literal.String */
+.highlight .na { color: #687822 } /* Name.Attribute */
+.highlight .nb { color: #008000 } /* Name.Builtin */
+.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+.highlight .no { color: #880000 } /* Name.Constant */
+.highlight .nd { color: #AA22FF } /* Name.Decorator */
+.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #0000FF } /* Name.Function */
+.highlight .nl { color: #767600 } /* Name.Label */
+.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */
+.highlight .nv { color: #19177C } /* Name.Variable */
+.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+.highlight .w { color: #bbbbbb } /* Text.Whitespace */
+.highlight .mb { color: #666666 } /* Literal.Number.Bin */
+.highlight .mf { color: #666666 } /* Literal.Number.Float */
+.highlight .mh { color: #666666 } /* Literal.Number.Hex */
+.highlight .mi { color: #666666 } /* Literal.Number.Integer */
+.highlight .mo { color: #666666 } /* Literal.Number.Oct */
+.highlight .sa { color: #BA2121 } /* Literal.String.Affix */
+.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */
+.highlight .sc { color: #BA2121 } /* Literal.String.Char */
+.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */
+.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.highlight .s2 { color: #BA2121 } /* Literal.String.Double */
+.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */
+.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.highlight .sx { color: #008000 } /* Literal.String.Other */
+.highlight .sr { color: #A45A77 } /* Literal.String.Regex */
+.highlight .s1 { color: #BA2121 } /* Literal.String.Single */
+.highlight .ss { color: #19177C } /* Literal.String.Symbol */
+.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */
+.highlight .fm { color: #0000FF } /* Name.Function.Magic */
+.highlight .vc { color: #19177C } /* Name.Variable.Class */
+.highlight .vg { color: #19177C } /* Name.Variable.Global */
+.highlight .vi { color: #19177C } /* Name.Variable.Instance */
+.highlight .vm { color: #19177C } /* Name.Variable.Magic */
+.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */
\ No newline at end of file
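
pygments.css carries the stylesheet Sphinx emits for syntax highlighting: every rule is scoped under the .highlight container, and the short class names map to Pygments token types (.k for Keyword, .s for Literal.String, and so on, per the trailing comments). The colours appear to match Pygments' built-in "default" style, so an equivalent sheet can be regenerated from the Pygments API rather than edited by hand; a sketch:

    from pygments.formatters import HtmlFormatter

    # Emit the "default" style's rules scoped under .highlight,
    # mirroring the vendored sheet above (modulo Pygments version).
    print(HtmlFormatter(style="default").get_style_defs(".highlight"))
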
diff --git a/docs/source/_static/sbt-webpack-macros.html b/docs/source/_static/sbt-webpack-macros.html
new file mode 100644
index 00000000..6cbf559f
--- /dev/null
+++ b/docs/source/_static/sbt-webpack-macros.html
@@ -0,0 +1,11 @@
+
+{% macro head_pre_bootstrap() %}
+
+{% endmacro %}
+
+{% macro body_post() %}
+
+{% endmacro %}
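
Judging by its name, sbt-webpack-macros.html defines the two Jinja hook macros through which the Sphinx Book Theme injects its webpack-built assets: head_pre_bootstrap() for stylesheet tags in the page head, and body_post() for script tags at the end of the body. A hypothetical sketch of how a layout template would consume them (the stand-in template is illustrative, not the theme's actual layout):

    from jinja2 import Environment, FileSystemLoader

    # Hypothetical stand-in for the theme's page layout, which imports the
    # macro file and expands each hook at the matching point in the page.
    env = Environment(loader=FileSystemLoader("docs/source/_static"))
    page = env.from_string(
        '{% import "sbt-webpack-macros.html" as sbt %}'
        "<head>{{ sbt.head_pre_bootstrap() }}</head>"
        "<body>{{ sbt.body_post() }}</body>"
    )
    print(page.render())
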
diff --git a/docs/source/_static/scripts/bootstrap.js b/docs/source/_static/scripts/bootstrap.js
new file mode 100644
index 00000000..ef07e0bc
--- /dev/null
+++ b/docs/source/_static/scripts/bootstrap.js
@@ -0,0 +1,3 @@
+/*! For license information please see bootstrap.js.LICENSE.txt */
+(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>w,afterRead:()=>b,afterWrite:()=>C,applyStyles:()=>$,arrow:()=>G,auto:()=>r,basePlacements:()=>a,beforeMain:()=>v,beforeRead:()=>m,beforeWrite:()=>A,bottom:()=>n,clippingParents:()=>h,computeStyles:()=>et,createPopper:()=>Dt,createPopperBase:()=>Lt,createPopperLite:()=>$t,detectOverflow:()=>mt,end:()=>c,eventListeners:()=>nt,flip:()=>_t,hide:()=>yt,left:()=>o,main:()=>y,modifierPhases:()=>T,offset:()=>wt,placements:()=>g,popper:()=>u,popperGenerator:()=>kt,popperOffsets:()=>At,preventOverflow:()=>Et,read:()=>_,reference:()=>f,right:()=>s,start:()=>l,top:()=>i,variationPlacements:()=>p,viewport:()=>d,write:()=>E});var i="top",n="bottom",s="right",o="left",r="auto",a=[i,n,s,o],l="start",c="end",h="clippingParents",d="viewport",u="popper",f="reference",p=a.reduce((function(t,e){return t.concat([e+"-"+l,e+"-"+c])}),[]),g=[].concat(a,[r]).reduce((function(t,e){return t.concat([e,e+"-"+l,e+"-"+c])}),[]),m="beforeRead",_="read",b="afterRead",v="beforeMain",y="main",w="afterMain",A="beforeWrite",E="write",C="afterWrite",T=[m,_,b,v,y,w,A,E,C];function O(t){return t?(t.nodeName||"").toLowerCase():null}function x(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function k(t){return t instanceof x(t).Element||t instanceof Element}function L(t){return t instanceof x(t).HTMLElement||t instanceof HTMLElement}function D(t){return"undefined"!=typeof ShadowRoot&&(t instanceof x(t).ShadowRoot||t instanceof ShadowRoot)}const $={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];L(s)&&O(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});L(n)&&O(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function S(t){return t.split("-")[0]}var I=Math.max,N=Math.min,P=Math.round;function j(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function M(){return!/^((?!chrome|android).)*safari/i.test(j())}function H(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&L(t)&&(s=t.offsetWidth>0&&P(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&P(n.height)/t.offsetHeight||1);var 
r=(k(t)?x(t):window).visualViewport,a=!M()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function B(t){var e=H(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function W(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&D(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function F(t){return x(t).getComputedStyle(t)}function z(t){return["table","td","th"].indexOf(O(t))>=0}function q(t){return((k(t)?t.ownerDocument:t.document)||window.document).documentElement}function R(t){return"html"===O(t)?t:t.assignedSlot||t.parentNode||(D(t)?t.host:null)||q(t)}function V(t){return L(t)&&"fixed"!==F(t).position?t.offsetParent:null}function Y(t){for(var e=x(t),i=V(t);i&&z(i)&&"static"===F(i).position;)i=V(i);return i&&("html"===O(i)||"body"===O(i)&&"static"===F(i).position)?e:i||function(t){var e=/firefox/i.test(j());if(/Trident/i.test(j())&&L(t)&&"fixed"===F(t).position)return null;var i=R(t);for(D(i)&&(i=i.host);L(i)&&["html","body"].indexOf(O(i))<0;){var n=F(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function K(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function Q(t,e,i){return I(t,N(e,i))}function X(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function U(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const G={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,r=t.state,l=t.name,c=t.options,h=r.elements.arrow,d=r.modifiersData.popperOffsets,u=S(r.placement),f=K(u),p=[o,s].indexOf(u)>=0?"height":"width";if(h&&d){var g=function(t,e){return X("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:U(t,a))}(c.padding,r),m=B(h),_="y"===f?i:o,b="y"===f?n:s,v=r.rects.reference[p]+r.rects.reference[f]-d[f]-r.rects.popper[p],y=d[f]-r.rects.reference[f],w=Y(h),A=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,E=v/2-y/2,C=g[_],T=A-m[p]-g[b],O=A/2-m[p]/2+E,x=Q(C,O,T),k=f;r.modifiersData[l]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&W(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function J(t){return t.split("-")[1]}var Z={top:"auto",right:"auto",bottom:"auto",left:"auto"};function tt(t){var e,r=t.popper,a=t.popperRect,l=t.placement,h=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,g=t.roundOffsets,m=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof g?g({x:b,y}):{x:b,y};b=w.x,y=w.y;var A=d.hasOwnProperty("x"),E=d.hasOwnProperty("y"),C=o,T=i,O=window;if(p){var k=Y(r),L="clientHeight",D="clientWidth";k===x(r)&&"static"!==F(k=q(r)).position&&"absolute"===u&&(L="scrollHeight",D="scrollWidth"),(l===i||(l===o||l===s)&&h===c)&&(T=n,y-=(m&&k===O&&O.visualViewport?O.visualViewport.height:k[L])-a.height,y*=f?1:-1),l!==o&&(l!==i&&l!==n||h!==c)||(C=s,b-=(m&&k===O&&O.visualViewport?O.visualViewport.width:k[D])-a.width,b*=f?1:-1)}var $,S=Object.assign({position:u},p&&Z),I=!0===g?function(t,e){var 
i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:P(i*s)/s||0,y:P(n*s)/s||0}}({x:b,y},x(r)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},S,(($={})[T]=E?"0":"",$[C]=A?"0":"",$.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",$)):Object.assign({},S,((e={})[T]=E?y+"px":"",e[C]=A?b+"px":"",e.transform="",e))}const et={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:S(e.placement),variation:J(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,tt(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,tt(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var it={passive:!0};const nt={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=x(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,it)})),a&&l.addEventListener("resize",i.update,it),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,it)})),a&&l.removeEventListener("resize",i.update,it)}},data:{}};var st={left:"right",right:"left",bottom:"top",top:"bottom"};function ot(t){return t.replace(/left|right|bottom|top/g,(function(t){return st[t]}))}var rt={start:"end",end:"start"};function at(t){return t.replace(/start|end/g,(function(t){return rt[t]}))}function lt(t){var e=x(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ct(t){return H(q(t)).left+lt(t).scrollLeft}function ht(t){var e=F(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function dt(t){return["html","body","#document"].indexOf(O(t))>=0?t.ownerDocument.body:L(t)&&ht(t)?t:dt(R(t))}function ut(t,e){var i;void 0===e&&(e=[]);var n=dt(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=x(n),r=s?[o].concat(o.visualViewport||[],ht(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ut(R(r)))}function ft(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function pt(t,e,i){return e===d?ft(function(t,e){var i=x(t),n=q(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=M();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ct(t),y:l}}(t,i)):k(e)?function(t,e){var i=H(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):ft(function(t){var e,i=q(t),n=lt(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=I(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=I(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ct(t),l=-n.scrollTop;return"rtl"===F(s||i).direction&&(a+=I(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(q(t)))}function 
gt(t){var e,r=t.reference,a=t.element,h=t.placement,d=h?S(h):null,u=h?J(h):null,f=r.x+r.width/2-a.width/2,p=r.y+r.height/2-a.height/2;switch(d){case i:e={x:f,y:r.y-a.height};break;case n:e={x:f,y:r.y+r.height};break;case s:e={x:r.x+r.width,y:p};break;case o:e={x:r.x-a.width,y:p};break;default:e={x:r.x,y:r.y}}var g=d?K(d):null;if(null!=g){var m="y"===g?"height":"width";switch(u){case l:e[g]=e[g]-(r[m]/2-a[m]/2);break;case c:e[g]=e[g]+(r[m]/2-a[m]/2)}}return e}function mt(t,e){void 0===e&&(e={});var o=e,r=o.placement,l=void 0===r?t.placement:r,c=o.strategy,p=void 0===c?t.strategy:c,g=o.boundary,m=void 0===g?h:g,_=o.rootBoundary,b=void 0===_?d:_,v=o.elementContext,y=void 0===v?u:v,w=o.altBoundary,A=void 0!==w&&w,E=o.padding,C=void 0===E?0:E,T=X("number"!=typeof C?C:U(C,a)),x=y===u?f:u,D=t.rects.popper,$=t.elements[A?x:y],S=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ut(R(t)),i=["absolute","fixed"].indexOf(F(t).position)>=0&&L(t)?Y(t):t;return k(i)?e.filter((function(t){return k(t)&&W(t,i)&&"body"!==O(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=pt(t,i,n);return e.top=I(s.top,e.top),e.right=N(s.right,e.right),e.bottom=N(s.bottom,e.bottom),e.left=I(s.left,e.left),e}),pt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(k($)?$:$.contextElement||q(t.elements.popper),m,b,p),P=H(t.elements.reference),j=gt({reference:P,element:D,strategy:"absolute",placement:l}),M=ft(Object.assign({},D,j)),B=y===u?M:P,z={top:S.top-B.top+T.top,bottom:B.bottom-S.bottom+T.bottom,left:S.left-B.left+T.left,right:B.right-S.right+T.right},V=t.modifiersData.offset;if(y===u&&V){var K=V[l];Object.keys(z).forEach((function(t){var e=[s,n].indexOf(t)>=0?1:-1,o=[i,n].indexOf(t)>=0?"y":"x";z[t]+=K[o]*e}))}return z}const _t={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,c=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=c.mainAxis,u=void 0===d||d,f=c.altAxis,m=void 0===f||f,_=c.fallbackPlacements,b=c.padding,v=c.boundary,y=c.rootBoundary,w=c.altBoundary,A=c.flipVariations,E=void 0===A||A,C=c.allowedAutoPlacements,T=e.options.placement,O=S(T),x=_||(O!==T&&E?function(t){if(S(t)===r)return[];var e=ot(t);return[at(t),e,at(e)]}(T):[ot(T)]),k=[T].concat(x).reduce((function(t,i){return t.concat(S(i)===r?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,l=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?g:c,d=J(n),u=d?l?p:p.filter((function(t){return J(t)===d})):a,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var m=f.reduce((function(e,i){return e[i]=mt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[S(i)],e}),{});return Object.keys(m).sort((function(t,e){return m[t]-m[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:E,allowedAutoPlacements:C}):i)}),[]),L=e.rects.reference,D=e.rects.popper,$=new Map,I=!0,N=k[0],P=0;P=0,W=B?"width":"height",F=mt(e,{placement:j,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=B?H?s:o:H?n:i;L[W]>D[W]&&(z=ot(z));var q=ot(z),R=[];if(u&&R.push(F[M]<=0),m&&R.push(F[z]<=0,F[q]<=0),R.every((function(t){return t}))){N=j,I=!1;break}$.set(j,R)}if(I)for(var V=function(t){var e=k.find((function(e){var i=$.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=E?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function bt(t,e,i){return void 
0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function vt(t){return[i,s,n,o].some((function(e){return t[e]>=0}))}const yt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=mt(e,{elementContext:"reference"}),a=mt(e,{altBoundary:!0}),l=bt(r,n),c=bt(a,s,o),h=vt(l),d=vt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},wt={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,n=t.options,r=t.name,a=n.offset,l=void 0===a?[0,0]:a,c=g.reduce((function(t,n){return t[n]=function(t,e,n){var r=S(t),a=[o,i].indexOf(r)>=0?-1:1,l="function"==typeof n?n(Object.assign({},e,{placement:t})):n,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[o,s].indexOf(r)>=0?{x:h,y:c}:{x:c,y:h}}(n,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[r]=c}},At={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=gt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},Et={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,r=t.options,a=t.name,c=r.mainAxis,h=void 0===c||c,d=r.altAxis,u=void 0!==d&&d,f=r.boundary,p=r.rootBoundary,g=r.altBoundary,m=r.padding,_=r.tether,b=void 0===_||_,v=r.tetherOffset,y=void 0===v?0:v,w=mt(e,{boundary:f,rootBoundary:p,padding:m,altBoundary:g}),A=S(e.placement),E=J(e.placement),C=!E,T=K(A),O="x"===T?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,D="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,$="number"==typeof D?{mainAxis:D,altAxis:D}:Object.assign({mainAxis:0,altAxis:0},D),P=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,j={x:0,y:0};if(x){if(h){var M,H="y"===T?i:o,W="y"===T?n:s,F="y"===T?"height":"width",z=x[T],q=z+w[H],R=z-w[W],V=b?-L[F]/2:0,X=E===l?k[F]:L[F],U=E===l?-L[F]:-k[F],G=e.elements.arrow,Z=b&&G?B(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[H],it=tt[W],nt=Q(0,k[F],Z[F]),st=C?k[F]/2-V-nt-et-$.mainAxis:X-nt-et-$.mainAxis,ot=C?-k[F]/2+V+nt+it+$.mainAxis:U+nt+it+$.mainAxis,rt=e.elements.arrow&&Y(e.elements.arrow),at=rt?"y"===T?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(M=null==P?void 0:P[T])?M:0,ct=z+ot-lt,ht=Q(b?N(q,z+st-lt-at):q,z,b?I(R,ct):R);x[T]=ht,j[T]=ht-z}if(u){var dt,ut="x"===T?i:o,ft="x"===T?n:s,pt=x[O],gt="y"===O?"height":"width",_t=pt+w[ut],bt=pt-w[ft],vt=-1!==[i,o].indexOf(A),yt=null!=(dt=null==P?void 0:P[O])?dt:0,wt=vt?_t:pt-k[gt]-L[gt]-yt+$.altAxis,At=vt?pt+k[gt]+L[gt]-yt-$.altAxis:bt,Et=b&&vt?function(t,e,i){var n=Q(t,e,i);return n>i?i:n}(wt,pt,At):Q(b?wt:_t,pt,b?At:bt);x[O]=Et,j[O]=Et-pt}e.modifiersData[a]=j}},requiresIfExists:["offset"]};function Ct(t,e,i){void 0===i&&(i=!1);var n,s,o=L(e),r=L(e)&&function(t){var e=t.getBoundingClientRect(),i=P(e.width)/t.offsetWidth||1,n=P(e.height)/t.offsetHeight||1;return 
1!==i||1!==n}(e),a=q(e),l=H(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==O(e)||ht(a))&&(c=(n=e)!==x(n)&&L(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:lt(n)),L(e)?((h=H(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ct(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Tt(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var Ot={placement:"bottom",modifiers:[],strategy:"absolute"};function xt(){for(var t=arguments.length,e=new Array(t),i=0;i{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?i.trim():null}return e},Nt=t=>{const e=It(t);return e&&document.querySelector(e)?e:null},Pt=t=>{const e=It(t);return e?document.querySelector(e):null},jt=t=>{t.dispatchEvent(new Event(St))},Mt=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ht=t=>Mt(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(t):null,Bt=t=>{if(!Mt(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Wt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),Ft=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?Ft(t.parentNode):null},zt=()=>{},qt=t=>{t.offsetHeight},Rt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Vt=[],Yt=()=>"rtl"===document.documentElement.dir,Kt=t=>{var e;e=()=>{const e=Rt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Vt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Vt)t()})),Vt.push(e)):e()},Qt=t=>{"function"==typeof t&&t()},Xt=(t,e,i=!0)=>{if(!i)return void Qt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(St,o),Qt(t))};e.addEventListener(St,o),setTimeout((()=>{s||jt(e)}),n)},Ut=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Gt=/[^.]*(?=\..*)\.|.*/,Jt=/\..*/,Zt=/::\d+$/,te={};let ee=1;const ie={mouseenter:"mouseover",mouseleave:"mouseout"},ne=new 
Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function se(t,e){return e&&`${e}::${ee++}`||t.uidEvent||ee++}function oe(t){const e=se(t);return t.uidEvent=e,te[e]=te[e]||{},te[e]}function re(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function ae(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=de(t);return ne.has(o)||(o=t),[n,s,o]}function le(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=ae(e,i,n);if(e in ie){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=oe(t),c=l[a]||(l[a]={}),h=re(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=se(r,e.replace(Gt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return fe(s,{delegateTarget:r}),n.oneOff&&ue.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return fe(n,{delegateTarget:t}),i.oneOff&&ue.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function ce(t,e,i,n,s){const o=re(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function he(t,e,i,n){const s=e[i]||{};for(const o of Object.keys(s))if(o.includes(n)){const n=s[o];ce(t,e,i,n.callable,n.delegationSelector)}}function de(t){return t=t.replace(Jt,""),ie[t]||t}const ue={on(t,e,i,n){le(t,e,i,n,!1)},one(t,e,i,n){le(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=ae(e,i,n),a=r!==e,l=oe(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))he(t,l,i,e.slice(1));for(const i of Object.keys(c)){const n=i.replace(Zt,"");if(!a||e.includes(n)){const e=c[i];ce(t,l,r,e.callable,e.delegationSelector)}}}else{if(!Object.keys(c).length)return;ce(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=Rt();let s=null,o=!0,r=!0,a=!1;e!==de(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());let l=new Event(e,{bubbles:o,cancelable:!0});return l=fe(l,i),a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function fe(t,e){for(const[i,n]of Object.entries(e||{}))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}const pe=new Map,ge={set(t,e,i){pe.has(t)||pe.set(t,new Map);const n=pe.get(t);n.has(e)||0===n.size?n.set(e,i):console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(n.keys())[0]}.`)},get:(t,e)=>pe.has(t)&&pe.get(t).get(e)||null,remove(t,e){if(!pe.has(t))return;const i=pe.get(t);i.delete(e),0===i.size&&pe.delete(t)}};function me(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function _e(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const be={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${_e(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${_e(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=me(t.dataset[n])}return e},getDataAttribute:(t,e)=>me(t.getAttribute(`data-bs-${_e(e)}`))};class ve{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=Mt(e)?be.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...Mt(e)?be.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const n of Object.keys(e)){const s=e[n],o=t[n],r=Mt(o)?"element":null==(i=o)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(r))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${r}" but expected type "${s}".`)}var i}}class ye extends ve{constructor(t,e){super(),(t=Ht(t))&&(this._element=t,this._config=this._getConfig(e),ge.set(this._element,this.constructor.DATA_KEY,this))}dispose(){ge.remove(this._element,this.constructor.DATA_KEY),ue.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Xt(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return ge.get(Ht(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.2.3"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const we=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;ue.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Wt(this))return;const s=Pt(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ae=".bs.alert",Ee=`close${Ae}`,Ce=`closed${Ae}`;class Te extends ye{static get NAME(){return"alert"}close(){if(ue.trigger(this._element,Ee).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),ue.trigger(this._element,Ce),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Te.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named 
"${t}"`);e[t](this)}}))}}we(Te,"close"),Kt(Te);const Oe='[data-bs-toggle="button"]';class xe extends ye{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=xe.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}ue.on(document,"click.bs.button.data-api",Oe,(t=>{t.preventDefault();const e=t.target.closest(Oe);xe.getOrCreateInstance(e).toggle()})),Kt(xe);const ke={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Wt(t)&&Bt(t)))}},Le=".bs.swipe",De=`touchstart${Le}`,$e=`touchmove${Le}`,Se=`touchend${Le}`,Ie=`pointerdown${Le}`,Ne=`pointerup${Le}`,Pe={endCallback:null,leftCallback:null,rightCallback:null},je={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class Me extends ve{constructor(t,e){super(),this._element=t,t&&Me.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Pe}static get DefaultType(){return je}static get NAME(){return"swipe"}dispose(){ue.off(this._element,Le)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Qt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Qt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(ue.on(this._element,Ie,(t=>this._start(t))),ue.on(this._element,Ne,(t=>this._end(t))),this._element.classList.add("pointer-event")):(ue.on(this._element,De,(t=>this._start(t))),ue.on(this._element,$e,(t=>this._move(t))),ue.on(this._element,Se,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const He=".bs.carousel",Be=".data-api",We="next",Fe="prev",ze="left",qe="right",Re=`slide${He}`,Ve=`slid${He}`,Ye=`keydown${He}`,Ke=`mouseenter${He}`,Qe=`mouseleave${He}`,Xe=`dragstart${He}`,Ue=`load${He}${Be}`,Ge=`click${He}${Be}`,Je="carousel",Ze="active",ti=".active",ei=".carousel-item",ii=ti+ei,ni={ArrowLeft:qe,ArrowRight:ze},si={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},oi={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class ri extends 
ye{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=ke.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===Je&&this.cycle()}static get Default(){return si}static get DefaultType(){return oi}static get NAME(){return"carousel"}next(){this._slide(We)}nextWhenVisible(){!document.hidden&&Bt(this._element)&&this.next()}prev(){this._slide(Fe)}pause(){this._isSliding&&jt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?ue.one(this._element,Ve,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void ue.one(this._element,Ve,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?We:Fe;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&ue.on(this._element,Ye,(t=>this._keydown(t))),"hover"===this._config.pause&&(ue.on(this._element,Ke,(()=>this.pause())),ue.on(this._element,Qe,(()=>this._maybeEnableCycle()))),this._config.touch&&Me.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of ke.find(".carousel-item img",this._element))ue.on(t,Xe,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(ze)),rightCallback:()=>this._slide(this._directionToOrder(qe)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new Me(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=ni[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=ke.findOne(ti,this._indicatorsElement);e.classList.remove(Ze),e.removeAttribute("aria-current");const i=ke.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(Ze),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===We,s=e||Ut(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>ue.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Re).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),qt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(Ze),i.classList.remove(Ze,c,l),this._isSliding=!1,r(Ve)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return ke.findOne(ii,this._element)}_getItems(){return 
ke.find(ei,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Yt()?t===ze?Fe:We:t===ze?We:Fe}_orderToDirection(t){return Yt()?t===Fe?ze:qe:t===Fe?qe:ze}static jQueryInterface(t){return this.each((function(){const e=ri.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}ue.on(document,Ge,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=Pt(this);if(!e||!e.classList.contains(Je))return;t.preventDefault();const i=ri.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===be.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),ue.on(window,Ue,(()=>{const t=ke.find('[data-bs-ride="carousel"]');for(const e of t)ri.getOrCreateInstance(e)})),Kt(ri);const ai=".bs.collapse",li=`show${ai}`,ci=`shown${ai}`,hi=`hide${ai}`,di=`hidden${ai}`,ui=`click${ai}.data-api`,fi="show",pi="collapse",gi="collapsing",mi=`:scope .${pi} .${pi}`,_i='[data-bs-toggle="collapse"]',bi={parent:null,toggle:!0},vi={parent:"(null|element)",toggle:"boolean"};class yi extends ye{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=ke.find(_i);for(const t of i){const e=Nt(t),i=ke.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return bi}static get DefaultType(){return vi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>yi.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(ue.trigger(this._element,li).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(pi),this._element.classList.add(gi),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(gi),this._element.classList.add(pi,fi),this._element.style[e]="",ue.trigger(this._element,ci)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(ue.trigger(this._element,hi).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,qt(this._element),this._element.classList.add(gi),this._element.classList.remove(pi,fi);for(const t of this._triggerArray){const e=Pt(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(gi),this._element.classList.add(pi),ue.trigger(this._element,di)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(fi)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ht(t.parent),t}_getDimension(){return 
this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(_i);for(const e of t){const t=Pt(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=ke.find(mi,this._config.parent);return ke.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=yi.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}ue.on(document,ui,_i,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();const e=Nt(this),i=ke.find(e);for(const t of i)yi.getOrCreateInstance(t,{toggle:!1}).toggle()})),Kt(yi);const wi="dropdown",Ai=".bs.dropdown",Ei=".data-api",Ci="ArrowUp",Ti="ArrowDown",Oi=`hide${Ai}`,xi=`hidden${Ai}`,ki=`show${Ai}`,Li=`shown${Ai}`,Di=`click${Ai}${Ei}`,$i=`keydown${Ai}${Ei}`,Si=`keyup${Ai}${Ei}`,Ii="show",Ni='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',Pi=`${Ni}.${Ii}`,ji=".dropdown-menu",Mi=Yt()?"top-end":"top-start",Hi=Yt()?"top-start":"top-end",Bi=Yt()?"bottom-end":"bottom-start",Wi=Yt()?"bottom-start":"bottom-end",Fi=Yt()?"left-start":"right-start",zi=Yt()?"right-start":"left-start",qi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Ri={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class Vi extends ye{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=ke.next(this._element,ji)[0]||ke.prev(this._element,ji)[0]||ke.findOne(ji,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return qi}static get DefaultType(){return Ri}static get NAME(){return wi}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Wt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!ue.trigger(this._element,ki,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))ue.on(t,"mouseover",zt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add(Ii),this._element.classList.add(Ii),ue.trigger(this._element,Li,t)}}hide(){if(Wt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!ue.trigger(this._element,Oi,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.off(t,"mouseover",zt);this._popper&&this._popper.destroy(),this._menu.classList.remove(Ii),this._element.classList.remove(Ii),this._element.setAttribute("aria-expanded","false"),be.removeDataAttribute(this._menu,"popper"),ue.trigger(this._element,xi,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!Mt(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new 
TypeError(`${wi.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let t=this._element;"parent"===this._config.reference?t=this._parent:Mt(this._config.reference)?t=Ht(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=Dt(t,this._menu,i)}_isShown(){return this._menu.classList.contains(Ii)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Fi;if(t.classList.contains("dropstart"))return zi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?Hi:Mi:e?Wi:Bi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(be.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,..."function"==typeof this._config.popperConfig?this._config.popperConfig(t):this._config.popperConfig}}_selectMenuItem({key:t,target:e}){const i=ke.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Bt(t)));i.length&&Ut(i,e,t===Ti,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=Vi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=ke.find(Pi);for(const i of e){const e=Vi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ci,Ti].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ni)?this:ke.prev(this,Ni)[0]||ke.next(this,Ni)[0]||ke.findOne(Ni,t.delegateTarget.parentNode),o=Vi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}ue.on(document,$i,Ni,Vi.dataApiKeydownHandler),ue.on(document,$i,ji,Vi.dataApiKeydownHandler),ue.on(document,Di,Vi.clearMenus),ue.on(document,Si,Vi.clearMenus),ue.on(document,Di,Ni,(function(t){t.preventDefault(),Vi.getOrCreateInstance(this).toggle()})),Kt(Vi);const Yi=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",Ki=".sticky-top",Qi="padding-right",Xi="margin-right";class Ui{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const 
t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,Qi,(e=>e+t)),this._setElementAttributes(Yi,Qi,(e=>e+t)),this._setElementAttributes(Ki,Xi,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,Qi),this._resetElementAttributes(Yi,Qi),this._resetElementAttributes(Ki,Xi)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&be.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=be.getDataAttribute(t,e);null!==i?(be.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(Mt(t))e(t);else for(const i of ke.find(t,this._element))e(i)}}const Gi="backdrop",Ji="show",Zi=`mousedown.bs.${Gi}`,tn={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},en={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class nn extends ve{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return tn}static get DefaultType(){return en}static get NAME(){return Gi}show(t){if(!this._config.isVisible)return void Qt(t);this._append();const e=this._getElement();this._config.isAnimated&&qt(e),e.classList.add(Ji),this._emulateAnimation((()=>{Qt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Ji),this._emulateAnimation((()=>{this.dispose(),Qt(t)}))):Qt(t)}dispose(){this._isAppended&&(ue.off(this._element,Zi),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ht(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),ue.on(t,Zi,(()=>{Qt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Xt(t,this._getElement(),this._config.isAnimated)}}const sn=".bs.focustrap",on=`focusin${sn}`,rn=`keydown.tab${sn}`,an="backward",ln={autofocus:!0,trapElement:null},cn={autofocus:"boolean",trapElement:"element"};class hn extends ve{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return ln}static get DefaultType(){return cn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),ue.off(document,sn),ue.on(document,on,(t=>this._handleFocusin(t))),ue.on(document,rn,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,ue.off(document,sn))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const 
i=ke.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===an?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?an:"forward")}}const dn=".bs.modal",un=`hide${dn}`,fn=`hidePrevented${dn}`,pn=`hidden${dn}`,gn=`show${dn}`,mn=`shown${dn}`,_n=`resize${dn}`,bn=`click.dismiss${dn}`,vn=`mousedown.dismiss${dn}`,yn=`keydown.dismiss${dn}`,wn=`click${dn}.data-api`,An="modal-open",En="show",Cn="modal-static",Tn={backdrop:!0,focus:!0,keyboard:!0},On={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class xn extends ye{constructor(t,e){super(t,e),this._dialog=ke.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new Ui,this._addEventListeners()}static get Default(){return Tn}static get DefaultType(){return On}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||ue.trigger(this._element,gn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(An),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(ue.trigger(this._element,un).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(En),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){for(const t of[window,this._dialog])ue.off(t,dn);this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new nn({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new hn({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const e=ke.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),qt(this._element),this._element.classList.add(En),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,ue.trigger(this._element,mn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){ue.on(this._element,yn,(t=>{if("Escape"===t.key)return this._config.keyboard?(t.preventDefault(),void this.hide()):void this._triggerBackdropTransition()})),ue.on(window,_n,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),ue.on(this._element,vn,(t=>{ue.one(this._element,bn,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(An),this._resetAdjustments(),this._scrollBar.reset(),ue.trigger(this._element,pn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(ue.trigger(this._element,fn).defaultPrevented)return;const 
t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(Cn)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(Cn),this._queueCallback((()=>{this._element.classList.remove(Cn),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Yt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Yt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=xn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}ue.on(document,wn,'[data-bs-toggle="modal"]',(function(t){const e=Pt(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),ue.one(e,gn,(t=>{t.defaultPrevented||ue.one(e,pn,(()=>{Bt(this)&&this.focus()}))}));const i=ke.findOne(".modal.show");i&&xn.getInstance(i).hide(),xn.getOrCreateInstance(e).toggle(this)})),we(xn),Kt(xn);const kn=".bs.offcanvas",Ln=".data-api",Dn=`load${kn}${Ln}`,$n="show",Sn="showing",In="hiding",Nn=".offcanvas.show",Pn=`show${kn}`,jn=`shown${kn}`,Mn=`hide${kn}`,Hn=`hidePrevented${kn}`,Bn=`hidden${kn}`,Wn=`resize${kn}`,Fn=`click${kn}${Ln}`,zn=`keydown.dismiss${kn}`,qn={backdrop:!0,keyboard:!0,scroll:!1},Rn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class Vn extends ye{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return qn}static get DefaultType(){return Rn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||ue.trigger(this._element,Pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new Ui).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Sn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add($n),this._element.classList.remove(Sn),ue.trigger(this._element,jn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(ue.trigger(this._element,Mn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add(In),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove($n,In),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new Ui).reset(),ue.trigger(this._element,Bn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new nn({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():ue.trigger(this._element,Hn)}:null})}_initializeFocusTrap(){return new hn({trapElement:this._element})}_addEventListeners(){ue.on(this._element,zn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():ue.trigger(this._element,Hn))}))}static 
jQueryInterface(t){return this.each((function(){const e=Vn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}ue.on(document,Fn,'[data-bs-toggle="offcanvas"]',(function(t){const e=Pt(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this))return;ue.one(e,Bn,(()=>{Bt(this)&&this.focus()}));const i=ke.findOne(Nn);i&&i!==e&&Vn.getInstance(i).hide(),Vn.getOrCreateInstance(e).toggle(this)})),ue.on(window,Dn,(()=>{for(const t of ke.find(Nn))Vn.getOrCreateInstance(t).show()})),ue.on(window,Wn,(()=>{for(const t of ke.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&Vn.getOrCreateInstance(t).hide()})),we(Vn),Kt(Vn);const Yn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Kn=/^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i,Qn=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i,Xn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Yn.has(i)||Boolean(Kn.test(t.nodeValue)||Qn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Un={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Gn={allowList:Un,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"<div></div>"},Jn={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Zn={entry:"(string|element|function|null)",selector:"(string|element)"};class ts extends ve{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Gn}static get DefaultType(){return Jn}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Zn)}_setContent(t,e,i){const n=ke.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?Mt(e)?this._putElementInTemplate(Ht(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Xn(e,s)||t.removeAttribute(e.nodeName)}return 
n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return"function"==typeof t?t(this):t}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const es=new Set(["sanitize","allowList","sanitizeFn"]),is="fade",ns="show",ss=".modal",os="hide.bs.modal",rs="hover",as="focus",ls={AUTO:"auto",TOP:"top",RIGHT:Yt()?"left":"right",BOTTOM:"bottom",LEFT:Yt()?"right":"left"},cs={allowList:Un,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,0],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',title:"",trigger:"hover focus"},hs={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class ds extends ye{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return cs}static get DefaultType(){return hs}static get NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),ue.off(this._element.closest(ss),os,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=ue.trigger(this._element,this.constructor.eventName("show")),e=(Ft(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),ue.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(ns),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.on(t,"mouseover",zt);this._queueCallback((()=>{ue.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!ue.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(ns),"ontouchstart"in document.documentElement)for(const t 
of[].concat(...document.body.children))ue.off(t,"mouseover",zt);this._activeTrigger.click=!1,this._activeTrigger[as]=!1,this._activeTrigger[rs]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),ue.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(is,ns),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(is),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new ts({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(is)}_isShown(){return this.tip&&this.tip.classList.contains(ns)}_createPopper(t){const e="function"==typeof this._config.placement?this._config.placement.call(this,t,this._element):this._config.placement,i=ls[e.toUpperCase()];return Dt(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return"function"==typeof t?t.call(this._element):t}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,..."function"==typeof this._config.popperConfig?this._config.popperConfig(e):this._config.popperConfig}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)ue.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===rs?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===rs?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");ue.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?as:rs]=!0,e._enter()})),ue.on(this._element,i,this._config.selector,(t=>{const 
e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?as:rs]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},ue.on(this._element.closest(ss),os,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=be.getDataAttributes(this._element);for(const t of Object.keys(e))es.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ht(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const e in this._config)this.constructor.Default[e]!==this._config[e]&&(t[e]=this._config[e]);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=ds.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Kt(ds);const us={...ds.Default,content:"",offset:[0,8],placement:"right",template:'<div class="popover" role="tooltip"><div class="popover-arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>',trigger:"click"},fs={...ds.DefaultType,content:"(null|string|element|function)"};class ps extends ds{static get Default(){return us}static get DefaultType(){return fs}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=ps.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Kt(ps);const gs=".bs.scrollspy",ms=`activate${gs}`,_s=`click${gs}`,bs=`load${gs}.data-api`,vs="active",ys="[href]",ws=".nav-link",As=`${ws}, .nav-item > ${ws}, .list-group-item`,Es={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},Cs={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class Ts extends ye{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return Es}static get DefaultType(){return Cs}static get 
NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ht(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(ue.off(this._config.target,_s),ue.on(this._config.target,_s,ys,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=ke.find(ys,this._config.target);for(const e of t){if(!e.hash||Wt(e))continue;const t=ke.findOne(e.hash,this._element);Bt(t)&&(this._targetLinks.set(e.hash,e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(vs),this._activateParents(t),ue.trigger(this._element,ms,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))ke.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(vs);else for(const e of ke.parents(t,".nav, .list-group"))for(const t of ke.prev(e,As))t.classList.add(vs)}_clearActiveClass(t){t.classList.remove(vs);const e=ke.find(`${ys}.${vs}`,t);for(const t of e)t.classList.remove(vs)}static jQueryInterface(t){return this.each((function(){const e=Ts.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}ue.on(window,bs,(()=>{for(const t of ke.find('[data-bs-spy="scroll"]'))Ts.getOrCreateInstance(t)})),Kt(Ts);const Os=".bs.tab",xs=`hide${Os}`,ks=`hidden${Os}`,Ls=`show${Os}`,Ds=`shown${Os}`,$s=`click${Os}`,Ss=`keydown${Os}`,Is=`load${Os}`,Ns="ArrowLeft",Ps="ArrowRight",js="ArrowUp",Ms="ArrowDown",Hs="active",Bs="fade",Ws="show",Fs=":not(.dropdown-toggle)",zs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',qs=`.nav-link${Fs}, .list-group-item${Fs}, [role="tab"]${Fs}, ${zs}`,Rs=`.${Hs}[data-bs-toggle="tab"], .${Hs}[data-bs-toggle="pill"], .${Hs}[data-bs-toggle="list"]`;class Vs extends ye{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),ue.on(this._element,Ss,(t=>this._keydown(t))))}static get 
NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?ue.trigger(e,xs,{relatedTarget:t}):null;ue.trigger(t,Ls,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Hs),this._activate(Pt(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),ue.trigger(t,Ds,{relatedTarget:e})):t.classList.add(Ws)}),t,t.classList.contains(Bs)))}_deactivate(t,e){t&&(t.classList.remove(Hs),t.blur(),this._deactivate(Pt(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),ue.trigger(t,ks,{relatedTarget:e})):t.classList.remove(Ws)}),t,t.classList.contains(Bs)))}_keydown(t){if(![Ns,Ps,js,Ms].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=[Ps,Ms].includes(t.key),i=Ut(this._getChildren().filter((t=>!Wt(t))),t.target,e,!0);i&&(i.focus({preventScroll:!0}),Vs.getOrCreateInstance(i).show())}_getChildren(){return ke.find(qs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=Pt(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`#${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=ke.findOne(t,i);s&&s.classList.toggle(n,e)};n(".dropdown-toggle",Hs),n(".dropdown-menu",Ws),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Hs)}_getInnerElement(t){return t.matches(qs)?t:ke.findOne(qs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Vs.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}ue.on(document,$s,zs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Wt(this)||Vs.getOrCreateInstance(this).show()})),ue.on(window,Is,(()=>{for(const t of ke.find(Rs))Vs.getOrCreateInstance(t)})),Kt(Vs);const Ys=".bs.toast",Ks=`mouseover${Ys}`,Qs=`mouseout${Ys}`,Xs=`focusin${Ys}`,Us=`focusout${Ys}`,Gs=`hide${Ys}`,Js=`hidden${Ys}`,Zs=`show${Ys}`,to=`shown${Ys}`,eo="hide",io="show",no="showing",so={animation:"boolean",autohide:"boolean",delay:"number"},oo={animation:!0,autohide:!0,delay:5e3};class ro extends ye{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return oo}static get DefaultType(){return so}static get 
NAME(){return"toast"}show(){ue.trigger(this._element,Zs).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(eo),qt(this._element),this._element.classList.add(io,no),this._queueCallback((()=>{this._element.classList.remove(no),ue.trigger(this._element,to),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(ue.trigger(this._element,Gs).defaultPrevented||(this._element.classList.add(no),this._queueCallback((()=>{this._element.classList.add(eo),this._element.classList.remove(no,io),ue.trigger(this._element,Js)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(io),super.dispose()}isShown(){return this._element.classList.contains(io)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){ue.on(this._element,Ks,(t=>this._onInteraction(t,!0))),ue.on(this._element,Qs,(t=>this._onInteraction(t,!1))),ue.on(this._element,Xs,(t=>this._onInteraction(t,!0))),ue.on(this._element,Us,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=ro.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function ao(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}we(ro),Kt(ro),ao((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new ds(t,{delay:{show:500,hide:100}})}))})),ao((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),ao((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))}))})();
+//# sourceMappingURL=bootstrap.js.map
\ No newline at end of file
diff --git a/docs/source/_static/scripts/bootstrap.js.LICENSE.txt b/docs/source/_static/scripts/bootstrap.js.LICENSE.txt
new file mode 100644
index 00000000..91ad10aa
--- /dev/null
+++ b/docs/source/_static/scripts/bootstrap.js.LICENSE.txt
@@ -0,0 +1,5 @@
+/*!
+ * Bootstrap v5.2.3 (https://getbootstrap.com/)
+ * Copyright 2011-2022 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)
+ */
diff --git a/docs/source/_static/scripts/bootstrap.js.map b/docs/source/_static/scripts/bootstrap.js.map
new file mode 100644
index 00000000..04c27d7b
--- /dev/null
+++ b/docs/source/_static/scripts/bootstrap.js.map
@@ -0,0 +1 @@
+{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,ipBCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXj
G,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAAO,EAAQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CCuFA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GA9EF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EA4CEtF,OA1CF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAahDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAQrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,
CAAC,iBACXqG,iBAAkB,CAAC,oBCnGN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,EAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,GAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAASF,EAAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CAuDA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GAzDF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EAYzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GChLT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,
EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/K,IAAM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK
1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,SAAU,WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA
[Minified third-party JavaScript bundle (docs static asset): machine-generated, non-reviewable content omitted.]
EAAX,CAIA,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQFhI,GAAmBknB,IAYnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChBx7B,OAAQ,KAERy7B,WAAY,eACZC,cAAc,EACdn3B,OAAQ,KACRo3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpB57B,OAAQ,gBAERy7B,WAAY,SACZC,aAAc,UACdn3B,OAAQ,UACRo3B,UAAW,SAMb,MAAME,WAAkBpf,GACtBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GAEfrE,KAAKukB,aAAe,IAAI5yB,IACxBqO,KAAKwkB,oBAAsB,IAAI7yB,IAC/BqO,KAAKykB,aAA6D,YAA9C/+B,iBAAiBsa,KAAKoF,UAAU3Y,UAA0B,KAAOuT,KAAKoF,SAC1FpF,KAAK0kB,cAAgB,KACrB1kB,KAAK2kB,UAAY,KACjB3kB,KAAK4kB,oBAAsB,CACzBC,gBAAiB,EACjBC,gBAAiB,GAEnB9kB,KAAK+kB,SACP,CAGW9gB,qBACT,OAAOggB,EACT,CAEW/f,yBACT,OAAOmgB,EACT,CAEW5nB,kBACT,MAhEW,WAiEb,CAGAsoB,UACE/kB,KAAKglB,mCAELhlB,KAAKilB,2BAEDjlB,KAAK2kB,UACP3kB,KAAK2kB,UAAUO,aAEfllB,KAAK2kB,UAAY3kB,KAAKmlB,kBAGxB,IAAK,MAAMC,KAAWplB,KAAKwkB,oBAAoBrlB,SAC7Ca,KAAK2kB,UAAUU,QAAQD,EAE3B,CAEA7f,UACEvF,KAAK2kB,UAAUO,aAEf/f,MAAMI,SACR,CAGAhB,kBAAkBF,GAUhB,OARAA,EAAOrX,OAAS6N,GAAWwJ,EAAOrX,SAAWlH,SAAS6G,KAEtD0X,EAAO6f,WAAa7f,EAAO5b,OAAS,GAAG4b,EAAO5b,oBAAsB4b,EAAO6f,WAE3C,iBAArB7f,EAAO+f,YAChB/f,EAAO+f,UAAY/f,EAAO+f,UAAUzhC,MAAM,KAAKY,KAAInF,GAASmf,OAAOC,WAAWpf,MAGzEimB,CACT,CAEA4gB,2BACOjlB,KAAKqF,QAAQ8e,eAKlB5jB,GAAaC,IAAIR,KAAKqF,QAAQrY,OAAQ22B,IACtCpjB,GAAaY,GAAGnB,KAAKqF,QAAQrY,OAAQ22B,GAAaG,IAAuB1kB,IACvE,MAAMkmB,EAAoBtlB,KAAKwkB,oBAAoB5mC,IAAIwhB,EAAMpS,OAAOtB,MAEpE,GAAI45B,EAAmB,CACrBlmB,EAAM+C,iBACN,MAAMtG,EAAOmE,KAAKykB,cAAgBpkC,OAC5BmE,EAAS8gC,EAAkBxgC,UAAYkb,KAAKoF,SAAStgB,UAE3D,GAAI+W,EAAK0pB,SAKP,YAJA1pB,EAAK0pB,SAAS,CACZnjC,IAAKoC,EACLghC,SAAU,WAMd3pB,EAAK3P,UAAY1H,CACnB,KAEJ,CAEA2gC,kBACE,MAAMjjC,EAAU,CACd2Z,KAAMmE,KAAKykB,aACXL,UAAWpkB,KAAKqF,QAAQ+e,UACxBF,WAAYlkB,KAAKqF,QAAQ6e,YAE3B,OAAO,IAAIuB,sBAAqBpjB,GAAWrC,KAAK0lB,kBAAkBrjB,IAAUngB,EAC9E,CAGAwjC,kBAAkBrjB,GAChB,MAAMsjB,EAAgB/H,GAAS5d,KAAKukB,aAAa3mC,IAAI,IAAIggC,EAAM5wB,OAAO44B,MAEhE3O,EAAW2G,IACf5d,KAAK4kB,oBAAoBC,gBAAkBjH,EAAM5wB,OAAOlI,UAExDkb,KAAK6lB,SAASF,EAAc/H,GAAO,EAG/BkH,GAAmB9kB,KAAKykB,cAAgB3+B,SAASC,iBAAiBmG,UAClE45B,EAAkBhB,GAAmB9kB,KAAK4kB,oBAAoBE,gBACpE9kB,KAAK4kB,oBAAoBE,gBAAkBA,EAE3C,IAAK,MAAMlH,KAASvb,EAAS,CAC3B,IAAKub,EAAMmI,eAAgB,CACzB/lB,KAAK0kB,cAAgB,KAErB1kB,KAAKgmB,kBAAkBL,EAAc/H,IAErC,QACF,CAEA,MAAMqI,EAA2BrI,EAAM5wB,OAAOlI,WAAakb,KAAK4kB,oBAAoBC,gBAEpF,GAAIiB,GAAmBG,GAGrB,GAFAhP,EAAS2G,IAEJkH,EACH,YAOCgB,GAAoBG,GACvBhP,EAAS2G,EAEb,CACF,CAEAoH,mCACEhlB,KAAKukB,aAAe,IAAI5yB,IACxBqO,KAAKwkB,oBAAsB,IAAI7yB,IAC/B,MAAMu0B,EAAcjf,GAAerU,KAAKkxB,GAAuB9jB,KAAKqF,QAAQrY,QAE5E,IAAK,MAAMm5B,KAAUD,EAAa,CAEhC,IAAKC,EAAOz6B,MAAQ2P,GAAW8qB,GAC7B,SAGF,MAAMb,EAAoBre,GAAeC,QAAQif,EAAOz6B,KAAMsU,KAAKoF,UAE/DtK,GAAUwqB,KACZtlB,KAAKukB,aAAa/xB,IAAI2zB,EAAOz6B,KAAMy6B,GAEnCnmB,KAAKwkB,oBAAoBhyB,IAAI2zB,EAAOz6B,KAAM45B,GAE9C,CACF,CAEAO,SAAS74B,GACHgT,KAAK0kB,gBAAkB13B,IAI3BgT,KAAKgmB,kBAAkBhmB,KAAKqF,QAAQrY,QAEpCgT,KAAK0kB,cAAgB13B,EACrBA,EAAOwO,UAAUtE,IAAI2sB,IAErB7jB,KAAKomB,iBAAiBp5B,GAEtBuT,GAAakB,QAAQzB,KAAKoF,SAAUse,GAAgB,CAClD5jB,cAAe9S,IAEnB,CAEAo5B,iBAAiBp5B,GAEf,GAAIA,EAAOwO,UAAUvW,SAzNQ,iBA0N3BgiB,GAAeC,QAhNc,mBAgNsBla,EAAOmO,QAjNtC,cAiNkEK,UAAUtE,IAAI2sB,SAItG,IAAK,MAAMwC,KAAapf,GAAeI,QAAQra,EA1NnB,qBA6N1B,IAAK,MAAMxJ,KAAQyjB,GAAeM,KAAK8e,EAAWrC,IAChDxgC,EAAKgY,UAAUtE,IAAI2sB,GAGzB,CAEAmC,kBAAkB9gC,GAChBA,EAAOsW,UAAUuH,OAAO8gB,IACxB,MAAMyC,EAAcrf,GAAerU,KAAK,GAAGkxB,MAAyBD,KAAuB3+B,GAE3F,IAAK,MAAM9E,KAAQkmC,EACjBlmC,EAAKob,UAAUuH,OAAO8gB,GAE1B,CAGAhe,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAOw5B,GAAUje,
oBAAoBrG,KAAMqE,GAEjD,GAAsB,iBAAXA,EAAX,CAIA,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQF9D,GAAaY,GAAG9gB,OAAQujC,IAAuB,KAC7C,IAAK,MAAM2C,KAAOtf,GAAerU,KAtQT,0BAuQtB0xB,GAAUje,oBAAoBkgB,EAChC,IAMFlqB,GAAmBioB,IAYnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YACjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAoB,SACpBC,GAAoB,OACpBC,GAAoB,OAIpBC,GAA+B,yBAI/BC,GAAuB,2EAEvBC,GAAsB,YAHOF,uBAAiDA,mBAA6CA,OAG/EC,KAC5CE,GAA8B,IAAIN,8BAA6CA,+BAA8CA,4BAKnI,MAAMO,WAAYziB,GAChBR,YAAY1kB,GACVmlB,MAAMnlB,GACNggB,KAAKqS,QAAUrS,KAAKoF,SAASjK,QAdN,uCAgBlB6E,KAAKqS,UAMVrS,KAAK4nB,sBAAsB5nB,KAAKqS,QAASrS,KAAK6nB,gBAE9CtnB,GAAaY,GAAGnB,KAAKoF,SAAU0hB,IAAe1nB,GAASY,KAAK4M,SAASxN,KACvE,CAGW3C,kBACT,MAlDW,KAmDb,CAGAoT,OAEE,MAAMiY,EAAY9nB,KAAKoF,SAEvB,GAAIpF,KAAK+nB,cAAcD,GACrB,OAIF,MAAME,EAAShoB,KAAKioB,iBAEdC,EAAYF,EAASznB,GAAakB,QAAQumB,EAAQvB,GAAc,CACpE3mB,cAAegoB,IACZ,KACavnB,GAAakB,QAAQqmB,EAAWnB,GAAc,CAC9D7mB,cAAekoB,IAGHnmB,kBAAoBqmB,GAAaA,EAAUrmB,mBAIzD7B,KAAKmoB,YAAYH,EAAQF,GAEzB9nB,KAAKooB,UAAUN,EAAWE,GAC5B,CAGAI,UAAUpoC,EAASqoC,GACZroC,IAILA,EAAQwb,UAAUtE,IAAIkwB,IAEtBpnB,KAAKooB,UAAU9tB,GAAuBta,IAmBtCggB,KAAK2F,gBAhBY,KACsB,QAAjC3lB,EAAQga,aAAa,SAKzBha,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GAEtCme,KAAKsoB,gBAAgBtoC,GAAS,GAE9BugB,GAAakB,QAAQzhB,EAAS4mC,GAAe,CAC3C9mB,cAAeuoB,KAVfroC,EAAQwb,UAAUtE,IAAIowB,GAWtB,GAG0BtnC,EAASA,EAAQwb,UAAUvW,SAASoiC,KACpE,CAEAc,YAAYnoC,EAASqoC,GACdroC,IAILA,EAAQwb,UAAUuH,OAAOqkB,IACzBpnC,EAAQ+6B,OAER/a,KAAKmoB,YAAY7tB,GAAuBta,IAmBxCggB,KAAK2F,gBAhBY,KACsB,QAAjC3lB,EAAQga,aAAa,SAKzBha,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MAEjCme,KAAKsoB,gBAAgBtoC,GAAS,GAE9BugB,GAAakB,QAAQzhB,EAAS0mC,GAAgB,CAC5C5mB,cAAeuoB,KAVfroC,EAAQwb,UAAUuH,OAAOukB,GAWzB,GAG0BtnC,EAASA,EAAQwb,UAAUvW,SAASoiC,KACpE,CAEAza,SAASxN,GACP,IAAK,CAAC4nB,GAAgBC,GAAiBC,GAAcC,IAAgBjtB,SAASkF,EAAM7hB,KAClF,OAGF6hB,EAAMyU,kBAENzU,EAAM+C,iBACN,MAAMoL,EAAS,CAAC0Z,GAAiBE,IAAgBjtB,SAASkF,EAAM7hB,KAC1DgrC,EAAoBzqB,GAAqBkC,KAAK6nB,eAAejhC,QAAO5G,IAAYqb,GAAWrb,KAAWof,EAAMpS,OAAQugB,GAAQ,GAE9Hgb,IACFA,EAAkB7V,MAAM,CACtB8V,eAAe,IAEjBb,GAAIthB,oBAAoBkiB,GAAmB1Y,OAE/C,CAEAgY,eAEE,OAAO5gB,GAAerU,KAAK60B,GAAqBznB,KAAKqS,QACvD,CAEA4V,iBACE,OAAOjoB,KAAK6nB,eAAej1B,MAAKzN,GAAS6a,KAAK+nB,cAAc5iC,MAAW,IACzE,CAEAyiC,sBAAsB1iC,EAAQiiB,GAC5BnH,KAAKyoB,yBAAyBvjC,EAAQ,OAAQ,WAE9C,IAAK,MAAMC,KAASgiB,EAClBnH,KAAK0oB,6BAA6BvjC,EAEtC,CAEAujC,6BAA6BvjC,GAC3BA,EAAQ6a,KAAK2oB,iBAAiBxjC,GAE9B,MAAMyjC,EAAW5oB,KAAK+nB,cAAc5iC,GAE9B0jC,EAAY7oB,KAAK8oB,iBAAiB3jC,GAExCA,EAAMtD,aAAa,gBAAiB+mC,GAEhCC,IAAc1jC,GAChB6a,KAAKyoB,yBAAyBI,EAAW,OAAQ,gBAG9CD,GACHzjC,EAAMtD,aAAa,WAAY,MAGjCme,KAAKyoB,yBAAyBtjC,EAAO,OAAQ,OAG7C6a,KAAK+oB,mCAAmC5jC,EAC1C,CAEA4jC,mCAAmC5jC,GACjC,MAAM6H,EAASsN,GAAuBnV,GAEjC6H,IAILgT,KAAKyoB,yBAAyBz7B,EAAQ,OAAQ,YAE1C7H,EAAMygC,IACR5lB,KAAKyoB,yBAAyBz7B,EAAQ,kBAAmB,IAAI7H,EAAMygC,MAEvE,CAEA0C,gBAAgBtoC,EAASgpC,GACvB,MAAMH,EAAY7oB,KAAK8oB,iBAAiB9oC,GAExC,IAAK6oC,EAAUrtB,UAAUvW,SAxMN,YAyMjB,OAGF,MAAM8hB,EAAS,CAAChN,EAAU2b,KACxB,MAAM11B,EAAUinB,GAAeC,QAAQnN,EAAU8uB,GAE7C7oC,GACFA,EAAQwb,UAAUuL,OAAO2O,EAAWsT,EACtC,EAGFjiB,EAnN6B,mBAmNIqgB,IACjCrgB,EAnN2B,iBAmNIugB,IAC/BuB,EAAUhnC,aAAa,gBAAiBmnC,EAC1C,CAEAP,yBAAyBzoC,EAASwC,EAAWpE,GACtC4B,EAAQ0b,aAAalZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CAEA2pC,cAAczY,GACZ,OAAOA,EAAK9T,UAAUvW,SAASmiC,GACjC,CAGAuB,iBAAiBrZ,GACf,OAAOA,EAAKlI,QAAQqgB,IAAuBnY,EAAOrI,GAAeC,QAAQugB,GAAqBnY,EAChG,CAGAwZ,iBAAiBxZ,GACf,OAAOA,EAAKnU,
QArOO,gCAqOoBmU,CACzC,CAGAzJ,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAO68B,GAAIthB,oBAAoBrG,MAErC,GAAsB,iBAAXqE,EAAX,CAIA,QAAqB7K,IAAjB1O,EAAKuZ,IAAyBA,EAAOlK,WAAW,MAAmB,gBAAXkK,EAC1D,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,IANL,CAOF,GACF,EAQF9D,GAAaY,GAAGrb,SAAU+gC,GAAsBW,IAAsB,SAAUpoB,GAC1E,CAAC,IAAK,QAAQlF,SAAS8F,KAAKoG,UAC9BhH,EAAM+C,iBAGJ9G,GAAW2E,OAIf2nB,GAAIthB,oBAAoBrG,MAAM6P,MAChC,IAKAtP,GAAaY,GAAG9gB,OAAQ0mC,IAAqB,KAC3C,IAAK,MAAM/mC,KAAWinB,GAAerU,KAAK80B,IACxCC,GAAIthB,oBAAoBrmB,EAC1B,IAMFqc,GAAmBsrB,IAYnB,MAEMniB,GAAY,YACZyjB,GAAkB,YAAYzjB,KAC9B0jB,GAAiB,WAAW1jB,KAC5B2jB,GAAgB,UAAU3jB,KAC1B4jB,GAAiB,WAAW5jB,KAC5B6jB,GAAa,OAAO7jB,KACpB8jB,GAAe,SAAS9jB,KACxB+jB,GAAa,OAAO/jB,KACpBgkB,GAAc,QAAQhkB,KAEtBikB,GAAkB,OAElBC,GAAkB,OAClBC,GAAqB,UACrBzlB,GAAc,CAClBkc,UAAW,UACXwJ,SAAU,UACVrJ,MAAO,UAEHtc,GAAU,CACdmc,WAAW,EACXwJ,UAAU,EACVrJ,MAAO,KAMT,MAAMsJ,WAAc3kB,GAClBR,YAAY1kB,EAASqkB,GACnBc,MAAMnlB,EAASqkB,GACfrE,KAAK4gB,SAAW,KAChB5gB,KAAK8pB,sBAAuB,EAC5B9pB,KAAK+pB,yBAA0B,EAE/B/pB,KAAKkhB,eACP,CAGWjd,qBACT,OAAOA,EACT,CAEWC,yBACT,OAAOA,EACT,CAEWzH,kBACT,MAlDS,OAmDX,CAGAoT,OACoBtP,GAAakB,QAAQzB,KAAKoF,SAAUmkB,IAExC1nB,mBAId7B,KAAKgqB,gBAEDhqB,KAAKqF,QAAQ+a,WACfpgB,KAAKoF,SAAS5J,UAAUtE,IArDN,QAgEpB8I,KAAKoF,SAAS5J,UAAUuH,OAAO0mB,IAG/B1tB,GAAOiE,KAAKoF,UAEZpF,KAAKoF,SAAS5J,UAAUtE,IAAIwyB,GAAiBC,IAE7C3pB,KAAK2F,gBAfY,KACf3F,KAAKoF,SAAS5J,UAAUuH,OAAO4mB,IAE/BppB,GAAakB,QAAQzB,KAAKoF,SAAUokB,IAEpCxpB,KAAKiqB,oBAAoB,GAUGjqB,KAAKoF,SAAUpF,KAAKqF,QAAQ+a,WAC5D,CAEAxQ,OACO5P,KAAKkqB,YAIQ3pB,GAAakB,QAAQzB,KAAKoF,SAAUikB,IAExCxnB,mBAad7B,KAAKoF,SAAS5J,UAAUtE,IAAIyyB,IAE5B3pB,KAAK2F,gBAXY,KACf3F,KAAKoF,SAAS5J,UAAUtE,IAAIuyB,IAG5BzpB,KAAKoF,SAAS5J,UAAUuH,OAAO4mB,GAAoBD,IAEnDnpB,GAAakB,QAAQzB,KAAKoF,SAAUkkB,GAAa,GAKrBtpB,KAAKoF,SAAUpF,KAAKqF,QAAQ+a,YAC5D,CAEA7a,UACEvF,KAAKgqB,gBAEDhqB,KAAKkqB,WACPlqB,KAAKoF,SAAS5J,UAAUuH,OAAO2mB,IAGjCvkB,MAAMI,SACR,CAEA2kB,UACE,OAAOlqB,KAAKoF,SAAS5J,UAAUvW,SAASykC,GAC1C,CAGAO,qBACOjqB,KAAKqF,QAAQukB,WAId5pB,KAAK8pB,sBAAwB9pB,KAAK+pB,0BAItC/pB,KAAK4gB,SAAW/iB,YAAW,KACzBmC,KAAK4P,MAAM,GACV5P,KAAKqF,QAAQkb,QAClB,CAEA4J,eAAe/qB,EAAOgrB,GACpB,OAAQhrB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAK8pB,qBAAuBM,EAC5B,MAGJ,IAAK,UACL,IAAK,WAEDpqB,KAAK+pB,wBAA0BK,EAKrC,GAAIA,EAGF,YAFApqB,KAAKgqB,gBAKP,MAAMxc,EAAcpO,EAAMU,cAEtBE,KAAKoF,WAAaoI,GAAexN,KAAKoF,SAASngB,SAASuoB,IAI5DxN,KAAKiqB,oBACP,CAEA/I,gBACE3gB,GAAaY,GAAGnB,KAAKoF,SAAU6jB,IAAiB7pB,GAASY,KAAKmqB,eAAe/qB,GAAO,KACpFmB,GAAaY,GAAGnB,KAAKoF,SAAU8jB,IAAgB9pB,GAASY,KAAKmqB,eAAe/qB,GAAO,KACnFmB,GAAaY,GAAGnB,KAAKoF,SAAU+jB,IAAe/pB,GAASY,KAAKmqB,eAAe/qB,GAAO,KAClFmB,GAAaY,GAAGnB,KAAKoF,SAAUgkB,IAAgBhqB,GAASY,KAAKmqB,eAAe/qB,GAAO,IACrF,CAEA4qB,gBACE/c,aAAajN,KAAK4gB,UAClB5gB,KAAK4gB,SAAW,IAClB,CAGA/a,uBAAuBxB,GACrB,OAAOrE,KAAK4G,MAAK,WACf,MAAM9b,EAAO++B,GAAMxjB,oBAAoBrG,KAAMqE,GAE7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBvZ,EAAKuZ,GACd,MAAM,IAAIW,UAAU,oBAAoBX,MAG1CvZ,EAAKuZ,GAAQrE,KACf,CACF,GACF,ECxjKK,SAASqqB,GAAc9tB,GACD,WAAvBzW,SAASgX,WAAyBP,IACjCzW,SAASyF,iBAAiB,mBAAoBgR,EACrD,CD6jKAyJ,GAAqB6jB,IAKrBxtB,GAAmBwtB,IEzhKnBQ,IAvCA,WAC2B,GAAGx3B,MAAM5U,KAChC6H,SAASsa,iBAAiB,+BAET7c,KAAI,SAAU+mC,GAC/B,OAAO,IAAI5J,GAAQ4J,EAAkB,CAAE/J,MAAO,CAAE1Q,KAAM,IAAKD,KAAM,MACnE,GACF,IAiCAya,IA5BA,WACYvkC,SAAS08B,eAAe,mBAC9Bj3B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAm+B,IArBA,WACE,IAAIE,EAAMzkC,SAAS08B,eAAe,mBAC9BgI,EAAS1kC,SACV2kC,uBAAuB,aAAa,GACpC1mC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5ByU,KAAK0qB,UAAY1qB,KAAK2qB,SAAW3qB,KAAK2qB,QAAUH,EAAOnsC,OACzDksC,EAAI/oC,MAAMwwB,QAAU,QAEpBuY,EAAI/oC,MAAMwwB,QAAU,OAEtBhS,KAA
K0qB,UAAY1qB,KAAK2qB,OACxB,GACF","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack:/
/pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, 
placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? 
state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (process.env.NODE_ENV !== \"production\") {\n if (!isHTMLElement(arrowElement)) {\n console.error(['Popper: \"arrow\" element must be an HTMLElement (not an SVGElement).', 'To use an SVG arrow, wrap it in an HTMLElement that will be used as', 'the arrow.'].join(' '));\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n if (process.env.NODE_ENV !== \"production\") {\n console.error(['Popper: \"arrow\" modifier\\'s `element` must be a child of the popper', 'element.'].join(' '));\n }\n\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? 
roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? 
true : _options$roundOffsets;\n\n if (process.env.NODE_ENV !== \"production\") {\n var transitionProperty = getComputedStyle(state.elements.popper).transitionProperty || '';\n\n if (adaptive && ['transform', 'top', 'right', 'bottom', 'left'].some(function (property) {\n return transitionProperty.indexOf(property) >= 0;\n })) {\n console.warn(['Popper: Detected CSS transitions on at least one of the following', 'CSS properties: \"transform\", \"top\", \"right\", \"bottom\", \"left\".', '\\n\\n', 'Disable the \"computeStyles\" modifier\\'s `adaptive` option to allow', 'for smooth transitions, or remove these properties from the CSS', 'transition declaration on the popper element if only transitioning', 'opacity or background-color for example.', '\\n\\n', 'We recommend using the popper element as a wrapper around an inner', 'element that can have any CSS property transitioned for animations.'].join(' '));\n }\n }\n\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n } // Public\n\n\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n\n return parents;\n },\n\n prev(element, selector) {\n let previous = element.previousElementSibling;\n\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n\n previous = previous.previousElementSibling;\n }\n\n return [];\n },\n\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n\n next = next.nextElementSibling;\n }\n\n return [];\n },\n\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n }\n\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = 
{\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n\n if (!element || !Swipe.isSupported()) {\n return;\n }\n\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n\n this._initEvents();\n } // Getters\n\n\n static get Default() {\n return Default$c;\n }\n\n static get DefaultType() {\n return DefaultType$c;\n }\n\n static get NAME() {\n return NAME$d;\n } // Public\n\n\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n } // Private\n\n\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n\n this._handleSwipe();\n\n execute(this._config.endCallback);\n }\n\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n\n if (!direction) {\n return;\n }\n\n execute(direction > 0 ? this._config.rightCallback : this._config.leftCallback);\n }\n\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n } // Static\n\n\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = 
`click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n\n this._addEventListeners();\n\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n } // Getters\n\n\n static get Default() {\n return Default$b;\n }\n\n static get DefaultType() {\n return DefaultType$b;\n }\n\n static get NAME() {\n return NAME$c;\n } // Public\n\n\n next() {\n this._slide(ORDER_NEXT);\n }\n\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n\n prev() {\n this._slide(ORDER_PREV);\n }\n\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n\n this._clearInterval();\n }\n\n cycle() {\n this._clearInterval();\n\n this._updateInterval();\n\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n\n this.cycle();\n }\n\n to(index) {\n const items = this._getItems();\n\n if (index > items.length - 1 || index < 0) {\n return;\n }\n\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n\n const activeIndex = this._getItemIndex(this._getActive());\n\n if (activeIndex === index) {\n return;\n }\n\n const order = index > activeIndex ? 
ORDER_NEXT : ORDER_PREV;\n\n this._slide(order, items[index]);\n }\n\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n\n super.dispose();\n } // Private\n\n\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n } // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n\n this.pause();\n\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n\n const direction = KEY_TO_DIRECTION[event.key];\n\n if (direction) {\n event.preventDefault();\n\n this._slide(this._directionToOrder(direction));\n }\n }\n\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n\n if (!element) {\n return;\n }\n\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n\n const activeElement = this._getActive();\n\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n\n if (nextElement === activeElement) {\n return;\n }\n\n const nextElementIndex = this._getItemIndex(nextElement);\n\n const triggerEvent = eventName => {\n return 
EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n\n const slideEvent = triggerEvent(EVENT_SLIDE);\n\n if (slideEvent.defaultPrevented) {\n return;\n }\n\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // todo: change tests that use empty divs to avoid this check\n return;\n }\n\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n\n this._setActiveIndicatorElement(nextElementIndex);\n\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n\n if (isCycling) {\n this.cycle();\n }\n }\n\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n }\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = getElementFromSelector(this);\n\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n\n if (slideIndex) {\n carousel.to(slideIndex);\n\n carousel._maybeEnableCycle();\n\n return;\n }\n\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n\n carousel._maybeEnableCycle();\n\n return;\n }\n\n carousel.prev();\n\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n\n for (const elem of toggleList) {\n const selector = getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n\n this._initializeChildren();\n\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n\n if (this._config.toggle) {\n this.toggle();\n }\n } // Getters\n\n\n static get Default() {\n 
return Default$a;\n }\n\n static get DefaultType() {\n return DefaultType$a;\n }\n\n static get NAME() {\n return NAME$b;\n } // Public\n\n\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n\n let activeChildren = []; // find active children\n\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n\n if (startEvent.defaultPrevented) {\n return;\n }\n\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n\n const dimension = this._getDimension();\n\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n\n this._element.style[dimension] = 0;\n\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n\n this._isTransitioning = true;\n\n const complete = () => {\n this._isTransitioning = false;\n\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n\n this._queueCallback(complete, this._element, true);\n\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n\n if (startEvent.defaultPrevented) {\n return;\n }\n\n const dimension = this._getDimension();\n\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n\n for (const trigger of this._triggerArray) {\n const element = getElementFromSelector(trigger);\n\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n\n this._isTransitioning = true;\n\n const complete = () => {\n this._isTransitioning = false;\n\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n\n this._element.style[dimension] = '';\n\n this._queueCallback(complete, this._element, true);\n }\n\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n } // Private\n\n\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n\n config.parent = getElement(config.parent);\n return config;\n }\n\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
WIDTH : HEIGHT;\n }\n\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n\n for (const element of children) {\n const selected = getElementFromSelector(element);\n\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent); // remove children if greater depth\n\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n } // Static\n\n\n static jQueryInterface(config) {\n const _config = {};\n\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n }\n });\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n\n const selector = getSelectorFromElement(this);\n const selectorElements = SelectorEngine.find(selector);\n\n for (const element of selectorElements) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = 
'.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = '.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // todo: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.2/forms/input-group/\n\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n } // Getters\n\n\n static get Default() {\n return Default$9;\n }\n\n static get DefaultType() {\n return DefaultType$9;\n }\n\n static get NAME() {\n return NAME$a;\n } // Public\n\n\n toggle() {\n return this._isShown() ? 
this.hide() : this.show();\n }\n\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n\n if (showEvent.defaultPrevented) {\n return;\n }\n\n this._createPopper(); // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n\n\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n\n this._element.focus();\n\n this._element.setAttribute('aria-expanded', true);\n\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n\n this._element.classList.add(CLASS_NAME_SHOW$6);\n\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n\n const relatedTarget = {\n relatedTarget: this._element\n };\n\n this._completeHide(relatedTarget);\n }\n\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n\n super.dispose();\n }\n\n update() {\n this._inNavbar = this._detectNavbar();\n\n if (this._popper) {\n this._popper.update();\n }\n } // Private\n\n\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n\n if (hideEvent.defaultPrevented) {\n return;\n } // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n\n\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n\n if (this._popper) {\n this._popper.destroy();\n }\n\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n\n this._element.setAttribute('aria-expanded', 'false');\n\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n\n _getConfig(config) {\n config = super._getConfig(config);\n\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n\n return config;\n }\n\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n\n let referenceElement = this._element;\n\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n\n const popperConfig = this._getPopperConfig();\n\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n\n _getPlacement() {\n const parentDropdown = this._parent;\n\n if 
(parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n } // We need to trim the value because custom properties can also include spaces\n\n\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n\n return isEnd ? PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n\n _getOffset() {\n const {\n offset\n } = this._config;\n\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n\n return offset;\n }\n\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n }; // Disable Popper if we have a static display or Dropdown is in Navbar\n\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // todo:v6 remove\n\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n\n return { ...defaultBsPopperConfig,\n ...(typeof this._config.popperConfig === 'function' ? this._config.popperConfig(defaultBsPopperConfig) : this._config.popperConfig)\n };\n }\n\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n\n if (!items.length) {\n return;\n } // if target isn't included in items (e.g. 
when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n\n\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n } // Static\n\n\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n\n if (typeof config !== 'string') {\n return;\n }\n\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n\n data[config]();\n });\n }\n\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n\n if (!context || context._config.autoClose === false) {\n continue;\n }\n\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n } // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n\n\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n\n const relatedTarget = {\n relatedTarget: context._element\n };\n\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n\n context._completeHide(relatedTarget);\n }\n }\n\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n\n if (isInput && !isEscapeEvent) {\n return;\n }\n\n event.preventDefault(); // todo: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.2/forms/input-group/\n\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? 
this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n\n instance._selectMenuItem(event);\n\n return;\n }\n\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n\n}\n/**\n * Data API implementation\n */\n\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n } // Public\n\n\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n\n hide() {\n const width = this.getWidth();\n\n this._disableOverFlow(); // give padding to element to balance the hidden scrollbar width\n\n\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width); // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n\n\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n\n isOverflowing() {\n return this.getWidth() > 0;\n } // Private\n\n\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n\n this._element.style.overflow = 'hidden';\n }\n\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n\n this._saveInitialAttribute(element, styleProperty);\n\n const calculatedValue = 
window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty); // We only want to remove the property if the value is `null`; the value can also be zero\n\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n\n};\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n } // Getters\n\n\n static get Default() {\n return Default$8;\n }\n\n static get DefaultType() {\n return DefaultType$8;\n }\n\n static get NAME() {\n return NAME$9;\n } // Public\n\n\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n\n this._append();\n\n const element = this._getElement();\n\n if (this._config.isAnimated) {\n reflow(element);\n }\n\n element.classList.add(CLASS_NAME_SHOW$5);\n\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n\n dispose() {\n if (!this._isAppended) {\n return;\n }\n\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n\n this._element.remove();\n\n this._isAppended = false;\n } // Private\n\n\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n\n this._element = backdrop;\n }\n\n return this._element;\n }\n\n 
_configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n\n _append() {\n if (this._isAppended) {\n return;\n }\n\n const element = this._getElement();\n\n this._config.rootElement.append(element);\n\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n\n};\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n } // Getters\n\n\n static get Default() {\n return Default$7;\n }\n\n static get DefaultType() {\n return DefaultType$7;\n }\n\n static get NAME() {\n return NAME$8;\n } // Public\n\n\n activate() {\n if (this._isActive) {\n return;\n }\n\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n\n deactivate() {\n if (!this._isActive) {\n return;\n }\n\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n } // Private\n\n\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n\n const elements = SelectorEngine.focusableChildren(trapElement);\n\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n\n this._lastTabNavDirection = event.shiftKey ? 
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap (v5.2.3): modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n\n this._addEventListeners();\n } // Getters\n\n\n static get Default() {\n return Default$6;\n }\n\n static get DefaultType() {\n return DefaultType$6;\n }\n\n static get NAME() {\n return NAME$7;\n } // Public\n\n\n toggle(relatedTarget) {\n return this._isShown ? 
+```
+metrics:
+  # Specify the type of model weights to load by entering "psf_model" to load weights of final psf model or "checkpoint" to load weights from a checkpoint callback.
+  model_save_path: psf_model
+
+  # Choose the training cycle for which to evaluate the psf_model. Can be: 1, 2, ...
+  saved_training_cycle: 2
+
+  # Metrics-only run: Specify model_params for a pre-trained model else leave blank if running training + metrics
+  # Specify path to Parent Directory of Trained Model
+  trained_model_path:
+
+  # Name of the Trained Model Config file stored in the config sub-directory of the trained_model_path parent directory
+ trained_model_config:
+
+  # Evaluate the monochromatic RMSE metric.
+ eval_mono_metric_rmse: True
+
+  # Evaluate the OPD RMSE metric.
+ eval_opd_metric_rmse: True
+
+  # Evaluate the shape RMSE metrics at super-resolution (sr) for the training dataset.
+ eval_train_shape_sr_metric_rmse: True
+
+  # Name of the Plotting Config file - enter the name of a yaml file to generate metrics plots, or leave empty to run the metrics evaluation only
+ plotting_config:
+
+ ground_truth_model:
+ model_params:
+ .
+ .
+ .
+ metrics_hparams:
+ .
+ .
+ .
+
+```
+The metrics key `model_save_path` selects whether the metrics evaluation is run on the fully trained PSF model or on the weights of a given checkpoint cycle.
+The parameter `saved_training_cycle` specifies the cycle at which to run metrics evaluation.
+
+As stated in the previous section, the `metrics` evaluation pipeline can be executed directly after the completion of the `training` routine to evaluate the trained PSF model. It can also be launched independently to compute the metrics of a previously trained model. This is done by setting the parameter `trained_model_path` to the absolute path of the parent directory containing the output files of the model, i.e. the directory with the naming convention `wf-outputs-timestamp` (see this {ref}`example of the run output directory`). The user must then provide, as the entry for the key `trained_model_config`, the path to the training configuration file within that directory, e.g. `config/training_config.yaml`. Below we show an example of this for the case where a user wants to run the metrics evaluation of a pretrained full PSF model saved in the directory `wf-outputs-202310161536`.
+
+```
+WaveDiff Pre-trained Model
+--------------------------
+
+wf-outputs-202310161536
+├── checkpoint
+│ ├── checkpoint
+│ ├── checkpoint_callback_poly-coherent_euclid_200stars_cycle1.data-00000-of-00001
+│ ├── checkpoint_callback_poly-coherent_euclid_200stars_cycle1.index
+│ ├── checkpoint_callback_poly-coherent_euclid_200stars_cycle2.data-00000-of-00001
+│ └── checkpoint_callback_poly-coherent_euclid_200stars_cycle2.index
+├── config
+│ ├── configs.yaml
+│ ├── data_config.yaml
+│ ├── metrics_config.yaml
+│ └── training_config.yaml
+├── log-files
+│ └── wf-psf_202310161536.log
+├── metrics
+│ └── metrics-poly-coherent_euclid_200stars.npy
+├── optim-hist
+│ └── optim_hist_poly-coherent_euclid_200stars.npy
+├── plots
+└── psf_model
+ ├── checkpoint
+ ├── psf_model_poly-coherent_euclid_200stars_cycle1.data-00000-of-00001
+ ├── psf_model_poly-coherent_euclid_200stars_cycle1.index
+ ├── psf_model_poly-coherent_euclid_200stars_cycle2.data-00000-of-00001
+ └── psf_model_poly-coherent_euclid_200stars_cycle2.index
+
+metrics_config.yaml
+-------------------
+
+metrics:
+ # Specify the type of model weights to load by entering "psf_model" to load weights of final psf model or "checkpoint" to load weights from a checkpoint callback.
+ model_save_path: psf_model
+ # Choose the training cycle for which to evaluate the psf_model. Can be: 1, 2, ...
+ saved_training_cycle: 2
+ # Metrics-only run: Specify model_params for a pre-trained model else leave blank if running training + metrics
+ # Specify path to Parent Directory of Trained Model
+ trained_model_path: /path/to/wf-outputs-202310161536
+ # Name of Trained Model Config file inside /trained_model_path/ parent directory
+ trained_model_config: training_config.yaml
+```
+The results of the metrics evaluation will be saved in the new output directory created at runtime (not in the pretrained model directory created previously).
+
+When the `trained_model_path` and `trained_model_config` fields are left empty, as stated in the commented line, WaveDiff runs the `training` and `metrics` pipelines in sequence. At the start of the `metrics` evaluation task, it automatically retrieves the model weights at the specific cycle defined by `model_save_path` and `saved_training_cycle` from the `wf-outputs-` sub-directories generated at runtime just before the training task.
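+
+A minimal sketch of the relevant fields for such a joint training + metrics run (the values shown are illustrative; the remaining keys are unchanged):
+
+```
+metrics:
+  model_save_path: checkpoint
+  saved_training_cycle: 2
+  # Leave blank to run training + metrics
+  trained_model_path:
+  trained_model_config:
+```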
+
+The WaveDiff `metrics` pipeline is programmed to automatically evaluate the Polychromatic Pixel Reconstruction metrics for both the test (at low- and super-pixel resolution) and training data sets (at low-pixel resolution). The Monochromatic Pixel Reconstruction and OPD Reconstruction metrics are both optional and can be selected by setting the Boolean flags `eval_{metric_type}_metric_rmse` to `True` to compute the metric or `False` to disable it. Finally, the Weak Lensing Shape Metrics are computed by default for the test data set at super-pixel resolution and as an option for the training data set by setting the parameter `eval_train_shape_sr_metric_rmse` to `True` or `False` (note: setting this option to `True` will also trigger WaveDiff to compute the Polychromatic Pixel Reconstruction metrics at super-pixel resolution for the training data set). The table below provides a summary of these different settings.
+
+(metrics_settings)=
+| Metric type | Metric Identifier | Test Data Set | Training Data Set |
+| ----------- | ------- | ------- | ------- |
+| Polychromatic Pixel Reconstruction | `poly` | Default | Default (low-res), Optional (super-res) |
+| Monochromatic Pixel Reconstruction | `mono` | Optional | Optional |
+| Optical Path Differences Reconstruction (OPD) | `opd` | Optional | Optional |
+| Weak Lensing Shape Metrics (super-res only) | `shape_sr` | Default | Optional |
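+
+Expressed as configuration flags, the optional entries in this table correspond to the Boolean keys shown in the earlier example (a sketch):
+
+```
+# mono: optional for both data sets
+eval_mono_metric_rmse: True
+# opd: optional for both data sets
+eval_opd_metric_rmse: True
+# shape_sr (and poly at super-resolution) for the training data set
+eval_train_shape_sr_metric_rmse: True
+```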
+
+The option to generate plots of the metric evaluation results is provided by setting the value of the parameter `plotting_config` to the name of the [plotting configuration](plotting_config) file, e.g. `plotting_config.yaml`. This will trigger WaveDiff's plotting pipeline to produce plots after completion of the metrics evaluation pipeline. If the field is left empty, no plots are generated.
+
+To compute the errors of the trained PSF model, the `metrics` package can retrieve a ground truth data set if one exists in the dataset files listed in the [data_configuration](data_config) file. If it does, WaveDiff can generate at runtime a `ground truth model` using the parameters in the metrics configuration file associated with the key `ground_truth_model`. The parameter settings for the ground truth model are similar to those contained in the [training configuration](training_config) file. The choice of model, indicated by the key `model_name`, is currently limited to the polychromatic PSF model, referenced by the short name `poly`.
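+
+A schematic sketch of the corresponding block (only the model choice is shown; the full set of `model_params` mirrors the training configuration):
+
+```
+ground_truth_model:
+  model_params:
+    model_name: poly
+```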
+
+The `metrics` package uses [TensorFlow](https://www.tensorflow.org) to reconstruct the PSF model and to evaluate the various metrics. The `metrics_hparams` key contains some standard machine learning hyperparameters, such as the `batch_size`, as well as additional parameters like `output_dim`, which sets the dimension of the output pixel postage stamp.
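+
+For example, a sketch of this block might look as follows (the values shown are illustrative assumptions, not defaults):
+
+```
+metrics_hparams:
+  batch_size: 16
+  # Dimension of the output pixel postage stamp
+  output_dim: 64
+```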
+
+(plotting_config)=
+## Plot Configuration
+
+The [plotting_config.yaml](https://github.com/CosmoStat/wf-psf/blob/dummy_main/config/plotting_config.yaml) file stores the configuration parameters for the WaveDiff pipeline to generate plots for the metrics listed in the {ref}`metrics settings table <metrics_settings>` for each data set.
+
+An example of the contents of the `plotting_config.yaml` file is shown below.
+
+```
+plotting_params:
+ # Specify path to parent folder containing wf-psf metrics outputs for all runs, e.g. $WORK/wf-outputs/
+ metrics_output_path:
+ # List all of the parent output directories (i.e. wf-outputs-xxxxxxxxxxx) that contain metrics results to be included in the plot
+ metrics_dir:
+ # - wf-outputs-xxxxxxxxxxx1
+ # - wf-outputs-xxxxxxxxxxx2
+ # List the corresponding names of the metric config file in each of the parent output directories (would like to change such that code goes and finds them in the metrics_dir)
+ metrics_config:
+ # - metrics_config_1.yaml
+ # - metrics_config_2.yaml
+ # Show plots flag
+ plot_show: False
+```
+As nearly all of the specific plotting parameters are pre-coded by default, the `plotting_config` file parameters mainly provide the option to jointly plot the metrics of several trained PSF models. Consider the example below, where the user would like to plot the metrics from three output runs stored in directories labelled `wf-outputs-`.
+
+```
+wf-outputs/
+├── wf-outputs-202305271829
+│ ├── config
+│ │ ├── data_config.yaml
+│ │ └── metrics_config_200.yaml
+│ ├── metrics
+│ │ └── metrics-poly-coherent_euclid_200stars.npy
+├── wf-outputs-202305271845
+│ ├── config
+│ │ ├── data_config.yaml
+│ │ └── metrics_config_500.yaml
+│ ├── metrics
+│ │ └── metrics-poly-coherent_euclid_500stars.npy
+├── wf-outputs-202305271918
+│ ├── config
+│ │ ├── data_config.yaml
+│ │ └── metrics_config_1000.yaml
+│ ├── metrics
+│ │ └── metrics-poly-coherent_euclid_1000stars.npy
+
+```
+
+The following `plotting_config.yaml` file would generate plots that include each of the three metrics outputs from the example above:
+
+```
+plotting_params:
+ # Specify path to parent folder containing wf-psf metrics outputs for all runs, ex: $WORK/wf-outputs/
+ metrics_output_path: $WORK/wf-outputs/
+ # List all of the parent output directories (i.e. wf-outputs-xxxxxxxxxxx) that contain metrics results to be included in the plot
+ metrics_dir:
+ - wf-outputs-202305271829
+ - wf-outputs-202305271845
+ - wf-outputs-202305271918
+
+ # List the corresponding names of the metric config file in each of the parent output directories (would like to change such that code goes and finds them in the metrics_dir)
+ metrics_config:
+ - metrics_config_200.yaml
+ - metrics_config_500.yaml
+ - metrics_config_1000.yaml
+
+ # Show plots flag
+ plot_show: False
+```
+In the field for the key `metrics_output_path`, the user provides the path to the parent directory containing the subdirectories of the runs to be plotted. Then, under the `metrics_dir` key, the user lists row-by-row the names of the parent directories for each run. Similarly, under the `metrics_config` key, the user lists row-by-row the names of the `metrics_config` files used for those runs. Note that if the user only wants to plot the metrics of an active metrics evaluation run, i.e. when the `plotting` pipeline task runs directly after the `metrics` pipeline, these fields can be left empty. The remaining plotting parameter, `plot_show`, is a Boolean used to trigger a display of the plot at runtime (as in an interactive session); if `False`, no plot is displayed.
+
+(master_config_file)=
+## Master Configuration
+
+The `configs.yaml` file is the master configuration file used to define all of the pipeline tasks to be submitted and executed by `WaveDiff` at runtime. In this file, the user lists the processing tasks (one or more) to be performed by setting each associated configuration variable `{pipeline_task}_conf` to the name of the corresponding configuration file `{pipeline_task}_config.yaml`. The example below configures `WaveDiff` to launch a sequence of runs that train models 1...n with their respective configurations given in the files `training_config_{id}.yaml`.
+
+```
+---
+ training_conf_1: training_config_1.yaml
+ training_conf_2: training_config_2.yaml
+ ...
+ training_conf_n: training_config_n.yaml
+```
+Each training task is run sequentially and independently of the others. All of the results are stored in the same `wf-outputs-` directory as shown in the example below.
+
+```
+├── wf-outputs-202310131055
+│ ├── checkpoint
+│ │ ├── checkpoint
+│ │ ├── checkpoint_callback_poly-coherent_euclid_200stars_1_cycle1.data-00000-of-00001
+│ │ ├── checkpoint_callback_poly-coherent_euclid_200stars_1_cycle1.index
+│ ├── ...
+│ │ ├── checkpoint_callback_poly-coherent_euclid_200stars_n_cycle1.data-00000-of-00001
+│ │ ├── checkpoint_callback_poly-coherent_euclid_200stars_n_cycle1.index
+│ ├── config
+│ │ ├── configs.yaml
+│ │ ├── data_config.yaml
+│ │ ├── training_config_1.yaml
+│ │ ├── ...
+│ │ └── training_config_n.yaml
+│ ├── log-files
+│ │ └── wf-psf_202310131055.log
+│ ├── optim-hist
+│ │ ├── optim_hist_poly-coherent_euclid_200stars_1.npy
+│ │ ├── ...
+│ │ └── optim_hist_poly-coherent_euclid_200stars_n.npy
+│ ├── plots
+│ └── psf_model
+│ ├── checkpoint
+│ ├── psf_model_poly-coherent_euclid_200stars_1_cycle1.data-00000-of-00001
+│ ├── psf_model_poly-coherent_euclid_200stars_1_cycle1.index
+│ ├── ...
+│ ├── psf_model_poly-coherent_euclid_200stars_n_cycle1.data-00000-of-00001
+│ ├── psf_model_poly-coherent_euclid_200stars_n_cycle1.index
+```
+
+Likewise, to perform a metrics evaluation and generate plots for each training run (as in the example above), the names of the `metrics_config.yaml` and `plotting_config.yaml` files need to be provided as the values of the `metrics_config` parameter in each `training_config_{id}.yaml` file and of the `plotting_config` parameter in the `metrics_config.yaml` file, respectively. The same `metrics_config.yaml` and `plotting_config.yaml` files can be used for each `training_config_{id}.yaml` file. Below is an example of the `config` tree structure for a `training` + `metrics` + `plotting` run:
+
+```
+config/
+├── configs.yaml
+├── data_config.yaml
+├── metrics_config.yaml
+├── plotting_config.yaml
+├── training_config_1.yaml
+├── ...
+└── training_config_n.yaml
+```
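+
+Schematically, the configuration files reference one another as sketched below (excerpts only; the surrounding structure of each file is abbreviated):
+
+```
+# In each training_config_{id}.yaml
+metrics_config: metrics_config.yaml
+
+# In metrics_config.yaml
+plotting_config: plotting_config.yaml
+```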
+
+Note that this version of WaveDiff produces a single plot per metric per trained model. To display the metrics results of all trained models in a single plot, the user must do so in a separate run, following the steps defined in the section [Plot Configuration](plotting_config). The next upgrade to WaveDiff will feature options to produce either independent metrics plots per trained model or a single master plot comparing the respective metric results of all trained models.
+
+The master configuration file can include a combination of the three pipeline tasks, i.e. training, metrics and plotting. This will prompt WaveDiff to perform independent tasks such as training a new PSF model, computing the metrics of a pre-trained PSF model, or producing plots for a selection of pre-computed metrics. While WaveDiff currently executes these jobs sequentially on a single GPU, the future plan is to distribute these tasks in parallel across multiple GPUs to accelerate the computation.
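+
+For instance, a master configuration combining all three tasks might look as follows (a sketch following the `{pipeline_task}_conf` naming convention described above; file names are illustrative):
+
+```
+---
+  training_conf: training_config.yaml
+  metrics_conf: metrics_config.yaml
+  plotting_conf: plotting_config.yaml
+```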
+
+If you have any questions or feedback, please don't hesitate to open a [Github issue](https://github.com/CosmoStat/wf-psf/issues).
\ No newline at end of file
diff --git a/docs/source/dependencies.md b/docs/source/dependencies.md
new file mode 100644
index 00000000..6b73a6a9
--- /dev/null
+++ b/docs/source/dependencies.md
@@ -0,0 +1,20 @@
+# Dependencies
+
+Third-party software packages required by WaveDiff are installed automatically (see [Installation](installation.md)).
+
+## Python Dependencies
+
+| Package Name | References |
+|--------------|---------------------------------------------------|
+| [numpy](https://numpy.org/) | {cite:t}`harris:20` |
+| [scipy](https://scipy.org) | {cite:t}`SciPy-NMeth:20` |
+| [tensorflow](https://www.tensorflow.org) | {cite:t}`tensorflow:15` |
+| [tensorflow-addons](https://www.tensorflow.org/addons) |{cite:t}`tensorflow:15` |
+| [tensorflow-estimator](https://www.tensorflow.org/api_docs/python/tf/estimator) |{cite:t}`tensorflow:15` |
+| [zernike](https://github.com/jacopoantonello/zernike) | {cite:t}`Antonello:15` |
+| [opencv-python](https://docs.opencv.org/4.x/index.html) | {cite:t}`opencv_library:08` |
+| [pillow](https://pillow.readthedocs.io/en/stable/) | {cite:t}`clark:15` |
+| [galsim](http://galsim-developers.github.io/GalSim/_build/html/index.html#) | {cite:t}`rowe:15` |
+| [astropy](https://www.astropy.org) | {cite:t}`astropy:13,astropy:18,astropy:22` |
+| [matplotlib](https://matplotlib.org) | {cite:t}`Hunter:07` |
+| [seaborn](https://seaborn.pydata.org) | {cite:t}`Waskom:21` |
\ No newline at end of file
diff --git a/docs/source/imgs/cosmostat_logo.png b/docs/source/imgs/cosmostat_logo.png
new file mode 100644
index 00000000..2a2ba60a
Binary files /dev/null and b/docs/source/imgs/cosmostat_logo.png differ
diff --git a/docs/source/imgs/psf_model_diagram_v6.png b/docs/source/imgs/psf_model_diagram_v6.png
new file mode 100644
index 00000000..50b81c3b
Binary files /dev/null and b/docs/source/imgs/psf_model_diagram_v6.png differ
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 00000000..bb4dcfc8
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,29 @@
+.. wf-psf documentation master file, created by
+ sphinx-quickstart on Fri Oct 13 14:36:06 2023.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+WaveDiff
+==================================
+.. Include table of contents
+.. include:: toc.rst
+
+WaveDiff is a differentiable data-driven wavefront-based PSF modelling framework developed in the
+|link-to-cosmostat| lab at CEA Paris-Saclay.
+
+This documentation provides information for the installation
+and running of WaveDiff. If you are unable to find what you are looking for here,
+please |link-to-issues| on the GitHub repository.
+
+If you use WaveDiff for your academic work, we ask you to please
+cite :cite:t:`Liaudat:23`.
+
+.. |link-to-cosmostat| raw:: html
+
+   <a href="https://www.cosmostat.org" target="_blank">CosmoStat</a>
+
+.. |link-to-issues| raw:: html
+
+   <a href="https://github.com/CosmoStat/wf-psf/issues" target="_blank">open an issue</a>
\ No newline at end of file
diff --git a/docs/source/installation.md b/docs/source/installation.md
new file mode 100644
index 00000000..88db29e9
--- /dev/null
+++ b/docs/source/installation.md
@@ -0,0 +1,23 @@
+# Installation
+
+
+WaveDiff is software written in Python that uses the [TensorFlow](https://www.tensorflow.org) framework to train and evaluate the parameters of the PSF model. It is therefore advisable to run WaveDiff on machines equipped with a GPU.
+
+Note: You may want to set up a dedicated Python environment for running WaveDiff using e.g. [Conda](https://docs.conda.io/en/latest/). You can use the minimal installer [Miniconda](https://docs.conda.io/projects/miniconda/en/latest/) to set up an environment in which to run the commands below, which install the subset of packages needed to run WaveDiff.
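+
+For example, you might create and activate such an environment as follows (the environment name and Python version are illustrative):
+
+```
+$ conda create -n wavediff python=3.10
+$ conda activate wavediff
+```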
+
+## Installation Steps
+
+Clone the repository:
+
+```
+$ git clone https://github.com/CosmoStat/wf-psf.git
+```
+
+Next you can enter the `wf-psf` directory and run `pip` to install WaveDiff and its [dependencies](dependencies.md).
+
+```
+$ cd wf-psf
+$ pip install .
+```
+
+You can now proceed to the next section, where we show you how to run WaveDiff.
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
new file mode 100644
index 00000000..c437a60e
--- /dev/null
+++ b/docs/source/modules.rst
@@ -0,0 +1,7 @@
+wf_psf
+======
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf
diff --git a/docs/source/refs.bib b/docs/source/refs.bib
new file mode 100644
index 00000000..65eb9143
--- /dev/null
+++ b/docs/source/refs.bib
@@ -0,0 +1,289 @@
+@article{Antonello:15,
+author = {Jacopo Antonello and Michel Verhaegen},
+journal = {J. Opt. Soc. Am. A},
+keywords = {Wave-front sensing; Phase retrieval; Aberration compensation; Active or adaptive optics; Adaptive optics; Deformable mirrors; Fourier transforms; Optical components; Optical systems; Phase retrieval},
+number = {6},
+pages = {1160--1170},
+publisher = {Optica Publishing Group},
+title = {Modal-based phase retrieval for adaptive optics},
+volume = {32},
+month = {Jun},
+year = {2015},
+url = {https://opg.optica.org/josaa/abstract.cfm?URI=josaa-32-6-1160},
+doi = {10.1364/JOSAA.32.001160},
+}
+
+@article{astropy:13,
+Adsnote = {Provided by the SAO/NASA Astrophysics Data System},
+Adsurl = {http://adsabs.harvard.edu/abs/2013A%26A...558A..33A},
+Archiveprefix = {arXiv},
+Author = {{Astropy Collaboration} and {Robitaille}, T.~P. and {Tollerud}, E.~J. and {Greenfield}, P. and {Droettboom}, M. and {Bray}, E. and {Aldcroft}, T. and {Davis}, M. and {Ginsburg}, A. and {Price-Whelan}, A.~M. and {Kerzendorf}, W.~E. and {Conley}, A. and {Crighton}, N. and {Barbary}, K. and {Muna}, D. and {Ferguson}, H. and {Grollier}, F. and {Parikh}, M.~M. and {Nair}, P.~H. and {Unther}, H.~M. and {Deil}, C. and {Woillez}, J. and {Conseil}, S. and {Kramer}, R. and {Turner}, J.~E.~H. and {Singer}, L. and {Fox}, R. and {Weaver}, B.~A. and {Zabalza}, V. and {Edwards}, Z.~I. and {Azalee Bostroem}, K. and {Burke}, D.~J. and {Casey}, A.~R. and {Crawford}, S.~M. and {Dencheva}, N. and {Ely}, J. and {Jenness}, T. and {Labrie}, K. and {Lim}, P.~L. and {Pierfederici}, F. and {Pontzen}, A. and {Ptak}, A. and {Refsdal}, B. and {Servillat}, M. and {Streicher}, O.},
+Doi = {10.1051/0004-6361/201322068},
+Eid = {A33},
+Eprint = {1307.6212},
+Journal = {\aap},
+Keywords = {methods: data analysis, methods: miscellaneous, virtual observatory tools},
+Month = oct,
+Pages = {A33},
+Primaryclass = {astro-ph.IM},
+Title = {{Astropy: A community Python package for astronomy}},
+Volume = 558,
+Year = 2013,
+Bdsk-Url-1 = {https://dx.doi.org/10.1051/0004-6361/201322068}}
+
+@ARTICLE{astropy:18,
+ author = {{Astropy Collaboration} and {Price-Whelan}, A.~M. and
+ {Sip{\H{o}}cz}, B.~M. and {G{\"u}nther}, H.~M. and {Lim}, P.~L. and
+ {Crawford}, S.~M. and {Conseil}, S. and {Shupe}, D.~L. and
+ {Craig}, M.~W. and {Dencheva}, N. and {Ginsburg}, A. and {Vand
+ erPlas}, J.~T. and {Bradley}, L.~D. and {P{\'e}rez-Su{\'a}rez}, D. and
+ {de Val-Borro}, M. and {Aldcroft}, T.~L. and {Cruz}, K.~L. and
+ {Robitaille}, T.~P. and {Tollerud}, E.~J. and {Ardelean}, C. and
+ {Babej}, T. and {Bach}, Y.~P. and {Bachetti}, M. and {Bakanov}, A.~V. and
+ {Bamford}, S.~P. and {Barentsen}, G. and {Barmby}, P. and
+ {Baumbach}, A. and {Berry}, K.~L. and {Biscani}, F. and {Boquien}, M. and
+ {Bostroem}, K.~A. and {Bouma}, L.~G. and {Brammer}, G.~B. and
+ {Bray}, E.~M. and {Breytenbach}, H. and {Buddelmeijer}, H. and
+ {Burke}, D.~J. and {Calderone}, G. and {Cano Rodr{\'\i}guez}, J.~L. and
+ {Cara}, M. and {Cardoso}, J.~V.~M. and {Cheedella}, S. and {Copin}, Y. and
+ {Corrales}, L. and {Crichton}, D. and {D'Avella}, D. and {Deil}, C. and
+ {Depagne}, {\'E}. and {Dietrich}, J.~P. and {Donath}, A. and
+ {Droettboom}, M. and {Earl}, N. and {Erben}, T. and {Fabbro}, S. and
+ {Ferreira}, L.~A. and {Finethy}, T. and {Fox}, R.~T. and
+ {Garrison}, L.~H. and {Gibbons}, S.~L.~J. and {Goldstein}, D.~A. and
+ {Gommers}, R. and {Greco}, J.~P. and {Greenfield}, P. and
+ {Groener}, A.~M. and {Grollier}, F. and {Hagen}, A. and {Hirst}, P. and
+ {Homeier}, D. and {Horton}, A.~J. and {Hosseinzadeh}, G. and {Hu}, L. and
+ {Hunkeler}, J.~S. and {Ivezi{\'c}}, {\v{Z}}. and {Jain}, A. and
+ {Jenness}, T. and {Kanarek}, G. and {Kendrew}, S. and {Kern}, N.~S. and
+ {Kerzendorf}, W.~E. and {Khvalko}, A. and {King}, J. and {Kirkby}, D. and
+ {Kulkarni}, A.~M. and {Kumar}, A. and {Lee}, A. and {Lenz}, D. and
+ {Littlefair}, S.~P. and {Ma}, Z. and {Macleod}, D.~M. and
+ {Mastropietro}, M. and {McCully}, C. and {Montagnac}, S. and
+ {Morris}, B.~M. and {Mueller}, M. and {Mumford}, S.~J. and {Muna}, D. and
+ {Murphy}, N.~A. and {Nelson}, S. and {Nguyen}, G.~H. and
+ {Ninan}, J.~P. and {N{\"o}the}, M. and {Ogaz}, S. and {Oh}, S. and
+ {Parejko}, J.~K. and {Parley}, N. and {Pascual}, S. and {Patil}, R. and
+ {Patil}, A.~A. and {Plunkett}, A.~L. and {Prochaska}, J.~X. and
+ {Rastogi}, T. and {Reddy Janga}, V. and {Sabater}, J. and
+ {Sakurikar}, P. and {Seifert}, M. and {Sherbert}, L.~E. and
+ {Sherwood-Taylor}, H. and {Shih}, A.~Y. and {Sick}, J. and
+ {Silbiger}, M.~T. and {Singanamalla}, S. and {Singer}, L.~P. and
+ {Sladen}, P.~H. and {Sooley}, K.~A. and {Sornarajah}, S. and
+ {Streicher}, O. and {Teuben}, P. and {Thomas}, S.~W. and
+ {Tremblay}, G.~R. and {Turner}, J.~E.~H. and {Terr{\'o}n}, V. and
+ {van Kerkwijk}, M.~H. and {de la Vega}, A. and {Watkins}, L.~L. and
+ {Weaver}, B.~A. and {Whitmore}, J.~B. and {Woillez}, J. and
+ {Zabalza}, V. and {Astropy Contributors}},
+ title = "{The Astropy Project: Building an Open-science Project and Status of the v2.0 Core Package}",
+ journal = {\aj},
+ keywords = {methods: data analysis, methods: miscellaneous, methods: statistical, reference systems, Astrophysics - Instrumentation and Methods for Astrophysics},
+ year = 2018,
+ month = sep,
+ volume = {156},
+ number = {3},
+ eid = {123},
+ pages = {123},
+ doi = {10.3847/1538-3881/aabc4f},
+archivePrefix = {arXiv},
+ eprint = {1801.02634},
+ primaryClass = {astro-ph.IM},
+ adsurl = {https://ui.adsabs.harvard.edu/abs/2018AJ....156..123A},
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+@ARTICLE{astropy:22,
+  author = {{Astropy Collaboration} and {Price-Whelan}, Adrian M. and {Lim}, Pey Lian and {Earl}, Nicholas and {Starkman}, Nathaniel and {Bradley}, Larry and {Shupe}, David L. and {Patil}, Aarya A. and {Corrales}, Lia and {Brasseur}, C.~E. and {N{\"o}the}, Maximilian and {Donath}, Axel and {Tollerud}, Erik and {Morris}, Brett M. and {Ginsburg}, Adam and {Vaher}, Eero and {Weaver}, Benjamin A. and {Tocknell}, James and {Jamieson}, William and {van Kerkwijk}, Marten H. and {Robitaille}, Thomas P. and {Merry}, Bruce and {Bachetti}, Matteo and {G{\"u}nther}, H. Moritz and {Aldcroft}, Thomas L. and {Alvarado-Montes}, Jaime A. and {Archibald}, Anne M. and {B{\'o}di}, Attila and {Bapat}, Shreyas and {Barentsen}, Geert and {Baz{\'a}n}, Juanjo and {Biswas}, Manish and {Boquien}, M{\'e}d{\'e}ric and {Burke}, D.~J. and {Cara}, Daria and {Cara}, Mihai and {Conroy}, Kyle E. and {Conseil}, Simon and {Craig}, Matthew W. and {Cross}, Robert M. and {Cruz}, Kelle L. and {D'Eugenio}, Francesco and {Dencheva}, Nadia and {Devillepoix}, Hadrien A.~R. and {Dietrich}, J{\"o}rg P. and {Eigenbrot}, Arthur Davis and {Erben}, Thomas and {Ferreira}, Leonardo and {Foreman-Mackey}, Daniel and {Fox}, Ryan and {Freij}, Nabil and {Garg}, Suyog and {Geda}, Robel and {Glattly}, Lauren and {Gondhalekar}, Yash and {Gordon}, Karl D. and {Grant}, David and {Greenfield}, Perry and {Groener}, Austen M. and {Guest}, Steve and {Gurovich}, Sebastian and {Handberg}, Rasmus and {Hart}, Akeem and {Hatfield-Dodds}, Zac and {Homeier}, Derek and {Hosseinzadeh}, Griffin and {Jenness}, Tim and {Jones}, Craig K. and {Joseph}, Prajwel and {Kalmbach}, J. Bryce and {Karamehmetoglu}, Emir and {Ka{\l}uszy{\'n}ski}, Miko{\l}aj and {Kelley}, Michael S.~P. and {Kern}, Nicholas and {Kerzendorf}, Wolfgang E. and {Koch}, Eric W. and {Kulumani}, Shankar and {Lee}, Antony and {Ly}, Chun and {Ma}, Zhiyuan and {MacBride}, Conor and {Maljaars}, Jakob M. and {Muna}, Demitri and {Murphy}, N.~A. and {Norman}, Henrik and {O'Steen}, Richard and {Oman}, Kyle A. and {Pacifici}, Camilla and {Pascual}, Sergio and {Pascual-Granado}, J. and {Patil}, Rohit R. and {Perren}, Gabriel I. and {Pickering}, Timothy E. and {Rastogi}, Tanuj and {Roulston}, Benjamin R. and {Ryan}, Daniel F. and {Rykoff}, Eli S. and {Sabater}, Jose and {Sakurikar}, Parikshit and {Salgado}, Jes{\'u}s and {Sanghi}, Aniket and {Saunders}, Nicholas and {Savchenko}, Volodymyr and {Schwardt}, Ludwig and {Seifert-Eckert}, Michael and {Shih}, Albert Y. and {Jain}, Anany Shrey and {Shukla}, Gyanendra and {Sick}, Jonathan and {Simpson}, Chris and {Singanamalla}, Sudheesh and {Singer}, Leo P. and {Singhal}, Jaladh and {Sinha}, Manodeep and {Sip{\H{o}}cz}, Brigitta M. and {Spitler}, Lee R. and {Stansby}, David and {Streicher}, Ole and {{\v{S}}umak}, Jani and {Swinbank}, John D. and {Taranu}, Dan S. and {Tewary}, Nikita and {Tremblay}, Grant R. and {Val-Borro}, Miguel de and {Van Kooten}, Samuel J. and {Vasovi{\'c}}, Zlatan and {Verma}, Shresth and {de Miranda Cardoso}, Jos{\'e} Vin{\'i}cius and {Williams}, Peter K.~G. and {Wilson}, Tom J. and {Winkel}, Benjamin and {Wood-Vasey}, W.~M. and {Xue}, Rui and {Yoachim}, Peter and {Zhang}, Chen and {Zonca}, Andrea and {Astropy Project Contributors}},
+ title = "{The Astropy Project: Sustaining and Growing a Community-oriented Open-source Project and the Latest Major Release (v5.0) of the Core Package}",
+  journal = {\apj},
+ keywords = {Astronomy software, Open source software, Astronomy data analysis, 1855, 1866, 1858, Astrophysics - Instrumentation and Methods for Astrophysics},
+ year = 2022,
+ month = aug,
+ volume = {935},
+ number = {2},
+ eid = {167},
+ pages = {167},
+ doi = {10.3847/1538-4357/ac7c74},
+archivePrefix = {arXiv},
+ eprint = {2206.14220},
+ primaryClass = {astro-ph.IM},
+ adsurl = {https://ui.adsabs.harvard.edu/abs/2022ApJ...935..167A},
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+
+@article{Liaudat:23,
+doi = {10.1088/1361-6420/acb664},
+url = {https://dx.doi.org/10.1088/1361-6420/acb664},
+year = {2023},
+month = {feb},
+publisher = {IOP Publishing},
+volume = {39},
+number = {3},
+pages = {035008},
+author = {Tobias Liaudat and Jean-Luc Starck and Martin Kilbinger and Pierre-Antoine Frugier},
+title = {Rethinking data-driven point spread function modeling with a differentiable optical model},
+journal = {Inverse Problems},
+}
+
+@misc{clark:15,
+ title={Pillow (PIL Fork) Documentation},
+ author={Clark, Alex},
+ year={2015},
+ publisher={readthedocs},
+ url={https://buildmedia.readthedocs.org/media/pdf/pillow/latest/pillow.pdf}
+}
+
+@Article{harris:20,
+ title = {Array programming with {NumPy}},
+ author = {Charles R. Harris and K. Jarrod Millman and St{\'{e}}fan J.
+ van der Walt and Ralf Gommers and Pauli Virtanen and David
+ Cournapeau and Eric Wieser and Julian Taylor and Sebastian
+ Berg and Nathaniel J. Smith and Robert Kern and Matti Picus
+ and Stephan Hoyer and Marten H. van Kerkwijk and Matthew
+ Brett and Allan Haldane and Jaime Fern{\'{a}}ndez del
+ R{\'{i}}o and Mark Wiebe and Pearu Peterson and Pierre
+ G{\'{e}}rard-Marchant and Kevin Sheppard and Tyler Reddy and
+ Warren Weckesser and Hameer Abbasi and Christoph Gohlke and
+ Travis E. Oliphant},
+ year = {2020},
+ month = sep,
+ journal = {Nature},
+ volume = {585},
+ number = {7825},
+ pages = {357--362},
+ doi = {10.1038/s41586-020-2649-2},
+ publisher = {Springer Science and Business Media {LLC}},
+ url = {https://doi.org/10.1038/s41586-020-2649-2}
+}
+
+@Article{Hunter:07,
+ Author = {Hunter, J. D.},
+ Title = {Matplotlib: A 2D graphics environment},
+ Journal = {Computing in Science \& Engineering},
+ Volume = {9},
+ Number = {3},
+ Pages = {90--95},
+ abstract = {Matplotlib is a 2D graphics package used for Python for
+ application development, interactive scripting, and publication-quality
+ image generation across user interfaces and operating systems.},
+ publisher = {IEEE COMPUTER SOC},
+ doi = {10.1109/MCSE.2007.55},
+ year = 2007
+}
+
+@ARTICLE{SciPy-NMeth:20,
+ author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
+ Haberland, Matt and Reddy, Tyler and Cournapeau, David and
+ Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
+ Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
+ Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
+ Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
+ Kern, Robert and Larson, Eric and Carey, C J and
+ Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
+ {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
+ Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
+ Harris, Charles R. and Archibald, Anne M. and
+ Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
+ {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
+ title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
+ Computing in Python}},
+ journal = {Nature Methods},
+ year = {2020},
+ volume = {17},
+ pages = {261--272},
+ adsurl = {https://rdcu.be/b08Wh},
+ doi = {10.1038/s41592-019-0686-2},
+}
+
+@article{opencv_library:08,
+ author = {Bradski, G.},
+  journal = {Dr. Dobb's Journal of Software Tools},
+ title = {{The OpenCV Library}},
+ year = {2000}
+}
+
+@ARTICLE{rowe:15,
+ author = {{Rowe}, B.~T.~P. and {Jarvis}, M. and {Mandelbaum}, R. and {Bernstein}, G.~M. and {Bosch}, J. and {Simet}, M. and {Meyers}, J.~E. and {Kacprzak}, T. and {Nakajima}, R. and {Zuntz}, J. and {Miyatake}, H. and {Dietrich}, J.~P. and {Armstrong}, R. and {Melchior}, P. and {Gill}, M.~S.~S.},
+ title = "{GALSIM: The modular galaxy image simulation toolkit}",
+ journal = {Astronomy and Computing},
+ keywords = {Methods: data analysis, Techniques: image processing, Gravitational lensing, Cosmology: observations, Astrophysics - Instrumentation and Methods for Astrophysics, Astrophysics - Cosmology and Nongalactic Astrophysics, 85-04},
+ year = 2015,
+ month = apr,
+ volume = {10},
+ pages = {121-150},
+ doi = {10.1016/j.ascom.2015.02.002},
+archivePrefix = {arXiv},
+ eprint = {1407.7676},
+ primaryClass = {astro-ph.IM},
+ adsurl = {https://ui.adsabs.harvard.edu/abs/2015A&C....10..121R},
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
+
+
+
+@misc{tensorflow:15,
+title={ {TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
+url={https://www.tensorflow.org/},
+note={Software available from tensorflow.org},
+author={
+ Mart\'{i}n~Abadi and
+ Ashish~Agarwal and
+ Paul~Barham and
+ Eugene~Brevdo and
+ Zhifeng~Chen and
+ Craig~Citro and
+ Greg~S.~Corrado and
+ Andy~Davis and
+ Jeffrey~Dean and
+ Matthieu~Devin and
+ Sanjay~Ghemawat and
+ Ian~Goodfellow and
+ Andrew~Harp and
+ Geoffrey~Irving and
+ Michael~Isard and
+ Yangqing Jia and
+ Rafal~Jozefowicz and
+ Lukasz~Kaiser and
+ Manjunath~Kudlur and
+ Josh~Levenberg and
+ Dandelion~Man\'{e} and
+ Rajat~Monga and
+ Sherry~Moore and
+ Derek~Murray and
+ Chris~Olah and
+ Mike~Schuster and
+ Jonathon~Shlens and
+ Benoit~Steiner and
+ Ilya~Sutskever and
+ Kunal~Talwar and
+ Paul~Tucker and
+ Vincent~Vanhoucke and
+ Vijay~Vasudevan and
+ Fernanda~Vi\'{e}gas and
+ Oriol~Vinyals and
+ Pete~Warden and
+ Martin~Wattenberg and
+ Martin~Wicke and
+ Yuan~Yu and
+ Xiaoqiang~Zheng},
+ year={2015},
+}
+
+@article{Waskom:21,
+ doi = {10.21105/joss.03021},
+ url = {https://doi.org/10.21105/joss.03021},
+ year = {2021},
+ publisher = {The Open Journal},
+ volume = {6},
+ number = {60},
+ pages = {3021},
+ author = {Michael L. Waskom},
+ title = {seaborn: statistical data visualization},
+ journal = {Journal of Open Source Software}
+ }
+
diff --git a/docs/source/toc.rst b/docs/source/toc.rst
new file mode 100644
index 00000000..df168e9d
--- /dev/null
+++ b/docs/source/toc.rst
@@ -0,0 +1,37 @@
+.. Toctrees define sidebar contents
+
+.. toctree::
+ :hidden:
+ :titlesonly:
+ :caption: About WaveDiff
+
+ about
+
+.. toctree::
+ :hidden:
+ :caption: Installing WaveDiff
+
+ dependencies
+ installation
+
+.. toctree::
+ :maxdepth: 4
+ :hidden:
+ :caption: Running WaveDiff
+
+ basic_execution
+ configuration
+
+.. toctree::
+ :hidden:
+ :titlesonly:
+ :caption: API Documentation
+
+ wf_psf
+
+.. toctree::
+ :hidden:
+ :titlesonly:
+ :caption: References
+
+ z_ref
\ No newline at end of file
diff --git a/docs/source/wf_psf.data.rst b/docs/source/wf_psf.data.rst
new file mode 100644
index 00000000..ec021128
--- /dev/null
+++ b/docs/source/wf_psf.data.rst
@@ -0,0 +1,15 @@
+wf\_psf.data package
+====================
+
+.. automodule:: wf_psf.data
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.data.training_preprocessing
diff --git a/docs/source/wf_psf.data.training_preprocessing.rst b/docs/source/wf_psf.data.training_preprocessing.rst
new file mode 100644
index 00000000..a2f13368
--- /dev/null
+++ b/docs/source/wf_psf.data.training_preprocessing.rst
@@ -0,0 +1,7 @@
+wf\_psf.data.training\_preprocessing module
+===========================================
+
+.. automodule:: wf_psf.data.training_preprocessing
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.info.rst b/docs/source/wf_psf.info.rst
new file mode 100644
index 00000000..948d6e67
--- /dev/null
+++ b/docs/source/wf_psf.info.rst
@@ -0,0 +1,7 @@
+wf\_psf.info module
+===================
+
+.. automodule:: wf_psf.info
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.metrics.metrics.rst b/docs/source/wf_psf.metrics.metrics.rst
new file mode 100644
index 00000000..6ecf4513
--- /dev/null
+++ b/docs/source/wf_psf.metrics.metrics.rst
@@ -0,0 +1,7 @@
+wf\_psf.metrics.metrics module
+==============================
+
+.. automodule:: wf_psf.metrics.metrics
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.metrics.metrics_interface.rst b/docs/source/wf_psf.metrics.metrics_interface.rst
new file mode 100644
index 00000000..2b87a2b3
--- /dev/null
+++ b/docs/source/wf_psf.metrics.metrics_interface.rst
@@ -0,0 +1,7 @@
+wf\_psf.metrics.metrics\_interface module
+=========================================
+
+.. automodule:: wf_psf.metrics.metrics_interface
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.metrics.rst b/docs/source/wf_psf.metrics.rst
new file mode 100644
index 00000000..6216f5d3
--- /dev/null
+++ b/docs/source/wf_psf.metrics.rst
@@ -0,0 +1,16 @@
+wf\_psf.metrics package
+=======================
+
+.. automodule:: wf_psf.metrics
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.metrics.metrics
+ wf_psf.metrics.metrics_interface
diff --git a/docs/source/wf_psf.plotting.plot_optimisation_metrics.rst b/docs/source/wf_psf.plotting.plot_optimisation_metrics.rst
new file mode 100644
index 00000000..acf60b46
--- /dev/null
+++ b/docs/source/wf_psf.plotting.plot_optimisation_metrics.rst
@@ -0,0 +1,7 @@
+wf\_psf.plotting.plot\_optimisation\_metrics module
+===================================================
+
+.. automodule:: wf_psf.plotting.plot_optimisation_metrics
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.plotting.plots_interface.rst b/docs/source/wf_psf.plotting.plots_interface.rst
new file mode 100644
index 00000000..bcd04a57
--- /dev/null
+++ b/docs/source/wf_psf.plotting.plots_interface.rst
@@ -0,0 +1,7 @@
+wf\_psf.plotting.plots\_interface module
+========================================
+
+.. automodule:: wf_psf.plotting.plots_interface
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.plotting.rst b/docs/source/wf_psf.plotting.rst
new file mode 100644
index 00000000..565697a3
--- /dev/null
+++ b/docs/source/wf_psf.plotting.rst
@@ -0,0 +1,16 @@
+wf\_psf.plotting package
+========================
+
+.. automodule:: wf_psf.plotting
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.plotting.plot_optimisation_metrics
+ wf_psf.plotting.plots_interface
diff --git a/docs/source/wf_psf.psf_models.psf_model_parametric.rst b/docs/source/wf_psf.psf_models.psf_model_parametric.rst
new file mode 100644
index 00000000..a315b990
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.psf_model_parametric.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.psf\_model\_parametric module
+=================================================
+
+.. automodule:: wf_psf.psf_models.psf_model_parametric
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.psf_models.psf_model_semiparametric.rst b/docs/source/wf_psf.psf_models.psf_model_semiparametric.rst
new file mode 100644
index 00000000..20a40a21
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.psf_model_semiparametric.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.psf\_model\_semiparametric module
+=====================================================
+
+.. automodule:: wf_psf.psf_models.psf_model_semiparametric
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.psf_models.psf_models.rst b/docs/source/wf_psf.psf_models.psf_models.rst
new file mode 100644
index 00000000..791c08ce
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.psf_models.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.psf\_models module
+======================================
+
+.. automodule:: wf_psf.psf_models.psf_models
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.psf_models.rst b/docs/source/wf_psf.psf_models.rst
new file mode 100644
index 00000000..7cffb950
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.rst
@@ -0,0 +1,21 @@
+wf\_psf.psf\_models package
+===========================
+
+.. automodule:: wf_psf.psf_models
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.psf_models.psf_model_parametric
+ wf_psf.psf_models.psf_model_semiparametric
+ wf_psf.psf_models.psf_models
+ wf_psf.psf_models.tf_layers
+ wf_psf.psf_models.tf_modules
+ wf_psf.psf_models.tf_psf_field
+ wf_psf.psf_models.zernikes
diff --git a/docs/source/wf_psf.psf_models.tf_layers.rst b/docs/source/wf_psf.psf_models.tf_layers.rst
new file mode 100644
index 00000000..928860cd
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.tf_layers.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.tf\_layers module
+=====================================
+
+.. automodule:: wf_psf.psf_models.tf_layers
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.psf_models.tf_modules.rst b/docs/source/wf_psf.psf_models.tf_modules.rst
new file mode 100644
index 00000000..322178c1
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.tf_modules.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.tf\_modules module
+======================================
+
+.. automodule:: wf_psf.psf_models.tf_modules
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.psf_models.tf_psf_field.rst b/docs/source/wf_psf.psf_models.tf_psf_field.rst
new file mode 100644
index 00000000..c67c95ab
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.tf_psf_field.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.tf\_psf\_field module
+=========================================
+
+.. automodule:: wf_psf.psf_models.tf_psf_field
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.psf_models.zernikes.rst b/docs/source/wf_psf.psf_models.zernikes.rst
new file mode 100644
index 00000000..f12ef53b
--- /dev/null
+++ b/docs/source/wf_psf.psf_models.zernikes.rst
@@ -0,0 +1,7 @@
+wf\_psf.psf\_models.zernikes module
+===================================
+
+.. automodule:: wf_psf.psf_models.zernikes
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.rst b/docs/source/wf_psf.rst
new file mode 100644
index 00000000..5fd64626
--- /dev/null
+++ b/docs/source/wf_psf.rst
@@ -0,0 +1,30 @@
+wf\_psf package
+===============
+
+.. automodule:: wf_psf
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Subpackages
+-----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.data
+ wf_psf.metrics
+ wf_psf.plotting
+ wf_psf.psf_models
+ wf_psf.sims
+ wf_psf.training
+ wf_psf.utils
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.info
+ wf_psf.run
diff --git a/docs/source/wf_psf.run.rst b/docs/source/wf_psf.run.rst
new file mode 100644
index 00000000..c93fd948
--- /dev/null
+++ b/docs/source/wf_psf.run.rst
@@ -0,0 +1,7 @@
+wf\_psf.run module
+==================
+
+.. automodule:: wf_psf.run
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.sims.SimPSFToolkit.rst b/docs/source/wf_psf.sims.SimPSFToolkit.rst
new file mode 100644
index 00000000..b71254bc
--- /dev/null
+++ b/docs/source/wf_psf.sims.SimPSFToolkit.rst
@@ -0,0 +1,7 @@
+wf\_psf.sims.SimPSFToolkit module
+=================================
+
+.. automodule:: wf_psf.sims.SimPSFToolkit
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.sims.rst b/docs/source/wf_psf.sims.rst
new file mode 100644
index 00000000..184d80bf
--- /dev/null
+++ b/docs/source/wf_psf.sims.rst
@@ -0,0 +1,15 @@
+wf\_psf.sims package
+====================
+
+.. automodule:: wf_psf.sims
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.sims.SimPSFToolkit
diff --git a/docs/source/wf_psf.training.rst b/docs/source/wf_psf.training.rst
new file mode 100644
index 00000000..7024b55e
--- /dev/null
+++ b/docs/source/wf_psf.training.rst
@@ -0,0 +1,16 @@
+wf\_psf.training package
+========================
+
+.. automodule:: wf_psf.training
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.training.train
+ wf_psf.training.train_utils
diff --git a/docs/source/wf_psf.training.train.rst b/docs/source/wf_psf.training.train.rst
new file mode 100644
index 00000000..581dddaa
--- /dev/null
+++ b/docs/source/wf_psf.training.train.rst
@@ -0,0 +1,7 @@
+wf\_psf.training.train module
+=============================
+
+.. automodule:: wf_psf.training.train
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.training.train_utils.rst b/docs/source/wf_psf.training.train_utils.rst
new file mode 100644
index 00000000..a0e396da
--- /dev/null
+++ b/docs/source/wf_psf.training.train_utils.rst
@@ -0,0 +1,7 @@
+wf\_psf.training.train\_utils module
+====================================
+
+.. automodule:: wf_psf.training.train_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.utils.configs_handler.rst b/docs/source/wf_psf.utils.configs_handler.rst
new file mode 100644
index 00000000..acae97e0
--- /dev/null
+++ b/docs/source/wf_psf.utils.configs_handler.rst
@@ -0,0 +1,7 @@
+wf\_psf.utils.configs\_handler module
+=====================================
+
+.. automodule:: wf_psf.utils.configs_handler
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.utils.graph_utils.rst b/docs/source/wf_psf.utils.graph_utils.rst
new file mode 100644
index 00000000..37d4b33b
--- /dev/null
+++ b/docs/source/wf_psf.utils.graph_utils.rst
@@ -0,0 +1,7 @@
+wf\_psf.utils.graph\_utils module
+=================================
+
+.. automodule:: wf_psf.utils.graph_utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.utils.io.rst b/docs/source/wf_psf.utils.io.rst
new file mode 100644
index 00000000..ff41d208
--- /dev/null
+++ b/docs/source/wf_psf.utils.io.rst
@@ -0,0 +1,7 @@
+wf\_psf.utils.io module
+=======================
+
+.. automodule:: wf_psf.utils.io
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.utils.read_config.rst b/docs/source/wf_psf.utils.read_config.rst
new file mode 100644
index 00000000..cc85900a
--- /dev/null
+++ b/docs/source/wf_psf.utils.read_config.rst
@@ -0,0 +1,7 @@
+wf\_psf.utils.read\_config module
+=================================
+
+.. automodule:: wf_psf.utils.read_config
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/wf_psf.utils.rst b/docs/source/wf_psf.utils.rst
new file mode 100644
index 00000000..48c1d81a
--- /dev/null
+++ b/docs/source/wf_psf.utils.rst
@@ -0,0 +1,19 @@
+wf\_psf.utils package
+=====================
+
+.. automodule:: wf_psf.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Submodules
+----------
+
+.. toctree::
+ :maxdepth: 4
+
+ wf_psf.utils.configs_handler
+ wf_psf.utils.graph_utils
+ wf_psf.utils.io
+ wf_psf.utils.read_config
+ wf_psf.utils.utils
diff --git a/docs/source/wf_psf.utils.utils.rst b/docs/source/wf_psf.utils.utils.rst
new file mode 100644
index 00000000..e4ade0bf
--- /dev/null
+++ b/docs/source/wf_psf.utils.utils.rst
@@ -0,0 +1,7 @@
+wf\_psf.utils.utils module
+==========================
+
+.. automodule:: wf_psf.utils.utils
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/source/z_ref.rst b/docs/source/z_ref.rst
new file mode 100644
index 00000000..73451684
--- /dev/null
+++ b/docs/source/z_ref.rst
@@ -0,0 +1,4 @@
+References
+==========
+
+.. bibliography:: refs.bib
\ No newline at end of file
diff --git a/jz-submissions/README.md b/jz-submissions/README.md
deleted file mode 100644
index 5e8c7e4f..00000000
--- a/jz-submissions/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# WF-PSF experiences
-
-Describe how to do the WF-PSF experiences.
-
diff --git a/jz-submissions/jobs-cli/1k_poly_v2_1cycle.sh b/jz-submissions/jobs-cli/1k_poly_v2_1cycle.sh
deleted file mode 100644
index 9ac77e03..00000000
--- a/jz-submissions/jobs-cli/1k_poly_v2_1cycle.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_poly_v2_1cycle # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=1k_poly_v2_1cycle%j.out # nom du fichier de sortie
-#SBATCH --error=1k_poly_v2_1cycle%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --id_name _chkp_1k_poly_v2_1cycle \
- --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy \
- --n_epochs_param 20 20 \
- --n_epochs_non_param 200 150 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle1 \
- --total_cycles 1 \
diff --git a/jz-submissions/jobs-cli/1k_poly_v2_2cycle.sh b/jz-submissions/jobs-cli/1k_poly_v2_2cycle.sh
deleted file mode 100644
index 5c02fb30..00000000
--- a/jz-submissions/jobs-cli/1k_poly_v2_2cycle.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_poly_v2_2cycle # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=1k_poly_v2_2cycle%j.out # nom du fichier de sortie
-#SBATCH --error=1k_poly_v2_2cycle%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --id_name _chkp_1k_poly_v2_2cycle \
- --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy \
- --n_epochs_param 20 20 \
- --n_epochs_non_param 150 200 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
diff --git a/jz-submissions/jobs-cli/1k_poly_v2_2cycle_no2param.sh b/jz-submissions/jobs-cli/1k_poly_v2_2cycle_no2param.sh
deleted file mode 100644
index 1d9106bc..00000000
--- a/jz-submissions/jobs-cli/1k_poly_v2_2cycle_no2param.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_poly_v2_2cycle_n2p # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=1k_poly_v2_2cycle_n2p%j.out # nom du fichier de sortie
-#SBATCH --error=1k_poly_v2_2cycle_n2p%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --id_name _chkp_1k_poly_v2_2cycle_no2param \
- --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy \
- --n_epochs_param 20 2 \
- --n_epochs_non_param 150 200 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
diff --git a/jz-submissions/jobs-cli/1k_poly_var_l2_opd.sh b/jz-submissions/jobs-cli/1k_poly_var_l2_opd.sh
deleted file mode 100644
index 6b841d1e..00000000
--- a/jz-submissions/jobs-cli/1k_poly_var_l2_opd.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_poly_l2_opd # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=1k_poly_l2_opd%j.out # nom du fichier de sortie
-#SBATCH --error=1k_poly_l2_opd%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-#SBATCH --array=0-3
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--l2_param 1e-10"
-opt[1]="--l2_param 5e-11"
-opt[2]="--l2_param 1e-11"
-opt[3]="--l2_param 5e-12"
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/l2_param_tr_ev.py \
- --model poly \
- --id_name 1k_poly_l2_opd \
- --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy \
- --n_epochs_param 20 20 \
- --n_epochs_non_param 150 150 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_1k_poly_sample_w.sh b/jz-submissions/jobs-cli/full_1k_poly_sample_w.sh
deleted file mode 100644
index 0104c55b..00000000
--- a/jz-submissions/jobs-cli/full_1k_poly_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_1k_sample_w # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=poly_1k_sample_w%j.out # nom du fichier de sortie
-#SBATCH --error=poly_1k_sample_w%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _1k_sample_w_2c --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --l_rate_param 0.01 0.002 --l_rate_non_param 0.1 0.02"
-opt[1]="--id_name _1k_sample_w_5c --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --l_rate_param 0.01 0.004 --l_rate_non_param 0.1 0.04"
-opt[2]="--id_name _1k_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --l_rate_param 0.01 0.008 --l_rate_non_param 0.1 0.08"
-opt[3]="--id_name _1k_sample_w_2k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --l_rate_param 0.01 0.010 --l_rate_non_param 0.1 0.10"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --n_epochs_param 30 30 \
- --n_epochs_non_param 150 100 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _1k_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_1k_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_1k_poly_sample_w_l2param_opd.sh b/jz-submissions/jobs-cli/full_1k_poly_sample_w_l2param_opd.sh
deleted file mode 100644
index 3a4b6d75..00000000
--- a/jz-submissions/jobs-cli/full_1k_poly_sample_w_l2param_opd.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_1k_sample_w_l2param_opd # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=poly_1k_sample_w_l2param_opd%j.out # nom du fichier de sortie
-#SBATCH --error=poly_1k_sample_w_l2param_opd%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _1k_sample_w_l2param_opd_2c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.002 --l_rate_non_param 0.1 0.02"
-opt[1]="--id_name _1k_sample_w_l2param_opd_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.004 --l_rate_non_param 0.1 0.04"
-opt[2]="--id_name _1k_sample_w_l2param_opd_1k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.008 --l_rate_non_param 0.1 0.08"
-opt[3]="--id_name _1k_sample_w_l2param_opd_2k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.010 --l_rate_non_param 0.1 0.10"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --n_epochs_param 30 30 \
- --n_epochs_non_param 150 100 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _1k_sample_w_l2param_opd_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_1k_sample_w_l2param_opd/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_5c_poly_sample_w.sh b/jz-submissions/jobs-cli/full_5c_poly_sample_w.sh
deleted file mode 100644
index b7056016..00000000
--- a/jz-submissions/jobs-cli/full_5c_poly_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_5c_sample_w # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=poly_5c_sample_w%j.out # nom du fichier de sortie
-#SBATCH --error=poly_5c_sample_w%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _5c_sample_w_2c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.002 --l_rate_non_param 0.1 0.02"
-opt[1]="--id_name _5c_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.004 --l_rate_non_param 0.1 0.04"
-opt[2]="--id_name _5c_sample_w_1k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.008 --l_rate_non_param 0.1 0.08"
-opt[3]="--id_name _5c_sample_w_2k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.010 --l_rate_non_param 0.1 0.10"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --n_epochs_param 30 30 \
- --n_epochs_non_param 200 150 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _5c_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_5c_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_5c_poly_sample_w_l2param_opd.sh b/jz-submissions/jobs-cli/full_5c_poly_sample_w_l2param_opd.sh
deleted file mode 100644
index 4353cb56..00000000
--- a/jz-submissions/jobs-cli/full_5c_poly_sample_w_l2param_opd.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_5c_sample_w_l2param_opd # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=poly_5c_sample_w_l2param_opd%j.out # nom du fichier de sortie
-#SBATCH --error=poly_5c_sample_w_l2param_opd%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _5c_sample_w_l2param_opd_2c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.002 --l_rate_non_param 0.1 0.01"
-opt[1]="--id_name _5c_sample_w_l2param_opd_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.004 --l_rate_non_param 0.1 0.04"
-opt[2]="--id_name _5c_sample_w_l2param_opd_1k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.008 --l_rate_non_param 0.1 0.08"
-opt[3]="--id_name _5c_sample_w_l2param_opd_2k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_param 0.01 0.010 --l_rate_non_param 0.1 0.10"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --n_epochs_param 30 30 \
- --n_epochs_non_param 200 150 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _5c_sample_w_l2param_opd_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_5c_sample_w_l2param_opd/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_graph_sample_w.sh b/jz-submissions/jobs-cli/full_graph_sample_w.sh
deleted file mode 100644
index 58188288..00000000
--- a/jz-submissions/jobs-cli/full_graph_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=graph_sample_w # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=graph_sample_w_%j.out # nom du fichier de sortie
-#SBATCH --error=graph_sample_w_%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model graph \
- --n_zernikes 15 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/graph_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_graph_sample_w_tunned.sh b/jz-submissions/jobs-cli/full_graph_sample_w_tunned.sh
deleted file mode 100644
index d2517838..00000000
--- a/jz-submissions/jobs-cli/full_graph_sample_w_tunned.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=graph_sample_w_tunned # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=graph_sample_w_tunned_%j.out # nom du fichier de sortie
-#SBATCH --error=graph_sample_w_tunned_%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _sample_w_tunned_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_tunned_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_tunned_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_tunned_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model graph \
- --n_zernikes 15 \
- --graph_features 21 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.4 0.2 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_tunned_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/graph_sample_w_tunned/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_mccd_sample_w.sh b/jz-submissions/jobs-cli/full_mccd_sample_w.sh
deleted file mode 100644
index bd60eeed..00000000
--- a/jz-submissions/jobs-cli/full_mccd_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=mccd_sample_w # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=mccd_sample_w%j.out # nom du fichier de sortie
-#SBATCH --error=mccd_sample_w%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model mccd \
- --d_max_nonparam 3 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/mccd_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_mccd_sample_w_bis1.sh b/jz-submissions/jobs-cli/full_mccd_sample_w_bis1.sh
deleted file mode 100644
index 894c7996..00000000
--- a/jz-submissions/jobs-cli/full_mccd_sample_w_bis1.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=mccd_sample_w_bis1_ # nom du job
-##SBATCH --partition=gpu_p2 # de-commente pour la partition gpu_p2
-#SBATCH --ntasks=1 # nombre total de tache MPI (= nombre total de GPU)
-#SBATCH --ntasks-per-node=1 # nombre de tache MPI par noeud (= nombre de GPU par noeud)
-#SBATCH --gres=gpu:1 # nombre de GPU par n?~Sud (max 8 avec gpu_p2)
-#SBATCH --cpus-per-task=10 # nombre de coeurs CPU par tache (un quart du noeud ici)
-#SBATCH -C v100-32g
-# /!\ Attention, "multithread" fait reference a l'hyperthreading dans la terminologie Slurm
-#SBATCH --hint=nomultithread # hyperthreading desactive
-#SBATCH --time=20:00:00 # temps d'execution maximum demande (HH:MM:SS)
-#SBATCH --output=mccd_sample_w_bis1_%j.out # nom du fichier de sortie
-#SBATCH --error=mccd_sample_w_bis1_%j.err # nom du fichier d'erreur (ici commun avec la sortie)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-opt[0]="--id_name _sample_w_bis1_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_bis1_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_bis1_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_bis1_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model mccd \
- --d_max_nonparam 3 \
- --graph_features 10 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_bis1_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/mccd_sample_w_bis1/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_mccd_sample_w_bis2.sh b/jz-submissions/jobs-cli/full_mccd_sample_w_bis2.sh
deleted file mode 100644
index 5dece54c..00000000
--- a/jz-submissions/jobs-cli/full_mccd_sample_w_bis2.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=mccd_sample_w_bis2_ # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=mccd_sample_w_bis2_%j.out # output file name
-#SBATCH --error=mccd_sample_w_bis2_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _sample_w_bis2_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_bis2_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_bis2_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_bis2_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model mccd \
- --d_max_nonparam 3 \
- --graph_features 10 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_bis2_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/mccd_sample_w_bis2/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_mccd_sample_w_l2param_opd.sh b/jz-submissions/jobs-cli/full_mccd_sample_w_l2param_opd.sh
deleted file mode 100644
index c49dc1a3..00000000
--- a/jz-submissions/jobs-cli/full_mccd_sample_w_l2param_opd.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=mccd_sample_w_l2param_opd # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=mccd_sample_w_l2param_opd%j.out # output file name
-#SBATCH --error=mccd_sample_w_l2param_opd%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _sample_w_l2param_opd_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_l2param_opd_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_l2param_opd_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_l2param_opd_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model mccd \
- --d_max_nonparam 3 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _sample_w_l2param_opd_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/mccd_sample_w_l2param_opd/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_mccd_standard.sh b/jz-submissions/jobs-cli/full_mccd_standard.sh
deleted file mode 100644
index 21ffaa8c..00000000
--- a/jz-submissions/jobs-cli/full_mccd_standard.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=mccd_standard # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=mccd_standard%j.out # output file name
-#SBATCH --error=mccd_standard%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _standard_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _standard_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _standard_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _standard_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model mccd \
- --d_max_nonparam 3 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _standard_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/mccd_standard/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_complete.sh b/jz-submissions/jobs-cli/full_param_complete.sh
deleted file mode 100644
index 5820e0cd..00000000
--- a/jz-submissions/jobs-cli/full_param_complete.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_complete # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_complete%j.out # output file name
-#SBATCH --error=param_complete%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _complete_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _complete_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _complete_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _complete_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _complete_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_complete/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_complete_sample_w.sh b/jz-submissions/jobs-cli/full_param_complete_sample_w.sh
deleted file mode 100644
index 31fd0ee3..00000000
--- a/jz-submissions/jobs-cli/full_param_complete_sample_w.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_complete_sample_w # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_complete_sample_w%j.out # output file name
-#SBATCH --error=param_complete_sample_w%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _complete_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _complete_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _complete_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _complete_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _complete_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_complete_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_complete_v2.sh b/jz-submissions/jobs-cli/full_param_complete_v2.sh
deleted file mode 100644
index 55f8db58..00000000
--- a/jz-submissions/jobs-cli/full_param_complete_v2.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_complete_v2 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_complete_v2_%j.out # output file name
-#SBATCH --error=param_complete_v2_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _complete_v2_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _complete_v2_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _complete_v2_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _complete_v2_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _complete_v2_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_complete_v2/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_complete_v3.sh b/jz-submissions/jobs-cli/full_param_complete_v3.sh
deleted file mode 100644
index 7757fc87..00000000
--- a/jz-submissions/jobs-cli/full_param_complete_v3.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_complete_v3 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_complete_v3_%j.out # output file name
-#SBATCH --error=param_complete_v3_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _complete_v3_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _complete_v3_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _complete_v3_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _complete_v3_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _complete_v3_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_complete_v3/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_complete_v4.sh b/jz-submissions/jobs-cli/full_param_complete_v4.sh
deleted file mode 100644
index 674d22ad..00000000
--- a/jz-submissions/jobs-cli/full_param_complete_v4.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_complete_v4 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_complete_v4_%j.out # output file name
-#SBATCH --error=param_complete_v4_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _complete_v4_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _complete_v4_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _complete_v4_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _complete_v4_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _complete_v4_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_complete_v4/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete.sh b/jz-submissions/jobs-cli/full_param_incomplete.sh
deleted file mode 100644
index 9d790466..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete%j.out # output file name
-#SBATCH --error=param_incomplete%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 15 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _incomplete_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_15_sample_w.sh b/jz-submissions/jobs-cli/full_param_incomplete_15_sample_w.sh
deleted file mode 100644
index bcb71e73..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_15_sample_w.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_15_sample_w # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_15_sample_w%j.out # output file name
-#SBATCH --error=param_incomplete_15_sample_w%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_15_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_15_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_15_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_15_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 15 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _incomplete_15_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_15_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_30.sh b/jz-submissions/jobs-cli/full_param_incomplete_30.sh
deleted file mode 100644
index 39b4fdce..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_30.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_30 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_30%j.out # output file name
-#SBATCH --error=param_incomplete_30%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_30_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_30_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_30_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_30_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 30 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _incomplete_30_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_30/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_40.sh b/jz-submissions/jobs-cli/full_param_incomplete_40.sh
deleted file mode 100644
index 0514e0c7..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_40.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_40 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_40%j.out # output file name
-#SBATCH --error=param_incomplete_40%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_40_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_40_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_40_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_40_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 40 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _incomplete_40_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_40/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_40_sample_w.sh b/jz-submissions/jobs-cli/full_param_incomplete_40_sample_w.sh
deleted file mode 100644
index f64c15b7..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_40_sample_w.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_40_sample_w # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_40_sample_w%j.out # output file name
-#SBATCH --error=param_incomplete_40_sample_w%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_40_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_40_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_40_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_40_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 40 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _incomplete_40_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_40_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_40_sample_w_bis1.sh b/jz-submissions/jobs-cli/full_param_incomplete_40_sample_w_bis1.sh
deleted file mode 100644
index 4608299e..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_40_sample_w_bis1.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_40_sample_w_bis1_ # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_40_sample_w_bis1_%j.out # output file name
-#SBATCH --error=param_incomplete_40_sample_w_bis1_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_40_sample_w_bis1_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_40_sample_w_bis1_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_40_sample_w_bis1_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_40_sample_w_bis1_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 40 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _incomplete_40_sample_w_bis1_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_40_sample_w_bis1/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_40_v2.sh b/jz-submissions/jobs-cli/full_param_incomplete_40_v2.sh
deleted file mode 100644
index 6bb14047..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_40_v2.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_40_v2_ # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_40_v2_%j.out # output file name
-#SBATCH --error=param_incomplete_40_v2_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_40_v2_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_40_v2_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_40_v2_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_40_v2_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 40 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _incomplete_40_v2_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_40_v2/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_incomplete_40_v3.sh b/jz-submissions/jobs-cli/full_param_incomplete_40_v3.sh
deleted file mode 100644
index eb9e9236..00000000
--- a/jz-submissions/jobs-cli/full_param_incomplete_40_v3.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_40_v3_ # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_40_v3_%j.out # output file name
-#SBATCH --error=param_incomplete_40_v3_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_40_v3_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_40_v3_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_40_v3_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_40_v3_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 40 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _incomplete_40_v3_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_40_v3/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_overcomplete_55_v1.sh b/jz-submissions/jobs-cli/full_param_overcomplete_55_v1.sh
deleted file mode 100644
index 726f8728..00000000
--- a/jz-submissions/jobs-cli/full_param_overcomplete_55_v1.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_overcomplete_55_v1 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_overcomplete_55_v1_%j.out # output file name
-#SBATCH --error=param_overcomplete_55_v1_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _overcomplete_55_v1_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _overcomplete_55_v1_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _overcomplete_55_v1_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _overcomplete_55_v1_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 55 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _overcomplete_55_v1_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_overcomplete_55_v1/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_overcomplete_55_v1_sample_w.sh b/jz-submissions/jobs-cli/full_param_overcomplete_55_v1_sample_w.sh
deleted file mode 100644
index fcc5de37..00000000
--- a/jz-submissions/jobs-cli/full_param_overcomplete_55_v1_sample_w.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_overcomplete_55_v1_sample_w # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_overcomplete_55_v1_sample_w_%j.out # output file name
-#SBATCH --error=param_overcomplete_55_v1_sample_w_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _overcomplete_55_v1_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _overcomplete_55_v1_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _overcomplete_55_v1_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _overcomplete_55_v1_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 55 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _overcomplete_55_v1_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_overcomplete_55_v1_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_param_sample_w_l2opd_incomplete.sh b/jz-submissions/jobs-cli/full_param_sample_w_l2opd_incomplete.sh
deleted file mode 100644
index 35b2ef2a..00000000
--- a/jz-submissions/jobs-cli/full_param_sample_w_l2opd_incomplete.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_incomplete_sample_w_l2opd # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_incomplete_sample_w_l2opd%j.out # output file name
-#SBATCH --error=param_incomplete_sample_w_l2opd%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _incomplete_sample_w_l2opd_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _incomplete_sample_w_l2opd_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _incomplete_sample_w_l2opd_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _incomplete_sample_w_l2opd_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 15 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _incomplete_sample_w_l2opd_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_incomplete_sample_w_l2opd/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_poly_sample_w.sh b/jz-submissions/jobs-cli/full_poly_sample_w.sh
deleted file mode 100644
index 2e9e809d..00000000
--- a/jz-submissions/jobs-cli/full_poly_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_sample_w # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_sample_w%j.out # output file name
-#SBATCH --error=poly_sample_w%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_poly_sample_w_bis1.sh b/jz-submissions/jobs-cli/full_poly_sample_w_bis1.sh
deleted file mode 100644
index 69734721..00000000
--- a/jz-submissions/jobs-cli/full_poly_sample_w_bis1.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_sample_w_bis1_ # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_sample_w_bis1_%j.out # output file name
-#SBATCH --error=poly_sample_w_bis1_%j.err # error file name (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands being run
-set -x
-
-opt[0]="--id_name _sample_w_bis1_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_bis1_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_bis1_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_bis1_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_bis1_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_sample_w_bis1/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_poly_sample_w_bis2.sh b/jz-submissions/jobs-cli/full_poly_sample_w_bis2.sh
deleted file mode 100644
index a6e02eef..00000000
--- a/jz-submissions/jobs-cli/full_poly_sample_w_bis2.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_sample_w_bis2_    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_sample_w_bis2_%j.out  # name of the output file
-#SBATCH --error=poly_sample_w_bis2_%j.err   # name of the error file (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _sample_w_bis2_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_bis2_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_bis2_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_bis2_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _sample_w_bis2_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_sample_w_bis2/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
\ No newline at end of file
diff --git a/jz-submissions/jobs-cli/full_poly_sample_w_l2param_opd.sh b/jz-submissions/jobs-cli/full_poly_sample_w_l2param_opd.sh
deleted file mode 100644
index dfd78060..00000000
--- a/jz-submissions/jobs-cli/full_poly_sample_w_l2param_opd.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_sample_w_l2param_opd    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_sample_w_l2param_opd%j.out  # name of the output file
-#SBATCH --error=poly_sample_w_l2param_opd%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _sample_w_l2param_opd_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_l2param_opd_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _sample_w_l2param_opd_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _sample_w_l2param_opd_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _sample_w_l2param_opd_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_sample_w_l2param_opd/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_poly_sample_w_l2param_opd_comparison.sh b/jz-submissions/jobs-cli/full_poly_sample_w_l2param_opd_comparison.sh
deleted file mode 100644
index 8804ad08..00000000
--- a/jz-submissions/jobs-cli/full_poly_sample_w_l2param_opd_comparison.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_sample_w_l2param_opd_comparison    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_sample_w_l2param_opd_comparison%j.out  # name of the output file
-#SBATCH --error=poly_sample_w_l2param_opd_comparison%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _sample_w_l2param_opd_comparison_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _sample_w_l2param_opd_comparison_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 250 250"
-opt[2]="--id_name _sample_w_l2param_opd_comparison_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 150"
-opt[3]="--id_name _sample_w_l2param_opd_comparison_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 150 100"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _sample_w_l2param_opd_comparison_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_sample_w_l2param_opd_comparison/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_poly_standard.sh b/jz-submissions/jobs-cli/full_poly_standard.sh
deleted file mode 100644
index 1e0e08b4..00000000
--- a/jz-submissions/jobs-cli/full_poly_standard.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_standard    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_standard%j.out  # name of the output file
-#SBATCH --error=poly_standard%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _standard_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _standard_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[2]="--id_name _standard_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 200 150"
-opt[3]="--id_name _standard_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 150 100"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.04 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _standard_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_standard/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/full_poly_standard_new_epochs.sh b/jz-submissions/jobs-cli/full_poly_standard_new_epochs.sh
deleted file mode 100644
index 4186f79d..00000000
--- a/jz-submissions/jobs-cli/full_poly_standard_new_epochs.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_standard_new_epochs    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_standard_new_epochs%j.out  # name of the output file
-#SBATCH --error=poly_standard_new_epochs%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the development queue, max 2h
-##SBATCH --qos=qos_gpu-t3 # using the default queue, max 20h
-##SBATCH --qos=qos_gpu-t4 # using the queue for long runs, max 100h
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _standard_new_epochs_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _standard_new_epochs_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 200 150"
-opt[2]="--id_name _standard_new_epochs_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-opt[3]="--id_name _standard_new_epochs_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 100 50"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.004 \
- --l_rate_non_param 0.1 0.06 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- --l2_param 0. \
- --base_id_name _standard_new_epochs_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_standard_new_epochs/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/graph_lr_tunning.sh b/jz-submissions/jobs-cli/graph_lr_tunning.sh
deleted file mode 100644
index 0bd6143b..00000000
--- a/jz-submissions/jobs-cli/graph_lr_tunning.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=graph_lr_tunning    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=graph_lr_tunning_%j.out  # name of the output file
-#SBATCH --error=graph_lr_tunning_%j.err   # name of the error file (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _lr_tunning_2c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_non_param 0.5 0.25"
-opt[1]="--id_name _lr_tunning_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_non_param 1.0 0.50"
-opt[2]="--id_name _lr_tunning_1k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_non_param 5.0 2.5"
-opt[3]="--id_name _lr_tunning_2k --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --l_rate_non_param 10. 5.0"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model graph \
- --n_zernikes 15 \
- --l_rate_param 0.01 0.004 \
- --n_epochs_param 30 30 \
- --n_epochs_non_param 200 150 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _lr_tunning_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/graph_lr_tunning/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
\ No newline at end of file
diff --git a/jz-submissions/jobs-cli/graph_lr_tunning_v2.sh b/jz-submissions/jobs-cli/graph_lr_tunning_v2.sh
deleted file mode 100644
index 12bbd257..00000000
--- a/jz-submissions/jobs-cli/graph_lr_tunning_v2.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=graph_lr_tunning_v2_    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=graph_lr_tunning_v2_%j.out  # name of the output file
-#SBATCH --error=graph_lr_tunning_v2_%j.err   # name of the error file (here shared with the output)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _lr_tunning_v2_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --l_rate_non_param 0.2 0.10"
-opt[1]="--id_name _lr_tunning_v2_5c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --l_rate_non_param 0.4 0.20"
-opt[2]="--id_name _lr_tunning_v2_1k --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --l_rate_non_param 0.8 0.4"
-opt[3]="--id_name _lr_tunning_v2_2k --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --l_rate_non_param 2.0 1.0"
-
-
-cd $WORK/repo/wf-psf/long-runs/
-
-srun python -u ./train_eval_plot_script_click.py \
- --model graph \
- --n_zernikes 15 \
- --graph_features 21 \
- --l_rate_param 0.01 0.004 \
- --n_epochs_param 30 30 \
- --n_epochs_non_param 200 150 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0. \
- --base_id_name _lr_tunning_v2_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/graph_lr_tunning_v2/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/mccd_l2_opd_sample_w.sh b/jz-submissions/jobs-cli/mccd_l2_opd_sample_w.sh
deleted file mode 100644
index a72a507c..00000000
--- a/jz-submissions/jobs-cli/mccd_l2_opd_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=mccd_l2_sample_w    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=mccd_l2_sample_w%j.out  # name of the output file
-#SBATCH --error=mccd_l2_sample_w%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _l2_opd_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _l2_opd_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 250 250"
-opt[2]="--id_name _l2_opd_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 150"
-opt[3]="--id_name _l2_opd_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 150 100"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model mccd \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _l2_opd_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/mccd_l2_opd_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/param_l2_opd_full.sh b/jz-submissions/jobs-cli/param_l2_opd_full.sh
deleted file mode 100644
index b2b8e29e..00000000
--- a/jz-submissions/jobs-cli/param_l2_opd_full.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_l2_opd_full    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_l2_opd_full%j.out  # name of the output file
-#SBATCH --error=param_l2_opd_full%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name param_l2_opd_2c_ --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001 "
-opt[1]="--id_name param_l2_opd_5c_ --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001 "
-opt[2]="--id_name param_l2_opd_1k_ --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001 "
-opt[3]="--id_name param_l2_opd_2k_ --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001 "
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/l2_param_tr_ev.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --l2_param 5e-12 \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/param_l2_opd_sample_w.sh b/jz-submissions/jobs-cli/param_l2_opd_sample_w.sh
deleted file mode 100644
index 194c3e90..00000000
--- a/jz-submissions/jobs-cli/param_l2_opd_sample_w.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=param_l2_sample_w    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=param_l2_sample_w%j.out  # name of the output file
-#SBATCH --error=param_l2_sample_w%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _l2_opd_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --l_rate_param 0.005 0.001"
-opt[1]="--id_name _l2_opd_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[2]="--id_name _l2_opd_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 30 30 --l_rate_param 0.005 0.001"
-opt[3]="--id_name _l2_opd_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --l_rate_param 0.005 0.001"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model param \
- --n_zernikes 45 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _l2_opd_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/param_l2_opd_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_l2_opd_full.sh b/jz-submissions/jobs-cli/poly_l2_opd_full.sh
deleted file mode 100644
index 2373b472..00000000
--- a/jz-submissions/jobs-cli/poly_l2_opd_full.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_l2_opd_full    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_l2_opd_full%j.out  # name of the output file
-#SBATCH --error=poly_l2_opd_full%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-2
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name poly_l2_opd_2c_ --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 250 250 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[1]="--id_name poly_l2_opd_5c_ --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 100 120 --l_rate_param 0.01 0.01 --l_rate_non_param 0.1 0.1"
-opt[2]="--id_name poly_l2_opd_2k_ --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/l2_param_tr_ev.py \
- --model poly \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --l2_param 1e-11 \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_l2_opd_sample_w.sh b/jz-submissions/jobs-cli/poly_l2_opd_sample_w.sh
deleted file mode 100644
index 4324a6b5..00000000
--- a/jz-submissions/jobs-cli/poly_l2_opd_sample_w.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_l2_sample_w    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_l2_sample_w%j.out  # name of the output file
-#SBATCH --error=poly_l2_sample_w%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _l2_opd_sample_w_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[1]="--id_name _l2_opd_sample_w_5c --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 250 250"
-opt[2]="--id_name _l2_opd_sample_w_1k --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 150"
-opt[3]="--id_name _l2_opd_sample_w_2k --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 150 100"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model poly \
- --d_max_nonparam 5 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 5e-12 \
- --base_id_name _l2_opd_sample_w_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/poly_l2_opd_sample_w/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_lr_coherent_epochs.sh b/jz-submissions/jobs-cli/poly_lr_coherent_epochs.sh
deleted file mode 100644
index c0d71f38..00000000
--- a/jz-submissions/jobs-cli/poly_lr_coherent_epochs.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_lr_coherent_epochs    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_lr_coherent_epochs%j.out  # name of the output file
-#SBATCH --error=poly_lr_coherent_epochs%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-
-opt[0]="--id_name _2c_chkp_poly_lr_coherent_epochs --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 40 40 --n_epochs_non_param 500 500"
-opt[1]="--id_name _5c_chkp_poly_lr_coherent_epochs --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300"
-opt[2]="--id_name _1k_chkp_poly_lr_coherent_epochs --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 150"
-opt[3]="--id_name _2k_chkp_poly_lr_coherent_epochs --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100"
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_new_lr.sh b/jz-submissions/jobs-cli/poly_new_lr.sh
deleted file mode 100644
index e46931f3..00000000
--- a/jz-submissions/jobs-cli/poly_new_lr.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_new_lr    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_new_lr%j.out  # name of the output file
-#SBATCH --error=poly_new_lr%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _1k_chkp_poly_new_lr --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy"
-opt[1]="--id_name _2c_chkp_poly_new_lr --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy"
-opt[2]="--id_name _5c_chkp_poly_new_lr --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy"
-opt[3]="--id_name _2k_chkp_poly_new_lr --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy"
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --n_epochs_param 20 20 \
- --n_epochs_non_param 100 150 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.02 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_sample_weights.sh b/jz-submissions/jobs-cli/poly_sample_weights.sh
deleted file mode 100644
index a262c762..00000000
--- a/jz-submissions/jobs-cli/poly_sample_weights.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=sample_weights_1k    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=sample_weights_1k%j.out  # name of the output file
-#SBATCH --error=sample_weights_1k%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --id_name _sample_weights_1k_ \
- --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy \
- --n_epochs_param 20 20 \
- --n_epochs_non_param 150 150 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
diff --git a/jz-submissions/jobs-cli/poly_sample_weights_full.sh b/jz-submissions/jobs-cli/poly_sample_weights_full.sh
deleted file mode 100644
index 5ca0120d..00000000
--- a/jz-submissions/jobs-cli/poly_sample_weights_full.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=sample_weights_full    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=sample_weights_full%j.out  # name of the output file
-#SBATCH --error=sample_weights_full%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-2
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _sample_weights_2c_ --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 250 250 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[1]="--id_name _sample_weights_5c_ --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 100 120 --l_rate_param 0.01 0.01 --l_rate_non_param 0.1 0.1"
-opt[2]="--id_name _sample_weights_2k_ --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 100 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_sample_weights_full_strategy1.sh b/jz-submissions/jobs-cli/poly_sample_weights_full_strategy1.sh
deleted file mode 100644
index 13aedf54..00000000
--- a/jz-submissions/jobs-cli/poly_sample_weights_full_strategy1.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=sample_weights_full_strategy1    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=sample_weights_full_strategy1%j.out  # name of the output file
-#SBATCH --error=sample_weights_full_strategy1%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _sample_weights_strategy1_2c_ --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[1]="--id_name _sample_weights_strategy1_5c_ --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 250 250 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[2]="--id_name _sample_weights_strategy1_1k_ --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 150 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[3]="--id_name _sample_weights_strategy1_2k_ --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 150 100 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/poly_v2_original_lr_as_sample_w_strat1.sh b/jz-submissions/jobs-cli/poly_v2_original_lr_as_sample_w_strat1.sh
deleted file mode 100644
index 7eb9de30..00000000
--- a/jz-submissions/jobs-cli/poly_v2_original_lr_as_sample_w_strat1.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=poly_v2_original_lr_as_sample_w_strat1    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=poly_v2_original_lr_as_sample_w_strat1%j.out  # name of the output file
-#SBATCH --error=poly_v2_original_lr_as_sample_w_strat1%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _poly_v2_original_lr_as_sample_w_strat1_2c_ --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy --n_epochs_param 30 30 --n_epochs_non_param 300 300 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[1]="--id_name _poly_v2_original_lr_as_sample_w_strat1_5c_ --train_dataset_file train_Euclid_res_500_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 250 250 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[2]="--id_name _poly_v2_original_lr_as_sample_w_strat1_1k_ --train_dataset_file train_Euclid_res_1000_TrainStars_id_001.npy --n_epochs_param 20 20 --n_epochs_non_param 150 150 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-opt[3]="--id_name _poly_v2_original_lr_as_sample_w_strat1_2k_ --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy --n_epochs_param 15 15 --n_epochs_non_param 150 100 --l_rate_param 0.01 0.001 --l_rate_non_param 0.1 0.01"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights False \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-cli/slurm-logs/slurm_output_logs.md b/jz-submissions/jobs-cli/slurm-logs/slurm_output_logs.md
deleted file mode 100644
index c033fecc..00000000
--- a/jz-submissions/jobs-cli/slurm-logs/slurm_output_logs.md
+++ /dev/null
@@ -1,2 +0,0 @@
-slurm output logs
-
\ No newline at end of file
diff --git a/jz-submissions/jobs-cli/test_sample_weights.sh b/jz-submissions/jobs-cli/test_sample_weights.sh
deleted file mode 100644
index 0735b3aa..00000000
--- a/jz-submissions/jobs-cli/test_sample_weights.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=test_sample_weights    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=00:20:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=test_sample_weights%j.out  # name of the output file
-#SBATCH --error=test_sample_weights%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-#SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --id_name _test_sample_weights \
- --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy \
- --n_epochs_param 20 20 \
- --n_epochs_non_param 150 200 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --d_max_nonparam 5 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
diff --git a/jz-submissions/jobs-cli/testing_train_eval_plot.sh b/jz-submissions/jobs-cli/testing_train_eval_plot.sh
deleted file mode 100644
index fadc2298..00000000
--- a/jz-submissions/jobs-cli/testing_train_eval_plot.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=testing_auto    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=02:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=testing_auto%j.out  # name of the output file
-#SBATCH --error=testing_auto%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-#SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-opt[0]="--id_name _testing_auto_2c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy"
-opt[1]="--id_name _testing_auto_5c --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy"
-opt[2]="--id_name _testing_auto_1k --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy"
-opt[3]="--id_name _testing_auto_2k --train_dataset_file train_Euclid_res_200_TrainStars_id_001.npy"
-
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/train_eval_plot_script_click.py \
- --model param \
- --d_max_nonparam 5 \
- --n_epochs_param 2 2 \
- --n_epochs_non_param 2 2 \
- --l_rate_param 0.01 0.001 \
- --l_rate_non_param 0.1 0.01 \
- --saved_model_type checkpoint \
- --saved_cycle cycle2 \
- --total_cycles 2 \
- --use_sample_weights True \
- --l2_param 0.0 \
- --base_id_name _testing_auto_ \
- --suffix_id_name 2c --suffix_id_name 5c --suffix_id_name 1k --suffix_id_name 2k \
- --star_numbers 200 --star_numbers 500 --star_numbers 1000 --star_numbers 2000 \
- --plots_folder plots/testing_plot_folder/ \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
diff --git a/jz-submissions/jobs-test/generic_cli_train_eval.sh b/jz-submissions/jobs-test/generic_cli_train_eval.sh
deleted file mode 100644
index e024a37f..00000000
--- a/jz-submissions/jobs-test/generic_cli_train_eval.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_poly_tr_ev_click    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_poly_tr_ev_click%j.out  # name of the output file
-#SBATCH --error=2k_poly_tr_ev_click%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy \
- --n_epochs_param 2 2 \
- --n_epochs_non_param 2 2 \
- --id_name _test-coherent_euclid_2000stars \
- --d_max_nonparam 5 \
diff --git a/jz-submissions/jobs-test/generic_cli_train_eval_v2.sh b/jz-submissions/jobs-test/generic_cli_train_eval_v2.sh
deleted file mode 100644
index 8b90c10e..00000000
--- a/jz-submissions/jobs-test/generic_cli_train_eval_v2.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_poly_tr_ev_click    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-##SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=00:05:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_poly_tr_ev_click%j.out  # name of the output file
-#SBATCH --error=2k_poly_tr_ev_click%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-#SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python -u ./../../long-runs/alternative_train_eval_script.py \
- --model poly \
- --train_dataset_file train_Euclid_res_2000_TrainStars_id_001.npy \
- --n_epochs_param 2 2 \
- --n_epochs_non_param 2 2 \
- --id_name _test-coherent_euclid_2000stars \
- --d_max_nonparam 5 \
diff --git a/jz-submissions/jobs/evaluation_mccd_1000.sh b/jz-submissions/jobs/evaluation_mccd_1000.sh
deleted file mode 100644
index cb3e9080..00000000
--- a/jz-submissions/jobs/evaluation_mccd_1000.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_mccd_eval    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=1k_mccd_eval%j.out  # name of the output file
-#SBATCH --error=1k_mccd_eval%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# clean out modules loaded interactively and inherited by default
-module purge
-
-# load the modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the launched commands
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_mccd_1000.py
diff --git a/jz-submissions/jobs/evaluation_mccd_200.sh b/jz-submissions/jobs/evaluation_mccd_200.sh
deleted file mode 100644
index dd4671d6..00000000
--- a/jz-submissions/jobs/evaluation_mccd_200.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_mccd_eval    # job name
-##SBATCH --partition=gpu_p2          # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1                   # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1          # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1                 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10           # number of CPU cores per task (a quarter of the node here)
-# /!\ Warning: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread         # hyperthreading disabled
-#SBATCH --time=20:00:00              # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_mccd_eval%j.out  # name of the output file
-#SBATCH --error=2c_mccd_eval%j.err   # name of the error file (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# nettoyage des modules charges en interactif et herites par defaut
-module purge
-
-# chargement des modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo des commandes lancees
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_mccd_200.py
diff --git a/jz-submissions/jobs/evaluation_mccd_2000.sh b/jz-submissions/jobs/evaluation_mccd_2000.sh
deleted file mode 100644
index 7e860c55..00000000
--- a/jz-submissions/jobs/evaluation_mccd_2000.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_mccd_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_mccd_eval%j.out # output file name
-#SBATCH --error=2k_mccd_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_mccd_2000.py
diff --git a/jz-submissions/jobs/evaluation_mccd_500.sh b/jz-submissions/jobs/evaluation_mccd_500.sh
deleted file mode 100644
index baac1033..00000000
--- a/jz-submissions/jobs/evaluation_mccd_500.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5c_mccd_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5c_mccd_eval%j.out # output file name
-#SBATCH --error=5c_mccd_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_mccd_500.py
diff --git a/jz-submissions/jobs/evaluation_param_1000.sh b/jz-submissions/jobs/evaluation_param_1000.sh
deleted file mode 100644
index 6db65b17..00000000
--- a/jz-submissions/jobs/evaluation_param_1000.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_param_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=1k_param_eval%j.out # output file name
-#SBATCH --error=1k_param_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_param_1000.py
diff --git a/jz-submissions/jobs/evaluation_param_200.sh b/jz-submissions/jobs/evaluation_param_200.sh
deleted file mode 100644
index 3bcffaa5..00000000
--- a/jz-submissions/jobs/evaluation_param_200.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_param_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_param_eval%j.out # output file name
-#SBATCH --error=2c_param_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_param_200.py
diff --git a/jz-submissions/jobs/evaluation_param_2000.sh b/jz-submissions/jobs/evaluation_param_2000.sh
deleted file mode 100644
index 6610d308..00000000
--- a/jz-submissions/jobs/evaluation_param_2000.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_param_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_param_eval%j.out # output file name
-#SBATCH --error=2k_param_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_param_2000.py
diff --git a/jz-submissions/jobs/evaluation_param_500.sh b/jz-submissions/jobs/evaluation_param_500.sh
deleted file mode 100644
index d20be58a..00000000
--- a/jz-submissions/jobs/evaluation_param_500.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5c_param_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5c_param_eval%j.out # output file name
-#SBATCH --error=5c_param_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_param_500.py
diff --git a/jz-submissions/jobs/evaluation_poly_1000.sh b/jz-submissions/jobs/evaluation_poly_1000.sh
deleted file mode 100644
index c95ece3a..00000000
--- a/jz-submissions/jobs/evaluation_poly_1000.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_poly_v2_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=1k_poly_v2_eval%j.out # output file name
-#SBATCH --error=1k_poly_v2_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_poly_1000.py
diff --git a/jz-submissions/jobs/evaluation_poly_200.sh b/jz-submissions/jobs/evaluation_poly_200.sh
deleted file mode 100644
index dc00c666..00000000
--- a/jz-submissions/jobs/evaluation_poly_200.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_poly_v2_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_poly_v2_eval%j.out # output file name
-#SBATCH --error=2c_poly_v2_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_poly_200.py
diff --git a/jz-submissions/jobs/evaluation_poly_2000.sh b/jz-submissions/jobs/evaluation_poly_2000.sh
deleted file mode 100644
index 59652338..00000000
--- a/jz-submissions/jobs/evaluation_poly_2000.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_poly_v2_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_poly_v2_eval%j.out # output file name
-#SBATCH --error=2k_poly_v2_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_poly_2000.py
diff --git a/jz-submissions/jobs/evaluation_poly_500.sh b/jz-submissions/jobs/evaluation_poly_500.sh
deleted file mode 100644
index c65e14a2..00000000
--- a/jz-submissions/jobs/evaluation_poly_500.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5c_poly_v2_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5c_poly_v2_eval%j.out # output file name
-#SBATCH --error=5c_poly_v2_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/evaluation_poly_500.py
diff --git a/jz-submissions/jobs/train_eval_poly_200.sh b/jz-submissions/jobs/train_eval_poly_200.sh
deleted file mode 100644
index c54895f9..00000000
--- a/jz-submissions/jobs/train_eval_poly_200.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_poly_train_eval # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-##SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_poly_train_eval%j.out # output file name
-#SBATCH --error=2c_poly_train_eval%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/train_eval_poly_200.py
diff --git a/jz-submissions/jobs/train_eval_poly_200_click.sh b/jz-submissions/jobs/train_eval_poly_200_click.sh
deleted file mode 100644
index a887318b..00000000
--- a/jz-submissions/jobs/train_eval_poly_200_click.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_poly_tr_ev_click # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_poly_tr_ev_click%j.out # output file name
-#SBATCH --error=2c_poly_tr_ev_click%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/train_eval_poly_200_click.py --n_epochs_param 2 2 --n_epochs_non_param 2 2 --id_name _test-coherent_euclid_200stars
diff --git a/jz-submissions/jobs/training_mccd_1000.sh b/jz-submissions/jobs/training_mccd_1000.sh
deleted file mode 100644
index 4abbc049..00000000
--- a/jz-submissions/jobs/training_mccd_1000.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_mccd_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=1k_mccd_train%j.out # output file name
-#SBATCH --error=1k_mccd_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_mccd_1000.py
diff --git a/jz-submissions/jobs/training_mccd_200.sh b/jz-submissions/jobs/training_mccd_200.sh
deleted file mode 100644
index f3433172..00000000
--- a/jz-submissions/jobs/training_mccd_200.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_mccd_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_mccd_train%j.out # output file name
-#SBATCH --error=2c_mccd_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_mccd_200.py
diff --git a/jz-submissions/jobs/training_mccd_2000.sh b/jz-submissions/jobs/training_mccd_2000.sh
deleted file mode 100644
index a48c209a..00000000
--- a/jz-submissions/jobs/training_mccd_2000.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_mccd_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_mccd_train%j.out # output file name
-#SBATCH --error=2k_mccd_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_mccd_2000.py
diff --git a/jz-submissions/jobs/training_mccd_500.sh b/jz-submissions/jobs/training_mccd_500.sh
deleted file mode 100644
index ddbb7cc7..00000000
--- a/jz-submissions/jobs/training_mccd_500.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5c_mccd_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5c_mccd_train%j.out # output file name
-#SBATCH --error=5c_mccd_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_mccd_500.py
diff --git a/jz-submissions/jobs/training_param_1000.sh b/jz-submissions/jobs/training_param_1000.sh
deleted file mode 100644
index f4435bf0..00000000
--- a/jz-submissions/jobs/training_param_1000.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_param_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=1k_param_train%j.out # output file name
-#SBATCH --error=1k_param_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_param_1000.py
diff --git a/jz-submissions/jobs/training_param_200.sh b/jz-submissions/jobs/training_param_200.sh
deleted file mode 100644
index cb4d71eb..00000000
--- a/jz-submissions/jobs/training_param_200.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_param_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_param_train%j.out # output file name
-#SBATCH --error=2c_param_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_param_200.py
diff --git a/jz-submissions/jobs/training_param_2000.sh b/jz-submissions/jobs/training_param_2000.sh
deleted file mode 100644
index e0ed3bf1..00000000
--- a/jz-submissions/jobs/training_param_2000.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_param_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_param_train%j.out # output file name
-#SBATCH --error=2k_param_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_param_2000.py
diff --git a/jz-submissions/jobs/training_param_500.sh b/jz-submissions/jobs/training_param_500.sh
deleted file mode 100644
index 132f3360..00000000
--- a/jz-submissions/jobs/training_param_500.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5c_param_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5c_param_train%j.out # output file name
-#SBATCH --error=5c_param_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_param_500.py
diff --git a/jz-submissions/jobs/training_poly_1000.sh b/jz-submissions/jobs/training_poly_1000.sh
deleted file mode 100644
index 1c65d04d..00000000
--- a/jz-submissions/jobs/training_poly_1000.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=1k_poly_v2_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=1k_poly_v2_train%j.out # output file name
-#SBATCH --error=1k_poly_v2_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_poly_1000.py
diff --git a/jz-submissions/jobs/training_poly_200.sh b/jz-submissions/jobs/training_poly_200.sh
deleted file mode 100644
index a4433cdd..00000000
--- a/jz-submissions/jobs/training_poly_200.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2c_poly_v2_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-16g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2c_poly_v2_train%j.out # output file name
-#SBATCH --error=2c_poly_v2_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_poly_200.py
diff --git a/jz-submissions/jobs/training_poly_2000.sh b/jz-submissions/jobs/training_poly_2000.sh
deleted file mode 100644
index 65483075..00000000
--- a/jz-submissions/jobs/training_poly_2000.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=2k_poly_v2_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=2k_poly_v2_train%j.out # output file name
-#SBATCH --error=2k_poly_v2_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_poly_2000.py
diff --git a/jz-submissions/jobs/training_poly_500.sh b/jz-submissions/jobs/training_poly_500.sh
deleted file mode 100644
index 4b339687..00000000
--- a/jz-submissions/jobs/training_poly_500.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5c_poly_v2_train # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-# /!\ Note: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5c_poly_v2_train%j.out # output file name
-#SBATCH --error=5c_poly_v2_train%j.err # error file name (here shared with the output)
-#SBATCH -A xdy@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-
-# purge modules loaded interactively and inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.4.1
-
-# echo the commands as they are run
-set -x
-
-cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
-
-srun python ./../scripts/training_poly_500.py
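The job scripts deleted above are near-identical: they differ only in the job name, the optional GPU memory constraint, and the target Python script. A single generator could have produced them all. A minimal sketch, assuming the file layout used above (the job-naming scheme, the constraint rule, and the hard-coded paths are illustrative, not a reconstruction of the original workflow):

# Hypothetical generator for the near-identical Slurm job scripts deleted above.
TEMPLATE = """#!/bin/bash
#SBATCH --job-name={job_name}
#SBATCH --ntasks=1
#SBATCH --ntasks-per-node=1
#SBATCH --gres=gpu:1
#SBATCH --cpus-per-task=10
{constraint}#SBATCH --hint=nomultithread
#SBATCH --time=20:00:00
#SBATCH --output={job_name}%j.out
#SBATCH --error={job_name}%j.err
#SBATCH -A xdy@gpu

module purge
module load tensorflow-gpu/py3/2.4.1
set -x
cd $WORK/repo/wf-psf/jz-submissions/slurm-logs/
srun python ./../scripts/{script}.py
"""

for task in ("training", "evaluation"):
    for model in ("mccd", "param", "poly"):
        for n_stars in (200, 500, 1000, 2000):
            script = f"{task}_{model}_{n_stars}"
            body = TEMPLATE.format(
                job_name=script,
                # Request 32 GB V100s for the larger datasets (illustrative rule).
                constraint="#SBATCH -C v100-32g\n" if n_stars >= 1000 else "",
                script=script,
            )
            with open(f"{script}.sh", "w") as f:
                f.write(body)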
diff --git a/jz-submissions/scripts/evaluation_mccd_1000.py b/jz-submissions/scripts/evaluation_mccd_1000.py
deleted file mode 100644
index 98fca698..00000000
--- a/jz-submissions/scripts/evaluation_mccd_1000.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_1000stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the PSF model
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGood bye.')
-sys.stdout = old_stdout
-log_file.close()
-
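The evaluation script above stores all metrics as a pickled dictionary via np.save. A minimal sketch for reading them back (the file name is illustrative; np.save appends the .npy extension to the path built in the script):

# Load the metrics dictionary written by the evaluation script above.
import numpy as np

metrics = np.load("metrics-mccd-coherent_euclid_1000stars.npy", allow_pickle=True)[()]
for split in ("test_metrics", "train_metrics"):
    poly = metrics[split]["poly_metric"]
    # Polychromatic reconstruction error over the star positions of this split.
    print(split, "poly RMSE: %e +/- %e" % (poly["rmse"], poly["std_rmse"]))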
diff --git a/jz-submissions/scripts/evaluation_mccd_200.py b/jz-submissions/scripts/evaluation_mccd_200.py
deleted file mode 100644
index 63ca4fa8..00000000
--- a/jz-submissions/scripts/evaluation_mccd_200.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_200stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_200_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the PSF model
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
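Each of the deleted evaluation scripts assembles the Zernike cube with an explicit Python loop and then zeroes the NaN pixels outside the pupil. A minimal vectorized sketch of the same construction, assuming `zernikes` is the list of equally shaped 2D maps returned by `wf.utils.zernike_generator`:

```python
import numpy as np
import tensorflow as tf

def zernikes_to_tf_cube(zernikes):
    """Stack 2D Zernike maps into a float32 tensor of shape (n_zernikes, H, W).

    NaN entries (pixels outside the unit pupil) are replaced by zero,
    matching the masking done in the loop-based scripts.
    """
    cube = np.stack(zernikes, axis=0)
    cube = np.nan_to_num(cube, nan=0.0)  # NaN -> 0, as in the loop version
    return tf.convert_to_tensor(cube, dtype=tf.float32)
```
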
diff --git a/jz-submissions/scripts/evaluation_mccd_2000.py b/jz-submissions/scripts/evaluation_mccd_2000.py
deleted file mode 100644
index 152603c4..00000000
--- a/jz-submissions/scripts/evaluation_mccd_2000.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_2000stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_2000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the model
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
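Each script redirects `sys.stdout` by hand and only restores it on the last line, so a crash mid-evaluation leaves the interpreter printing into a half-written log file that is never closed. A sketch of the same logging pattern with context managers, which restore stdout and close the file even on error; `run_evaluation` is a hypothetical stand-in for the script body:

```python
import contextlib

def evaluate_with_logfile(log_path, run_evaluation):
    """Capture all prints emitted by run_evaluation() into log_path."""
    with open(log_path, "w") as log_file:
        with contextlib.redirect_stdout(log_file):
            print("Starting the log file.")
            run_evaluation()
```
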
diff --git a/jz-submissions/scripts/evaluation_mccd_500.py b/jz-submissions/scripts/evaluation_mccd_500.py
deleted file mode 100644
index d8f41b21..00000000
--- a/jz-submissions/scripts/evaluation_mccd_500.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_500stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_500_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the model
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
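The evaluation scripts deleted in this diff differ only in the selected model ('mccd', 'poly' or 'param') and the training-set size (200, 500, 1000 or 2000 stars); the remaining ~370 lines are copy-pasted. A sketch of how those two knobs could be exposed on the command line instead — illustrative only, not the configuration interface the package actually adopted:

```python
import argparse

parser = argparse.ArgumentParser(description="PSF modelling evaluation")
parser.add_argument("--model", choices=["mccd", "poly", "param"], required=True)
parser.add_argument("--n-stars", type=int, choices=[200, 500, 1000, 2000],
                    required=True)
args = parser.parse_args()

# Reproduce the naming scheme used by the deleted scripts.
run_id_name = "%s-coherent_euclid_%dstars" % (args.model, args.n_stars)
train_path = "train_Euclid_res_%d_TrainStars_id_001.npy" % args.n_stars
```
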
diff --git a/jz-submissions/scripts/evaluation_param_1000.py b/jz-submissions/scripts/evaluation_param_1000.py
deleted file mode 100644
index 1f433c4c..00000000
--- a/jz-submissions/scripts/evaluation_param_1000.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_1000stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the model
-n_zernikes = 45
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
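Every deleted script guards against CPU-only runs with `tf.test.gpu_device_name()` compared against a literal device string, a TF 1.x-era idiom. A sketch of the same fail-fast check with the `tf.config` API available in TF 2.x:

```python
import tensorflow as tf

def require_gpu():
    """Raise, as the scripts do, when no GPU is visible to TensorFlow."""
    gpus = tf.config.list_physical_devices("GPU")
    if not gpus:
        raise SystemError("GPU device not found")
    print("Found GPU at: {}".format(gpus[0].name))
    print("tf_version: " + str(tf.__version__))
```
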
diff --git a/jz-submissions/scripts/evaluation_param_200.py b/jz-submissions/scripts/evaluation_param_200.py
deleted file mode 100644
index 143150dc..00000000
--- a/jz-submissions/scripts/evaluation_param_200.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_200stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_200_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the model
-n_zernikes = 45
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
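Each script ends by pickling a nested metrics dictionary with `np.save`. For reference, a sketch of reading one of these files back; the filename is illustrative, and the `[()]` indexing unwraps the 0-d object array that `np.save` produces for a dict, just as the dataset-loading code in the scripts does:

```python
import numpy as np

metrics = np.load("metrics-param-coherent_euclid_200stars.npy",
                  allow_pickle=True)[()]
test_rmse = metrics["test_metrics"]["poly_metric"]["rmse"]
train_rmse = metrics["train_metrics"]["poly_metric"]["rmse"]
print("poly RMSE  test: %s  train: %s" % (test_rmse, train_rmse))
```
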
diff --git a/jz-submissions/scripts/evaluation_param_2000.py b/jz-submissions/scripts/evaluation_param_2000.py
deleted file mode 100644
index e4eef001..00000000
--- a/jz-submissions/scripts/evaluation_param_2000.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_2000stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_2000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomials used by the model
-n_zernikes = 45
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
diff --git a/jz-submissions/scripts/evaluation_param_500.py b/jz-submissions/scripts/evaluation_param_500.py
deleted file mode 100644
index 6278c2c5..00000000
--- a/jz-submissions/scripts/evaluation_param_500.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_500stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_500_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomial modes used in the parametric part
-n_zernikes = 45
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
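-# Zernike maps are NaN outside the unit pupil; zero them before converting
-# the cube to a TF tensor.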
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
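-# The calls above seed the simulator with normalized random Zernike
-# coefficients and one monochromatic PSF at 0.7 um, so the toolkit carries a
-# valid internal state before it is passed to the metric routines.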
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
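-# Restores the cycle-2 checkpoint written by the corresponding training run.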
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
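-# Assign the dataset's C_poly coefficient matrix and zero the non-parametric
-# OPD component, so the ground truth field is purely parametric.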
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
diff --git a/jz-submissions/scripts/evaluation_poly_1000.py b/jz-submissions/scripts/evaluation_poly_1000.py
deleted file mode 100644
index 0bdb1bfb..00000000
--- a/jz-submissions/scripts/evaluation_poly_1000.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_1000stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomial modes used in the parametric part
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
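-# Fail fast if TensorFlow cannot see a GPU; the evaluation assumes one.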
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
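-# Each dataset is a pickled dict saved with np.save; the trailing [()]
-# unwraps the 0-d object array returned by np.load.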
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
diff --git a/jz-submissions/scripts/evaluation_poly_200.py b/jz-submissions/scripts/evaluation_poly_200.py
deleted file mode 100644
index ed41d113..00000000
--- a/jz-submissions/scripts/evaluation_poly_200.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_200stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_200_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomial modes used in the parametric part
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
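-# Redirect stdout to the log file; the original stream is restored at the
-# end of the script.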
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
diff --git a/jz-submissions/scripts/evaluation_poly_2000.py b/jz-submissions/scripts/evaluation_poly_2000.py
deleted file mode 100644
index dc80272b..00000000
--- a/jz-submissions/scripts/evaluation_poly_2000.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_2000stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_2000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomial modes used in the parametric part
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
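-# Build the PSF field model matching the training configuration: 'mccd'
-# (polynomial + graph constraints), 'poly' (semi-parametric polynomial), or
-# 'param' (purely parametric Zernike field).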
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
diff --git a/jz-submissions/scripts/evaluation_poly_500.py b/jz-submissions/scripts/evaluation_poly_500.py
deleted file mode 100644
index 0ae93881..00000000
--- a/jz-submissions/scripts/evaluation_poly_500.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-## PSF modelling evaluation
-
-import sys
-import numpy as np
-import time
-import matplotlib.pyplot as plt
-import wf_psf as wf
-import tensorflow as tf
-
-
-## Start measuring elapsed time
-starting_time = time.time()
-
-
-## Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_500stars'
-run_id_name = model + id_name
-
-# folder paths
-log_save_file = '/gpfswork/rech/xdy/ulx23va/wf-outputs/log-files/'
-model_folder = '/gpfswork/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-weights_paths = model_folder + 'chkp_' + run_id_name + '_cycle2'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_500_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Saving path
-saving_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/'
-
-
-## Model parameters
-# Number of Zernike polynomial modes used in the parametric part
-n_zernikes = 15
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-
-## Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-
-## Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-
-## Load datasets
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Prepare models
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Outputs (needed for the MCCD model)
-outputs = tf_noisy_train_stars
-
-
-## Create the model
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-## Load the model's weights
-tf_semiparam_field.load_weights(weights_paths)
-
-
-## Prepare ground truth model
-n_zernikes_bis = 45
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes_bis, wfe_dim=pupil_diameter)
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-# Initialize the model
-GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes_bis,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-# For the Ground truth model
-GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
-_ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
-## Metric evaluation on the test dataset
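-# The blocks below compare the trained model against the ground truth:
-# pixel errors on polychromatic and monochromatic reconstructions, OPD
-# residuals, and PSF shape errors at Euclid resolution (output_Q=1,
-# 64-pixel stamps).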
-print('\n***\nMetric evaluation on the test dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
-mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=batch_size)
-
-opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics
-test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
-## Metric evaluation on the train dataset
-print('\n***\nMetric evaluation on the train dataset\n***\n')
-
-# Polychromatic star reconstructions
-rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=n_bins_lda,
- batch_size=batch_size)
-
-train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
-# Monochromatic star reconstructions
-lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
-rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
-train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
-# OPD metrics
-rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=batch_size)
-
-train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
-# Shape metrics
-train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=batch_size)
-
-# Save metrics into dictionary
-train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
-## Save results
-metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
-output_path = saving_path + 'metrics-' + run_id_name
-np.save(output_path, metrics, allow_pickle=True)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
-
diff --git a/jz-submissions/scripts/train_eval_poly_200.py b/jz-submissions/scripts/train_eval_poly_200.py
deleted file mode 100644
index 0718c73b..00000000
--- a/jz-submissions/scripts/train_eval_poly_200.py
+++ /dev/null
@@ -1,703 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# PSF modelling and evaluation
-from absl import app
-from absl import flags
-
-import sys
-import numpy as np
-import time
-import wf_psf as wf
-import tensorflow as tf
-
-import tensorflow_addons as tfa
-
-
-## Training flags
-# Model definition
-flags.DEFINE_string("model", "poly", "Model type. Options are: 'mccd', 'poly, 'param'.")
-flags.DEFINE_string("id_name", "_test-coherent_euclid_200stars", "Model saving id.")
-# Saving paths
-flags.DEFINE_string("base_path", "/gpfswork/rech/xdy/ulx23va/wf-outputs/", "Base path for saving files.")
-flags.DEFINE_string("log_folder", "log-files/", "Folder name to save log files.")
-flags.DEFINE_string("model_folder", "chkp/", "Folder name to save trained models.")
-flags.DEFINE_string("optim_hist_folder", "optim-hist/", "Folder name to save optimisation history files.")
-flags.DEFINE_string("chkp_save_path", "/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/", "Path to save model checkpoints during training.")
-# Input dataset paths
-flags.DEFINE_string("dataset_folder", "/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/", "Folder path of datasets.")
-flags.DEFINE_string("train_dataset_file", "train_Euclid_res_200_TrainStars_id_001.npy", "Train dataset file name.")
-flags.DEFINE_string("test_dataset_file", "test_Euclid_res_id_001.npy", "Test dataset file name.")
-# Model parameters
-flags.DEFINE_integer("n_zernikes", 15, "Zernike polynomial modes to use on the parametric part.")
-flags.DEFINE_integer("pupil_diameter", 256, "Dimension of the OPD/Wavefront space.")
-flags.DEFINE_integer("n_bins_lda", 20, "Number of wavelength bins to use to reconstruct polychromatic objects.")
-flags.DEFINE_float("output_Q", 3., "Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model.")
-flags.DEFINE_float("oversampling_rate", 3., "Oversampling rate used for the OPD/WFE PSF model.")
-flags.DEFINE_integer("output_dim", 32, "Dimension of the pixel PSF postage stamp.")
-flags.DEFINE_integer("d_max", 2, "Max polynomial degree of the parametric part.")
-flags.DEFINE_integer("d_max_nonparam", 3, "Max polynomial degree of the non-parametric part.")
-flags.DEFINE_list("x_lims", [0, 1e3], "Limits of the PSF field coordinates for the x axis.")
-flags.DEFINE_list("y_lims", [0, 1e3], "Limits of the PSF field coordinates for the y axis.")
-flags.DEFINE_integer("graph_features", 10, "Number of graph-constrained features of the non-parametric part.")
-flags.DEFINE_float("l1_rate", 1e-8, "L1 regularisation parameter for the non-parametric part.")
-# Training parameters
-flags.DEFINE_integer("batch_size", 32, "Batch size used for the trainingin the stochastic gradient descend type of algorithm.")
-flags.DEFINE_list("l_rate_param", [1e-2, 1e-2], "Learning rates for the parametric parts.")
-flags.DEFINE_list("l_rate_non_param", [1e-1, 1e-1], "Learning rates for the non-parametric parts.")
-flags.DEFINE_list("n_epochs_param", [2, 2], "Number of training epochs of the parametric parts.")
-flags.DEFINE_list("n_epochs_non_param", [2, 2], "Number of training epochs of the non-parametric parts.")
-flags.DEFINE_integer("total_cycles", 2, "Total amount of cycles to perform. For the moment the only available options are '1' or '2'.")
-
-## Evaluation flags
-# Saving paths
-flags.DEFINE_string("metric_base_path", "/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/", "Base path for saving metric files.")
-flags.DEFINE_string("saved_model_type", "final", "Type of saved model to use for the evaluation. Can be 'final' or 'checkpoint'.")
-flags.DEFINE_string("saved_cycle", "cycle2", "Saved cycle to use for the evaluation. Can be 'cycle1' or 'cycle2'.")
-# Evaluation parameters
-flags.DEFINE_integer("GT_n_zernikes", 45, "Zernike polynomial modes to use on the ground truth model parametric part.")
-flags.DEFINE_integer("eval_batch_size", 16, "Batch size to use for the evaluation.")
-
-# Define flags
-FLAGS = flags.FLAGS
-
-
-def train_model():
- """ Train the model defined in the flags. """
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = FLAGS.model + FLAGS.id_name
-
- # Define paths
- log_save_file = FLAGS.base_path + FLAGS.log_folder
-    model_save_file = FLAGS.base_path + FLAGS.model_folder
- optim_hist_file = FLAGS.base_path + FLAGS.optim_hist_folder
- saving_optim_hist = dict()
-
-
- # Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '_output.log','w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- # Print GPU and tensorflow info
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
- ## Prepare the inputs
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=FLAGS.n_zernikes, wfe_dim=FLAGS.pupil_diameter)
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
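-    # The generated Zernike maps are NaN outside the circular pupil support;
-    # zeroing them keeps the TensorFlow tensor well defined.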
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
- print('Zernike cube:')
- print(tf_zernike_cube.shape)
-
-
- ## Load the dictionaries
- train_dataset = np.load(FLAGS.dataset_folder + FLAGS.train_dataset_file, allow_pickle=True)[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(FLAGS.dataset_folder + FLAGS.test_dataset_file, allow_pickle=True)[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
-
- ## Generate initializations
- # Prepare np input
- simPSF_np = wf.SimPSFToolkit(zernikes, max_order=FLAGS.n_zernikes,
- pupil_diameter=FLAGS.pupil_diameter, output_dim=FLAGS.output_dim,
- oversampling_rate=FLAGS.oversampling_rate, output_Q=FLAGS.output_Q)
- simPSF_np.gen_random_Z_coeffs(max_order=FLAGS.n_zernikes)
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=FLAGS.pupil_diameter, N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
- # Initialize the SED data list
- packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=FLAGS.n_bins_lda)
- for _sed in train_SEDs]
-
-
- # Prepare the inputs for the training
- tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
- tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
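-    # perm=[0, 2, 1] swaps the last two axes so that each star carries a
-    # (n_bins_lda, n_packed_elems) matrix, i.e. one row per wavelength bin.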
-
- inputs = [tf_train_pos, tf_packed_SED_data]
-
- # Select the observed stars (noisy or noiseless)
- outputs = tf_noisy_train_stars
- # outputs = tf_train_stars
-
-
- ## Prepare validation data inputs
- val_SEDs = test_SEDs
- tf_val_pos = tf_test_pos
- tf_val_stars = tf_test_stars
-
- # Initialize the SED data list
- val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=FLAGS.n_bins_lda)
- for _sed in val_SEDs]
-
- # Prepare the inputs for the validation
- tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
- tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
- # Prepare input validation tuple
- val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
- val_y_inputs = tf_val_stars
- val_data = (val_x_inputs, val_y_inputs)
-
-
- ## Select the model
- if FLAGS.model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims,
- d_max=FLAGS.d_max_nonparam,
- graph_features=FLAGS.graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=FLAGS.output_Q,
- d_max_nonparam=FLAGS.d_max_nonparam,
- graph_features=FLAGS.graph_features,
- l1_rate=FLAGS.l1_rate,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
- elif FLAGS.model == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- output_Q=FLAGS.output_Q,
- d_max_nonparam=FLAGS.d_max_nonparam,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
- elif FLAGS.model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- output_Q=FLAGS.output_Q,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
-
- # # Model Training
- # Prepare the saving callback
- # Prepare to save the model as a callback
- filepath_chkp_callback = FLAGS.chkp_save_path + 'chkp_callback_' + run_id_name + '_cycle1'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
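-    # Note that 'mean_squared_error' (no 'val_' prefix) is a training metric,
-    # so save_best_only keeps the epoch with the lowest training MSE.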
-
- # Prepare the optimisers
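-    # ('lr' is the deprecated Keras alias for 'learning_rate' in these optimisers)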
- param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_param[0])
- non_param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_non_param[0])
-
- print('Starting cycle 1..')
- start_cycle1 = time.time()
-
- if FLAGS.model == 'param':
- tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=FLAGS.batch_size,
- l_rate=FLAGS.l_rate_param[0],
- n_epochs=FLAGS.n_epochs_param[0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
- else:
- tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=FLAGS.batch_size,
- l_rate_param=FLAGS.l_rate_param[0],
- l_rate_non_param=FLAGS.l_rate_non_param[0],
- n_epochs_param=FLAGS.n_epochs_param[0],
- n_epochs_non_param=FLAGS.n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
- # Save weights
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
- end_cycle1 = time.time()
- print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
- # Save optimisation history in the saving dict
- saving_optim_hist['param_cycle1'] = hist_param.history
- if FLAGS.model != 'param':
- saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
- if FLAGS.total_cycles >= 2:
- # Prepare to save the model as a callback
- filepath_chkp_callback = FLAGS.chkp_save_path + 'chkp_callback_' + run_id_name + '_cycle2'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_param[1])
- non_param_optim = tfa.optimizers.RectifiedAdam(lr=FLAGS.l_rate_non_param[1])
-
- print('Starting cycle 2..')
- start_cycle2 = time.time()
-
-
- # Compute the next cycle
- if FLAGS.model == 'param':
- tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=FLAGS.batch_size,
- l_rate=FLAGS.l_rate_param[1],
- n_epochs=FLAGS.n_epochs_param[1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
- else:
- # Compute the next cycle
- tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=FLAGS.batch_size,
- l_rate_param=FLAGS.l_rate_param[1],
- l_rate_non_param=FLAGS.l_rate_non_param[1],
- n_epochs_param=FLAGS.n_epochs_param[1],
- n_epochs_non_param=FLAGS.n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
- # Save the weights at the end of the second cycle
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
- end_cycle2 = time.time()
- print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
- # Save optimisation history in the saving dict
- saving_optim_hist['param_cycle2'] = hist_param_2.history
- if FLAGS.model != 'param':
- saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
- # Save optimisation history dictionary
- np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
- ## Close log file
-    print('\nGoodbye.')
- sys.stdout = old_stdout
- log_file.close()
-
-
-def evaluate_model():
- """ Evaluate the trained model."""
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = FLAGS.model + FLAGS.id_name
- # Define paths
- log_save_file = FLAGS.base_path + FLAGS.log_folder
-
- # Define saved model to use
- if FLAGS.saved_model_type == 'checkpoint':
- weights_paths = FLAGS.chkp_save_path + 'chkp_callback_' + run_id_name + '_' + FLAGS.saved_cycle
-
- elif FLAGS.saved_model_type == 'final':
-        model_save_file = FLAGS.base_path + FLAGS.model_folder
- weights_paths = model_save_file + 'chkp_' + run_id_name + '_' + FLAGS.saved_cycle
-
-
- ## Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- ## Check GPU and tensorflow version
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
-
- ## Load datasets
- train_dataset = np.load(FLAGS.dataset_folder + FLAGS.train_dataset_file, allow_pickle=True)[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(FLAGS.dataset_folder + FLAGS.test_dataset_file, allow_pickle=True)[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
-
- ## Prepare models
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=FLAGS.n_zernikes, wfe_dim=FLAGS.pupil_diameter)
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # Prepare np input
- simPSF_np = wf.SimPSFToolkit(zernikes, max_order=FLAGS.n_zernikes,
- pupil_diameter=FLAGS.pupil_diameter, output_dim=FLAGS.output_dim,
- oversampling_rate=FLAGS.oversampling_rate, output_Q=FLAGS.output_Q)
- simPSF_np.gen_random_Z_coeffs(max_order=FLAGS.n_zernikes)
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=FLAGS.pupil_diameter, N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
- # Outputs (needed for the MCCD model)
- outputs = tf_noisy_train_stars
-
-
- ## Create the model
- ## Select the model
- if FLAGS.model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims,
- d_max=FLAGS.d_max_nonparam,
- graph_features=FLAGS.graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=FLAGS.output_Q,
- d_max_nonparam=FLAGS.d_max_nonparam,
- graph_features=FLAGS.graph_features,
- l1_rate=FLAGS.l1_rate,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
- elif FLAGS.model == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- output_Q=FLAGS.output_Q,
- d_max_nonparam=FLAGS.d_max_nonparam,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
- elif FLAGS.model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- output_Q=FLAGS.output_Q,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
- ## Load the model's weights
- tf_semiparam_field.load_weights(weights_paths)
-
- ## Prepare ground truth model
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=FLAGS.GT_n_zernikes, wfe_dim=FLAGS.pupil_diameter)
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # Initialize the model
- GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=FLAGS.batch_size,
- output_Q=FLAGS.output_Q,
- d_max_nonparam=FLAGS.d_max_nonparam,
- output_dim=FLAGS.output_dim,
- n_zernikes=FLAGS.GT_n_zernikes,
- d_max=FLAGS.d_max,
- x_lims=FLAGS.x_lims,
- y_lims=FLAGS.y_lims)
-
-    # Configure the ground-truth model: assign the dataset's C_poly coefficient
-    # matrix and zero the non-parametric OPD so the field is purely parametric
- GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
- _ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
- ## Metric evaluation on the test dataset
- print('\n***\nMetric evaluation on the test dataset\n***\n')
-
- # Polychromatic star reconstructions
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=FLAGS.n_bins_lda,
- batch_size=FLAGS.eval_batch_size)
-
- poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
-    lambda_list = np.arange(0.55, 0.9, 0.01)  # 10 nm spacing (wavelengths in um)
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
- mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
- # OPD metrics
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=FLAGS.eval_batch_size)
-
- opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
- # Shape metrics
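-    # Shapes are evaluated at super-resolution (output_Q=1) on larger 64x64
-    # stamps, which limits pixelisation effects in the ellipticity measurements.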
- shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=FLAGS.n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=FLAGS.eval_batch_size)
-
- # Save metrics
- test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
- ## Metric evaluation on the train dataset
- print('\n***\nMetric evaluation on the train dataset\n***\n')
-
- # Polychromatic star reconstructions
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=FLAGS.n_bins_lda,
- batch_size=FLAGS.eval_batch_size)
-
- train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
-    lambda_list = np.arange(0.55, 0.9, 0.01)  # 10 nm spacing (wavelengths in um)
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
- train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
- # OPD metrics
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=FLAGS.eval_batch_size)
-
- train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
- # Shape metrics
- train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=FLAGS.n_bins_lda,
- output_Q=1,
- output_dim=64,
- batch_size=FLAGS.eval_batch_size)
-
- # Save metrics into dictionary
- train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
- ## Save results
- metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
- output_path = FLAGS.metric_base_path + 'metrics-' + run_id_name
- np.save(output_path, metrics, allow_pickle=True)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
- ## Close log file
-    print('\nGoodbye.')
- sys.stdout = old_stdout
- log_file.close()
-
-
-
-def main(_):
- train_model()
- evaluate_model()
-
-if __name__ == "__main__":
- app.run(main)
diff --git a/jz-submissions/scripts/train_eval_poly_200_click.py b/jz-submissions/scripts/train_eval_poly_200_click.py
deleted file mode 100644
index 42667a91..00000000
--- a/jz-submissions/scripts/train_eval_poly_200_click.py
+++ /dev/null
@@ -1,843 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# PSF modelling and evaluation
-
-# Import packages
-import sys
-import numpy as np
-import time
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-import click
-
-# from absl import app
-# from absl import flags
-
-@click.command()
-
-## Training options
-# Model definition
-@click.option(
- "--model",
- default="poly",
- type=str,
- help="Model type. Options are: 'mccd', 'poly, 'param'.")
-@click.option(
- "--id_name",
- default="-coherent_euclid_200stars",
- type=str,
- help="Model saving id.")
-# Saving paths
-@click.option(
- "--base_path",
- default="/gpfswork/rech/xdy/ulx23va/wf-outputs/",
- type=str,
- help="Base path for saving files.")
-@click.option(
- "--log_folder",
- default="log-files/",
- type=str,
- help="Folder name to save log files.")
-@click.option(
- "--model_folder",
- default="chkp/",
- type=str,
- help="Folder name to save trained models.")
-@click.option(
- "--optim_hist_folder",
- default="optim-hist/",
- type=str,
- help="Folder name to save optimisation history files.")
-@click.option(
- "--chkp_save_path",
- default="/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/",
- type=str,
- help="Path to save model checkpoints during training.")
-# Input dataset paths
-@click.option(
- "--dataset_folder",
- default="/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/",
- type=str,
- help="Folder path of datasets.")
-@click.option(
- "--train_dataset_file",
- default="train_Euclid_res_200_TrainStars_id_001.npy",
- type=str,
- help="Train dataset file name.")
-@click.option(
- "--test_dataset_file",
- default="test_Euclid_res_id_001.npy",
- type=str,
- help="Test dataset file name.")
-# Model parameters
-@click.option(
- "--n_zernikes",
- default=15,
- type=int,
- help="Zernike polynomial modes to use on the parametric part.")
-@click.option(
- "--pupil_diameter",
- default=256,
- type=int,
- help="Dimension of the OPD/Wavefront space.")
-@click.option(
- "--n_bins_lda",
- default=20,
- type=int,
- help="Number of wavelength bins to use to reconstruct polychromatic objects.")
-@click.option(
- "--output_q",
- default=3.,
- type=float,
- help="Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model.")
-@click.option(
- "--oversampling_rate",
- default=3.,
- type=float,
- help="Oversampling rate used for the OPD/WFE PSF model.")
-@click.option(
- "--output_dim",
- default=32,
- type=int,
- help="Dimension of the pixel PSF postage stamp.")
-@click.option(
- "--d_max",
- default=2,
- type=int,
- help="Max polynomial degree of the parametric part.")
-@click.option(
- "--d_max_nonparam",
- default=3,
- type=int,
- help="Max polynomial degree of the non-parametric part.")
-@click.option(
- "--x_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the x axis.")
-@click.option(
- "--y_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the y axis.")
-@click.option(
- "--graph_features",
- default=10,
- type=int,
- help="Number of graph-constrained features of the non-parametric part.")
-@click.option(
- "--l1_rate",
- default=1e-8,
- type=float,
- help="L1 regularisation parameter for the non-parametric part.")
-# Training parameters
-@click.option(
- "--batch_size",
- default=32,
- type=int,
- help="Batch size used for the trainingin the stochastic gradient descend type of algorithm.")
-@click.option(
- "--l_rate_param",
- nargs=2,
- default=[1e-2, 1e-2],
- type=float,
- help="Learning rates for the parametric parts.")
-@click.option(
- "--l_rate_non_param",
- nargs=2,
- default=[1e-1, 1e-1],
- type=float,
- help="Learning rates for the non-parametric parts.")
-@click.option(
- "--n_epochs_param",
- nargs=2,
- default=[20, 20],
- type=int,
- help="Number of training epochs of the parametric parts.")
-@click.option(
- "--n_epochs_non_param",
- nargs=2,
- default=[100, 120],
- type=int,
- help="Number of training epochs of the non-parametric parts.")
-@click.option(
- "--total_cycles",
- default=2,
- type=int,
- help="Total amount of cycles to perform. For the moment the only available options are '1' or '2'.")
-## Evaluation flags
-# Saving paths
-@click.option(
- "--metric_base_path",
- default="/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/",
- type=str,
- help="Base path for saving metric files.")
-@click.option(
- "--saved_model_type",
- default="final",
- type=str,
- help="Type of saved model to use for the evaluation. Can be 'final' or 'checkpoint'.")
-@click.option(
- "--saved_cycle",
- default="cycle2",
- type=str,
- help="Saved cycle to use for the evaluation. Can be 'cycle1' or 'cycle2'.")
-# Evaluation parameters
-@click.option(
- "--gt_n_zernikes",
- default=45,
- type=int,
- help="Zernike polynomial modes to use on the ground truth model parametric part.")
-@click.option(
- "--eval_batch_size",
- default=16,
- type=int,
- help="Batch size to use for the evaluation.")
-
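-# Click passes each option defined above to main() as a keyword argument,
-# collected in **args.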
-def main(**args):
- train_model(**args)
- evaluate_model(**args)
-
-
-def train_model(**args):
- """ Train the model defined in the """
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = args['model'] + args['id_name']
-
- # Define paths
- log_save_file = args['base_path'] + args['log_folder']
-    model_save_file = args['base_path'] + args['model_folder']
- optim_hist_file = args['base_path'] + args['optim_hist_folder']
- saving_optim_hist = dict()
-
-
- # Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '_output.log','w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- # Print GPU and tensorflow info
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
- ## Prepare the inputs
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=args['n_zernikes'], wfe_dim=args['pupil_diameter'])
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
- print('Zernike cube:')
- print(tf_zernike_cube.shape)
-
-
- ## Load the dictionaries
- train_dataset = np.load(args['dataset_folder'] + args['train_dataset_file'], allow_pickle=True)[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(args['dataset_folder'] + args['test_dataset_file'], allow_pickle=True)[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
-
- ## Generate initializations
- # Prepare np input
- simPSF_np = wf.SimPSFToolkit(zernikes, max_order=args['n_zernikes'],
- pupil_diameter=args['pupil_diameter'], output_dim=args['output_dim'],
- oversampling_rate=args['oversampling_rate'], output_Q=args['output_q'])
- simPSF_np.gen_random_Z_coeffs(max_order=args['n_zernikes'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=args['pupil_diameter'], N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
- # Initialize the SED data list
- packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=args['n_bins_lda'])
- for _sed in train_SEDs]
-
-
- # Prepare the inputs for the training
- tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
- tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
- inputs = [tf_train_pos, tf_packed_SED_data]
-
- # Select the observed stars (noisy or noiseless)
- outputs = tf_noisy_train_stars
- # outputs = tf_train_stars
-
-
- ## Prepare validation data inputs
- val_SEDs = test_SEDs
- tf_val_pos = tf_test_pos
- tf_val_stars = tf_test_stars
-
- # Initialize the SED data list
- val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=args['n_bins_lda'])
- for _sed in val_SEDs]
-
- # Prepare the inputs for the validation
- tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
- tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
- # Prepare input validation tuple
- val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
- val_y_inputs = tf_val_stars
- val_data = (val_x_inputs, val_y_inputs)
-
-
- ## Select the model
- if args['model'] == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- d_max=args['d_max_nonparam'],
- graph_features=args['graph_features'])
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
- elif args['model'] == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
- elif args['model'] == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
-
- # # Model Training
- # Prepare the saving callback
- # Prepare to save the model as a callback
- filepath_chkp_callback = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_cycle1'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_param'][0])
- non_param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_non_param'][0])
-
- print('Starting cycle 1..')
- start_cycle1 = time.time()
-
- if args['model'] == 'param':
- tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate=args['l_rate_param'][0],
- n_epochs=args['n_epochs_param'][0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
- else:
- tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate_param=args['l_rate_param'][0],
- l_rate_non_param=args['l_rate_non_param'][0],
- n_epochs_param=args['n_epochs_param'][0],
- n_epochs_non_param=args['n_epochs_non_param'][0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
- # Save weights
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
- end_cycle1 = time.time()
- print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
- # Save optimisation history in the saving dict
- saving_optim_hist['param_cycle1'] = hist_param.history
- if args['model'] != 'param':
- saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
- if args['total_cycles'] >= 2:
- # Prepare to save the model as a callback
- filepath_chkp_callback = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_cycle2'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_param'][1])
- non_param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_non_param'][1])
-
- print('Starting cycle 2..')
- start_cycle2 = time.time()
-
-
- # Compute the next cycle
- if args['model'] == 'param':
- tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate=args['l_rate_param'][1],
- n_epochs=args['n_epochs_param'][1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
- else:
- # Compute the next cycle
- tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate_param=args['l_rate_param'][1],
- l_rate_non_param=args['l_rate_non_param'][1],
- n_epochs_param=args['n_epochs_param'][1],
- n_epochs_non_param=args['n_epochs_non_param'][1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
- # Save the weights at the end of the second cycle
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
- end_cycle2 = time.time()
- print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
- # Save optimisation history in the saving dict
- saving_optim_hist['param_cycle2'] = hist_param_2.history
- if args['model'] != 'param':
- saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
- # Save optimisation history dictionary
- np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
- ## Close log file
-    print('\nGoodbye.')
- sys.stdout = old_stdout
- log_file.close()
-
-
-def evaluate_model(**args):
- """ Evaluate the trained model."""
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = args['model'] + args['id_name']
- # Define paths
- log_save_file = args['base_path'] + args['log_folder']
-
- # Define saved model to use
- if args['saved_model_type'] == 'checkpoint':
- weights_paths = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_' + args['saved_cycle']
-
- elif args['saved_model_type'] == 'final':
-        model_save_file = args['base_path'] + args['model_folder']
- weights_paths = model_save_file + 'chkp_' + run_id_name + '_' + args['saved_cycle']
-
-
- ## Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- ## Check GPU and tensorflow version
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
-
- ## Load datasets
- train_dataset = np.load(args['dataset_folder'] + args['train_dataset_file'], allow_pickle=True)[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(args['dataset_folder'] + args['test_dataset_file'], allow_pickle=True)[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
-
- ## Prepare models
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=args['n_zernikes'], wfe_dim=args['pupil_diameter'])
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # Prepare np input
- simPSF_np = wf.SimPSFToolkit(zernikes, max_order=args['n_zernikes'],
- pupil_diameter=args['pupil_diameter'], output_dim=args['output_dim'],
- oversampling_rate=args['oversampling_rate'], output_Q=args['output_q'])
- simPSF_np.gen_random_Z_coeffs(max_order=args['n_zernikes'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=args['pupil_diameter'], N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
- # Outputs (needed for the MCCD model)
- outputs = tf_noisy_train_stars
-
-
- ## Create the model
- ## Select the model
- if args['model'] == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- d_max=args['d_max_nonparam'],
- graph_features=args['graph_features'])
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
- elif args['model'] == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
- elif args['model'] == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
- ## Load the model's weights
- tf_semiparam_field.load_weights(weights_paths)
-
- ## Prepare ground truth model
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=args['gt_n_zernikes'], wfe_dim=args['pupil_diameter'])
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # Initialize the model
- GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- output_dim=args['output_dim'],
- n_zernikes=args['gt_n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'])
-
-    # Configure the ground-truth model: assign the dataset's C_poly coefficient
-    # matrix and zero the non-parametric OPD so the field is purely parametric
- GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
- _ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat))
-
-
- ## Metric evaluation on the test dataset
- print('\n***\nMetric evaluation on the test dataset\n***\n')
-
- # Polychromatic star reconstructions
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=args['n_bins_lda'],
- batch_size=args['eval_batch_size'])
-
- poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
-    lambda_list = np.arange(0.55, 0.9, 0.01)  # 10 nm spacing (wavelengths in um)
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list)
-
- mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
- # OPD metrics
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=args['eval_batch_size'])
-
- opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
- # Shape metrics
- shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=args['n_bins_lda'],
- output_Q=1,
- output_dim=64,
- batch_size=args['eval_batch_size'])
-
- # Save metrics
- test_metrics = {'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
- ## Metric evaluation on the train dataset
- print('\n***\nMetric evaluation on the train dataset\n***\n')
-
- # Polychromatic star reconstructions
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=args['n_bins_lda'],
- batch_size=args['eval_batch_size'])
-
- train_poly_metric = {'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
-    lambda_list = np.arange(0.55, 0.9, 0.01)  # 10 nm spacing (wavelengths in um)
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list)
-
- train_mono_metric = {'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
- # OPD metrics
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=args['eval_batch_size'])
-
- train_opd_metric = { 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
- # Shape metrics
- train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=args['n_bins_lda'],
- output_Q=1,
- output_dim=64,
- batch_size=args['eval_batch_size'])
-
- # Save metrics into dictionary
- train_metrics = {'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
- ## Save results
- metrics = {'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
- output_path = args['metric_base_path'] + 'metrics-' + run_id_name
- np.save(output_path, metrics, allow_pickle=True)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
- ## Close log file
-    print('\nGoodbye.')
- sys.stdout = old_stdout
- log_file.close()
-
-
-
-if __name__ == "__main__":
- main()
-
diff --git a/jz-submissions/scripts/training_mccd_1000.py b/jz-submissions/scripts/training_mccd_1000.py
deleted file mode 100644
index b3ccb7bf..00000000
--- a/jz-submissions/scripts/training_mccd_1000.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_1000stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file= base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
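-# Per-epoch checkpoints are written to scratch storage, which is large but
-# typically purged, while final weights go to the work area defined above.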
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Decimation factor for Zernike polynomials
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Let's take a subset of the testing data for the validation
-# in order to test things faster
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
-    # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model-saving checkpoint callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
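
Each of the deleted scripts builds the Zernike cube with a preallocated array and an explicit loop. For reference, a minimal sketch of the same step written more compactly, assuming `zernikes` is the list of equally shaped 2-D NumPy maps returned by `wf.utils.zernike_generator` above:

import numpy as np
import tensorflow as tf

# Stack the list of 2-D Zernike maps into one (n_zernikes, H, W) cube and
# zero out any NaNs in the maps (the scripts above do the same with
# boolean indexing on the preallocated array).
np_zernike_cube = np.nan_to_num(np.stack(zernikes, axis=0), nan=0.0)
tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
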
diff --git a/jz-submissions/scripts/training_mccd_200.py b/jz-submissions/scripts/training_mccd_200.py
deleted file mode 100644
index b601c7ed..00000000
--- a/jz-submissions/scripts/training_mccd_200.py
+++ /dev/null
@@ -1,346 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_200stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_200_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Validation reuses the full test set here; uncomment the slices below to subsample and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
-    # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model-saving checkpoint callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
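
The `sys.stdout` swap at the top of every script has to be undone by hand at the end, and the stream is never restored if the run raises in between. A sketch of the same logging with `contextlib.redirect_stdout`, which restores the stream automatically (same `log_save_file` and `run_id_name` as in the scripts):

import contextlib

log_path = log_save_file + run_id_name + '_output.log'
with open(log_path, 'w') as log_file, contextlib.redirect_stdout(log_file):
    print('Starting the log file.')
    # ... the rest of the training script runs here; stdout and the file
    # are restored/closed automatically, even if an exception is raised.
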
diff --git a/jz-submissions/scripts/training_mccd_2000.py b/jz-submissions/scripts/training_mccd_2000.py
deleted file mode 100644
index affeb8ea..00000000
--- a/jz-submissions/scripts/training_mccd_2000.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_2000stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_2000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Validation reuses the full test set here; uncomment the slices below to subsample and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
-    # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model-saving checkpoint callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
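
The packed-SED block converts the per-star list returned by `wf.utils.generate_packed_elems` into a single tensor and swaps its last two axes. A shape-only sketch of that step; the exact per-star layout is an assumption for illustration, only the `perm=[0, 2, 1]` transpose is taken from the scripts:

import tensorflow as tf

# Assumed layout: one entry per star, each holding a few 1-D arrays of
# length n_bins_lda (e.g. per-bin wavelengths and normalised SED weights).
tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
# shape: (n_stars, n_elems, n_bins_lda)
tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
# shape: (n_stars, n_bins_lda, n_elems), i.e. one packed element set per bin
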
diff --git a/jz-submissions/scripts/training_mccd_500.py b/jz-submissions/scripts/training_mccd_500.py
deleted file mode 100644
index 7354a66e..00000000
--- a/jz-submissions/scripts/training_mccd_500.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-model = 'mccd'
-# model = 'poly'
-# model = 'param'
-
-id_name = '-coherent_euclid_500stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_500_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Validation reuses the full test set here; uncomment the slices below to subsample and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
-    # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model-saving checkpoint callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
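
In every script, cycle 2 repeats cycle 1 verbatim except for the `[1]` indices and `first_run=False`, so the two blocks collapse into a loop. A sketch reusing the exact `general_train_cycle` call from the scripts above (all other names as defined there):

import time
import tensorflow as tf
import tensorflow_addons as tfa
import wf_psf as wf

for cycle in range(2):
    tag = '_cycle%d' % (cycle + 1)
    model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
        chkp_save_file + 'chkp_callback_' + run_id_name + tag,
        monitor='mean_squared_error', verbose=1, save_best_only=True,
        save_weights_only=False, mode='min', save_freq='epoch', options=None)

    start = time.time()
    tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
        tf_semiparam_field,
        inputs=inputs, outputs=outputs, val_data=val_data,
        batch_size=batch_size,
        l_rate_param=l_rate_param[cycle],
        l_rate_non_param=l_rate_non_param[cycle],
        n_epochs_param=n_epochs_param[cycle],
        n_epochs_non_param=n_epochs_non_param[cycle],
        param_optim=tfa.optimizers.RectifiedAdam(lr=l_rate_param[cycle]),
        non_param_optim=tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[cycle]),
        param_loss=None, non_param_loss=None,
        param_metrics=None, non_param_metrics=None,
        param_callback=None, non_param_callback=None,
        general_callback=[model_chkp_callback],
        first_run=(cycle == 0),
        verbose=2)

    # Save the weights and the optimisation history for this cycle.
    tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + tag)
    print('Cycle%d elapsed time: %f' % (cycle + 1, time.time() - start))
    saving_optim_hist['param' + tag] = hist_param.history
    saving_optim_hist['nonparam' + tag] = hist_non_param.history
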
diff --git a/jz-submissions/scripts/training_param_1000.py b/jz-submissions/scripts/training_param_1000.py
deleted file mode 100644
index ff66daea..00000000
--- a/jz-submissions/scripts/training_param_1000.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_1000stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 45
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-n_epochs_param = [30, 40]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Validation reuses the full test set here; uncomment the slices below to subsample and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- raise NotImplementedError
-
-elif model == 'poly':
-    # Initialize the model
- raise NotImplementedError
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model-saving checkpoint callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
-tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate=l_rate_param[0],
- n_epochs=n_epochs_param[0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
-    l_rate=l_rate_param[1],
-    n_epochs=n_epochs_param[1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
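
The `ModelCheckpoint` configuration is rebuilt before each cycle with only the path suffix changing. A small helper keeps it in one place; `make_chkp_callback` is a hypothetical name, the arguments are exactly those used above:

import tensorflow as tf

def make_chkp_callback(cycle_id):
    # Save the full model each epoch, keeping the checkpoint with the
    # lowest mean squared error seen so far.
    path = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle' + str(cycle_id)
    return tf.keras.callbacks.ModelCheckpoint(
        path, monitor='mean_squared_error', verbose=1, save_best_only=True,
        save_weights_only=False, mode='min', save_freq='epoch', options=None)

model_chkp_callback = make_chkp_callback(1)
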
diff --git a/jz-submissions/scripts/training_param_200.py b/jz-submissions/scripts/training_param_200.py
deleted file mode 100644
index aa0c45e9..00000000
--- a/jz-submissions/scripts/training_param_200.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_200stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_200_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 45
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-n_epochs_param = [30, 40]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Validation reuses the full test set here; uncomment the slices below to subsample and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- raise NotImplementedError
-
-elif model == 'poly':
-    # Initialize the model
- raise NotImplementedError
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model-saving checkpoint callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
-tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate=l_rate_param[0],
- n_epochs=n_epochs_param[0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
-    l_rate=l_rate_param[1],
-    n_epochs=n_epochs_param[1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
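
Every run ends by dumping `saving_optim_hist` with `np.save`. Since that file holds a plain dict of Keras `History.history` dicts, it can be read back with the same `allow_pickle` idiom the scripts use for the datasets; the `'loss'` key is the standard Keras name and is an assumption here:

import numpy as np

hist = np.load(optim_hist_file + 'optim_hist_' + run_id_name + '.npy',
               allow_pickle=True)[()]
# Per-epoch training loss of the first parametric cycle.
print(hist['param_cycle1']['loss'])
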
diff --git a/jz-submissions/scripts/training_param_2000.py b/jz-submissions/scripts/training_param_2000.py
deleted file mode 100644
index ebbff6f9..00000000
--- a/jz-submissions/scripts/training_param_2000.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_2000stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_2000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 45
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-n_epochs_param = [30, 40]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
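-# Note: np.load on a pickled dict returns a 0-d object array; indexing with [()] recovers the dict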
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Use the full test set for validation; uncomment the slices
-# below to validate on a subset and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- raise NotImplementedError
-
-elif model == 'poly':
- # Initialize the model
- raise NotImplementedError
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model checkpoint saving callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimiser
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
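-# param_train_cycle optimises only the parametric (Zernike) part of the model (see wf_psf.train_utils)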
-tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate=l_rate_param[0],
- n_epochs=n_epochs_param[0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimiser
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate=l_rate_param[1],
- n_epochs=n_epochs_param[1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
diff --git a/jz-submissions/scripts/training_param_500.py b/jz-submissions/scripts/training_param_500.py
deleted file mode 100644
index 4d1835ad..00000000
--- a/jz-submissions/scripts/training_param_500.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-# model = 'poly'
-model = 'param'
-
-id_name = '-coherent_euclid_500stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_500_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 45
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 3 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-n_epochs_param = [30, 40]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
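-# Note: np.load on a pickled dict returns a 0-d object array; indexing with [()] recovers the dict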
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Use the full test set for validation; uncomment the slices
-# below to validate on a subset and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- raise NotImplementedError
-
-elif model == 'poly':
- # Initialize the model
- raise NotImplementedError
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model checkpoint saving callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimiser
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
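-# param_train_cycle optimises only the parametric (Zernike) part of the model (see wf_psf.train_utils)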
-tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate=l_rate_param[0],
- n_epochs=n_epochs_param[0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimiser
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate=l_rate_param[1],
- n_epochs=n_epochs_param[1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- verbose=2)
-
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
diff --git a/jz-submissions/scripts/training_poly_1000.py b/jz-submissions/scripts/training_poly_1000.py
deleted file mode 100644
index 600fddee..00000000
--- a/jz-submissions/scripts/training_poly_1000.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_1000stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_1000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
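-# Note: np.load on a pickled dict returns a 0-d object array; indexing with [()] recovers the dict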
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Use the full test set for validation; uncomment the slices
-# below to validate on a subset and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model checkpoint saving callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
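-# general_train_cycle alternates a parametric (Zernike) training stage and a non-parametric training stage (see wf_psf.train_utils)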
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
diff --git a/jz-submissions/scripts/training_poly_200.py b/jz-submissions/scripts/training_poly_200.py
deleted file mode 100644
index 38e1877e..00000000
--- a/jz-submissions/scripts/training_poly_200.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_test-coherent_euclid_200stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_200_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
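-# Note: np.load on a pickled dict returns a 0-d object array; indexing with [()] recovers the dict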
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Use the full test set for validation; uncomment the slices
-# below to validate on a subset and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model checkpoint saving callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
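-# general_train_cycle alternates a parametric (Zernike) training stage and a non-parametric training stage (see wf_psf.train_utils)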
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
diff --git a/jz-submissions/scripts/training_poly_2000.py b/jz-submissions/scripts/training_poly_2000.py
deleted file mode 100644
index 588dca42..00000000
--- a/jz-submissions/scripts/training_poly_2000.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_2000stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_2000_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
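-# Note: np.load on a pickled dict returns a 0-d object array; indexing with [()] recovers the dict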
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Use the full test set for validation; uncomment the slices
-# below to validate on a subset and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model checkpoint saving callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
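-# general_train_cycle alternates a parametric (Zernike) training stage and a non-parametric training stage (see wf_psf.train_utils)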
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\nGoodbye.')
-sys.stdout = old_stdout
-log_file.close()
diff --git a/jz-submissions/scripts/training_poly_500.py b/jz-submissions/scripts/training_poly_500.py
deleted file mode 100644
index 89539ed7..00000000
--- a/jz-submissions/scripts/training_poly_500.py
+++ /dev/null
@@ -1,345 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# # PSF modelling
-
-
-#@title Import packages
-import sys
-import numpy as np
-import time
-
-# Import wavefront code
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-# Start measuring elapsed time
-starting_time = time.time()
-
-# # Define saving paths
-# model = 'mccd'
-model = 'poly'
-# model = 'param'
-
-id_name = '_v2-coherent_euclid_500stars'
-run_id_name = model + id_name
-
-# Saving paths
-base_path = '/gpfswork/rech/xdy/ulx23va/wf-outputs/'
-log_save_file = base_path + 'log-files/'
-model_save_file = base_path + 'chkp/'
-optim_hist_file = base_path + 'optim-hist/'
-saving_optim_hist = dict()
-
-chkp_save_file = '/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/'
-
-# Input paths
-dataset_path = '/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/'
-train_path = 'train_Euclid_res_500_TrainStars_id_001.npy'
-test_path = 'test_Euclid_res_id_001.npy'
-
-
-# Save output prints to logfile
-old_stdout = sys.stdout
-log_file = open(log_save_file + run_id_name + '_output.log','w')
-sys.stdout = log_file
-print('Starting the log file.')
-
-# Check GPU
-device_name = tf.test.gpu_device_name()
-if device_name != '/device:GPU:0':
- raise SystemError('GPU device not found')
-print('Found GPU at: {}'.format(device_name))
-print('tf_version: ' + str(tf.__version__))
-
-# # Define new model
-
-# Number of Zernike polynomials used in the model
-n_zernikes = 15
-
-# Some parameters
-pupil_diameter = 256
-n_bins_lda = 20
-
-output_Q = 3.
-oversampling_rate = 3.
-
-batch_size = 16
-output_dim = 32
-d_max = 2
-d_max_nonparam = 5 # polynomial-constraint features
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-graph_features = 10 # Graph-constraint features
-l1_rate = 1e-8 # L1 regularisation
-
-# Learning rates and number of epochs
-l_rate_param = [1e-2, 1e-2]
-l_rate_non_param = [1e-1, 1e-1]
-
-n_epochs_param = [20, 20]
-n_epochs_non_param = [100, 120]
-
-
-## Prepare the inputs
-
-# Generate Zernike maps
-zernikes = wf.utils.zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diameter)
-
-# Now as cubes
-np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
-for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
-np_zernike_cube[np.isnan(np_zernike_cube)] = 0
-tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
-print('Zernike cube:')
-print(tf_zernike_cube.shape)
-
-
-## Load the dictionaries
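-# Note: np.load on a pickled dict returns a 0-d object array; indexing with [()] recovers the dict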
-train_dataset = np.load(dataset_path + train_path, allow_pickle=True)[()]
-# train_stars = train_dataset['stars']
-# noisy_train_stars = train_dataset['noisy_stars']
-# train_pos = train_dataset['positions']
-train_SEDs = train_dataset['SEDs']
-# train_zernike_coef = train_dataset['zernike_coef']
-train_C_poly = train_dataset['C_poly']
-train_parameters = train_dataset['parameters']
-
-
-test_dataset = np.load(dataset_path + test_path, allow_pickle=True)[()]
-# test_stars = test_dataset['stars']
-# test_pos = test_dataset['positions']
-test_SEDs = test_dataset['SEDs']
-# test_zernike_coef = test_dataset['zernike_coef']
-
-# Convert to tensor
-tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
-tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
-tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-
-tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
-tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
-print('Dataset parameters:')
-print(train_parameters)
-
-
-## Generate initializations
-
-# Prepare np input
-simPSF_np = wf.SimPSFToolkit(zernikes, max_order=n_zernikes,
- pupil_diameter=pupil_diameter, output_dim=output_dim,
- oversampling_rate=oversampling_rate, output_Q=output_Q)
-simPSF_np.gen_random_Z_coeffs(max_order=n_zernikes)
-z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
-simPSF_np.set_z_coeffs(z_coeffs)
-simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
-# Obscurations
-obscurations = simPSF_np.generate_pupil_obscurations(N_pix=pupil_diameter, N_filter=2)
-tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
-# Initialize the SED data list
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in train_SEDs]
-
-
-# Prepare the inputs for the training
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-inputs = [tf_train_pos, tf_packed_SED_data]
-
-# Select the observed stars (noisy or noiseless)
-outputs = tf_noisy_train_stars
-# outputs = tf_train_stars
-
-
-## Prepare validation data inputs
-
-# Use the full test set for validation; uncomment the slices
-# below to validate on a subset and speed things up
-val_SEDs = test_SEDs # [0:50, :, :]
-tf_val_pos = tf_test_pos # [0:50, :]
-tf_val_stars = tf_test_stars # [0:50, :, :]
-
-# Initialize the SED data list
-val_packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
- for _sed in val_SEDs]
-
-# Prepare the inputs for the validation
-tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
-tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
-# Prepare input validation tuple
-val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
-val_y_inputs = tf_val_stars
-val_data = (val_x_inputs, val_y_inputs)
-
-
-## Select the model
-if model == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=x_lims,
- y_lims=y_lims,
- d_max=d_max_nonparam,
- graph_features=graph_features)
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- graph_features=graph_features,
- l1_rate=l1_rate,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'poly':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- d_max_nonparam=d_max_nonparam,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-elif model == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=batch_size,
- output_Q=output_Q,
- output_dim=output_dim,
- n_zernikes=n_zernikes,
- d_max=d_max,
- x_lims=x_lims,
- y_lims=y_lims)
-
-
-
-
-# # Model Training
-
-# Prepare the model checkpoint saving callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle1'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[0])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[0])
-
-print('Starting cycle 1..')
-start_cycle1 = time.time()
-
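-# general_train_cycle alternates a parametric (Zernike) training stage and a non-parametric training stage (see wf_psf.train_utils)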
-tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[0],
- l_rate_non_param=l_rate_non_param[0],
- n_epochs_param=n_epochs_param[0],
- n_epochs_non_param=n_epochs_non_param[0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- verbose=2)
-
-# Save weights
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
-end_cycle1 = time.time()
-print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle1'] = hist_param.history
-saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
-
-
-
-# Prepare to save the model as a callback
-filepath_chkp_callback = chkp_save_file + 'chkp_callback_' + run_id_name + '_cycle2'
-model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error', verbose=1, save_best_only=True,
- save_weights_only=False, mode='min', save_freq='epoch',
- options=None)
-
-# Prepare the optimisers
-param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_param[1])
-non_param_optim = tfa.optimizers.RectifiedAdam(lr=l_rate_non_param[1])
-
-print('Starting cycle 2..')
-start_cycle2 = time.time()
-
-# Compute the next cycle
-tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=batch_size,
- l_rate_param=l_rate_param[1],
- l_rate_non_param=l_rate_non_param[1],
- n_epochs_param=n_epochs_param[1],
- n_epochs_non_param=n_epochs_non_param[1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None, non_param_loss=None,
- param_metrics=None, non_param_metrics=None,
- param_callback=None, non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- verbose=2)
-
-# Save the weights at the end of the second cycle
-tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
-end_cycle2 = time.time()
-print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
-# Save optimisation history in the saving dict
-saving_optim_hist['param_cycle2'] = hist_param_2.history
-saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
-# Save optimisation history dictionary
-np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
-
-## Print final time
-final_time = time.time()
-print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-## Close log file
-print('\n Good bye..')
-sys.stdout = old_stdout
-log_file.close()
diff --git a/jz-submissions/slurm-logs/slurm_output_logs.md b/jz-submissions/slurm-logs/slurm_output_logs.md
deleted file mode 100644
index c033fecc..00000000
--- a/jz-submissions/slurm-logs/slurm_output_logs.md
+++ /dev/null
@@ -1,2 +0,0 @@
-slurm output logs
-
\ No newline at end of file
diff --git a/long-runs/Input_data.md b/long-runs/Input_data.md
deleted file mode 100644
index 27f6e27d..00000000
--- a/long-runs/Input_data.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Inputs of the WaveDiff model
-
-To train the model, a dataset is needed. The train and test datasets for the WaveDiff model are detailed below.
-
-## Input datasets
-This model uses two datasets, one for optimising the model and another for testing it. Both datasets are organised as Python dictionaries.
-
-### Dataset keys
-
-```
-train_dataset.keys() --> ['stars', 'noisy_stars', 'super_res_stars', 'positions', 'SEDs', 'zernike_coef', 'C_poly', 'parameters']
-
-test_dataset.keys() --> ['stars', 'super_res_stars', 'positions', 'SEDs', 'zernike_coef', 'C_poly', 'parameters']
-```
-
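-Each dictionary is stored in a `.npy` file. A minimal loading sketch (the file name is just an example from this repository's datasets):
-
-```
-import numpy as np
-
-# np.load returns a 0-d object array; the [()] indexing unpacks the dictionary
-train_dataset = np.load('train_Euclid_res_200_TrainStars_id_001.npy', allow_pickle=True)[()]
-```
-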
-The contents of each entry are detailed below.
-
-## Composition of the datasets
-
-- Stars: it is a `numpy` ndarray containing the low resolution observations of each star.
- - Shape: `(n_stars, output_dim, output_dim)`.
-- Noisy stars: it is a `numpy` ndarray containing a noisy version of the low resolution observations of each star.
- - Shape: `(n_stars, output_dim, output_dim)`.
-- Super resolved stars: it is a `numpy` ndarray containing the high resolution observations of each star.
- - Shape: `(n_stars, super_out_res, super_out_res)`.
-- Positions: it is a `numpy` ndarray containing the $(x,y)$ FOV position of each observation.
- - Shape: `(n_stars, 2)`.
- - Order: `pos[0]` $\rightarrow \, x$, `pos[1]` $\rightarrow \, y$
-- SEDs: it is a `numpy` ndarray containing the SED associated with each observed star. For each star, the SED is composed of a set of `n_bins` wavelength values, a set of `n_bins` SED values and a set of `n_bins` bin weights proportional to the relative size of each bin with respect to the whole SED bandwidth.
- - Shape: `(n_stars, n_bins, 3)`.
- - Order: `SED[i,:,0]` $\rightarrow \, \lambda$, `SED[i,:,1]` $\rightarrow \, \text{SED}(\lambda)$, `SED[i,:,2]` $\rightarrow \, \frac{\Delta_{\text{bin}}}{B}$.
-- Zernike_coef: it is a `numpy` ndarray containing the ground truth `n_zernike_gt` coefficients of each observed PSF wavefront error.
- - Shape: `(n_stars, n_zernike_gt, 1)`.
-- C_poly: it is a `numpy` ndarray containing the ground truth coefficients of the spatial variation polynomials associated with each Zernike order. Each polynomial has degree `d_max` and thus the number of coefficients is $n_{\text{max}} = (d_{\text{max}}+1)(d_{\text{max}}+2)/2$.
-  - Shape: `(n_zernike_gt, n_max)`.
-- Parameters: it is a python dictionary containing information about the data generation model parameters.
- - Keys: `['d_max', 'max_order', 'x_lims', 'y_lims', 'grid_points', 'n_bins', 'max_wfe_rms', 'oversampling_rate', 'output_Q', 'output_dim', 'LP_filter_length', 'pupil_diameter', 'euclid_obsc', 'n_stars']`.
-
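-A quick sanity check of the shapes listed above (a sketch, assuming `train_dataset` was loaded as in the earlier snippet):
-
-```
-n_stars = train_dataset['stars'].shape[0]
-assert train_dataset['noisy_stars'].shape == train_dataset['stars'].shape
-assert train_dataset['positions'].shape == (n_stars, 2)
-assert train_dataset['SEDs'].shape[0] == n_stars and train_dataset['SEDs'].shape[2] == 3
-```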
-
-## Tensorflow inputs and outputs
-
-From each dataset, the inputs and outputs for the model will be generated. The inputs of the model, $X$, are the star positions and the SEDs. The outputs of the model, $Y$, are the noisy stars which will be contrasted with the model's predictions $\hat{Y}$ to compute the loss $\mathcal{L}(Y, \hat{Y})$.
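-
-Schematically (an illustration only; the actual losses and training loops are implemented in `wf.train_utils`):
-
-```
-Y = tf_noisy_train_stars                 # observed noisy stars (see Outputs below)
-Y_hat = tf_semiparam_field(inputs)       # model predictions at the star positions
-loss = tf.reduce_mean((Y - Y_hat) ** 2)  # MSE-type loss
-```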
-
-### Inputs
-As mentioned before, the input will contain two tensors: the first one containing the $(x,y)$ FOV positions and the second one containing the SED for each observation.
-
-```
-inputs = [tf_pos, tf_packed_SED_data]
-```
-
-- `tf_pos` is a tensor of shape `([n_stars,2])`. The order of the $(x,y)$ positions is the same as before.
-
-- `tf_packed_SED_data` is a tensor of shape `([n_stars,n_bins,3])`. For each observation $i$ this tensor contains three `n_bins`-point SED features. The first one contains each integer `feasible_N` needed for the upscaling of the wavefront error in order to compute the PSF at each `feasible_wv` (listed in the second position). Finally, in the third position, the SED values are listed for each `feasible_wv`.
- - Notes
-    - The second dimension of the packed SED data can change its length if SED interpolation is performed. The interpolated SED will then have the following number of samples:
-
-    $n_{\text{bins\_interp}} = n_{\text{bins}} \times (\texttt{interp\_pts\_per\_bin} + 1) \; \pm \; 1$,
-
-    where the $+1$ case applies if extrapolation is performed (a point is added to the SED) and the $-1$ case applies otherwise. For example, $n_{\text{bins}} = 20$ with $\texttt{interp\_pts\_per\_bin} = 2$ and extrapolation gives $20 \times 3 + 1 = 61$ samples.
-
- - The `feasible_N` asociated to each `feasible_wv` is calculated using the [`SimPSFToolkit.feasible_N()`](https://github.com/tobias-liaudat/wf-psf/blob/main/wf_psf/SimPSFToolkit.py#L636) method.
-  - The packed SED data is generated with the [generate_packed_elems()](https://github.com/tobias-liaudat/wf-psf/blob/main/wf_psf/utils.py#L44) method of the `wf_utils` class detailed in the `utils.py` file.
- - The packed SED data must be converted to tensor to be an input of the model (a column permutation also takes place):
- ```
- tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
- tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
- ```
-
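-Putting these notes together, a sketch of building the model inputs from a loaded dataset (assuming `simPSF_np`, a `SimPSFToolkit` instance, and `n_bins` are defined as in the training scripts):
-
-```
-import tensorflow as tf
-import wf_psf as wf
-
-# Pack the (feasible_N, feasible_wv, SED value) triplets for each star
-packed_SED_data = [wf.utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins)
-                   for _sed in train_dataset['SEDs']]
-tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
-tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
-tf_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
-inputs = [tf_pos, tf_packed_SED_data]
-```
-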
-### Outputs
-The output of the WaveDiff model contains the noisy low-resolution star observations.
-```
-outputs = [tf_noisy_train_stars]
-```
-
-- `tf_noisy_train_stars` is a tensor of shape `([n_stars, output_dim, output_dim])` containing the noisy observation of each star.
- - Notes
- - The train stars must be converted to tensor in order to be used by the model.
- ```
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- ```
\ No newline at end of file
diff --git a/long-runs/LR-PSF-field-gen-coherentFields.py b/long-runs/LR-PSF-field-gen-coherentFields.py
deleted file mode 100644
index 1d0e078d..00000000
--- a/long-runs/LR-PSF-field-gen-coherentFields.py
+++ /dev/null
@@ -1,200 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import numpy as np
-import matplotlib.pyplot as plt
-import wf_psf
-from tqdm import tqdm
-
-r"""
-This script is used to generate coherent datasets.
-
-By coherent we mean that each smaller dataset is a sub-ensemble
-of all the bigger datasets.
-The number of training stars is determined by the list ``n_star_list``,
-while the number of testing stars is fixed for all datasets to ``n_test_stars``.
-"""
-
-
-# Paths
-
-# SED folder path
-SED_path = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/notebooks/explore_SEDs/save_SEDs/'
-# SED_path = '/local/home/tliaudat/data/'
-
-# Output saving path
-output_folder = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/data/coherent_euclid_dataset/'
-# output_folder = '/local/home/tliaudat/psf-datasets/'
-
-
-# Dataset ID
-dataset_id = 1
-dataset_id_str = '%03d'%(dataset_id)
-
-
-# This list must be in order from bigger to smaller
-n_star_list = [2000, 1000, 500, 200]
-n_test_stars = 400 # 20% of the max training stars
-
-# n_star_list = [50, 40, 30, 20]
-# n_test_stars = 10 # 20% of the max training stars
-
-
-
-# Parameters
-d_max = 2
-max_order = 45
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-grid_points = [4, 4]
-n_bins = 20
-verbose = True
-
-oversampling_rate = 3.
-output_Q = 3.
-
-max_wfe_rms = 0.1
-output_dim = 32
-LP_filter_length = 2
-euclid_obsc = True
-pupil_diameter = 256
-
-
-
-
-# Generate Zernike maps
-zernikes = wf_psf.utils.zernike_generator(n_zernikes=max_order, wfe_dim=pupil_diameter)
-
-# Initialize PSF simulator
-sim_PSF_toolkit = wf_psf.SimPSFToolkit(
- zernikes, max_order=max_order, max_wfe_rms=max_wfe_rms, oversampling_rate=oversampling_rate,
- output_Q=output_Q, output_dim=output_dim, pupil_diameter=pupil_diameter, euclid_obsc=euclid_obsc,
- LP_filter_length=LP_filter_length)
-
-# Initialize PSF field
-gen_poly_fieldPSF = wf_psf.GenPolyFieldPSF(sim_PSF_toolkit, d_max=d_max,
- grid_points=grid_points, max_order=max_order,
- x_lims=x_lims, y_lims=y_lims, n_bins=n_bins,
- lim_max_wfe_rms=max_wfe_rms, verbose=verbose)
-
-
-
-# Load the SEDs
-stellar_SEDs = np.load(SED_path + 'SEDs.npy', allow_pickle=True)
-stellar_lambdas = np.load(SED_path + 'lambdas.npy', allow_pickle=True)
-
-
-
-# Total stars
-n_stars = n_star_list[0] + n_test_stars
-# Max train stars
-tot_train_stars = n_star_list[0]
-
-# Generate all the stars and then go saving different subsets
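-# Since n_star_list is ordered from biggest to smallest, slicing the first
-# n_train_stars elements below yields nested (coherent) training subsets.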
-
-
-
-# Select random SEDs
-SED_list = []
-for it in range(n_stars):
- selected_id_SED = np.random.randint(low=0, high=13)
- concat_SED_wv = np.concatenate((stellar_lambdas.reshape(-1,1),
- stellar_SEDs[selected_id_SED,:].reshape(-1,1)), axis=1)
- SED_list.append(concat_SED_wv)
-
-
-
-# First we choose the locations (randomly)
-pos_np = np.random.rand(n_stars, 2)
-
-pos_np[:,0] = pos_np[:,0]*(x_lims[1] - x_lims[0]) + x_lims[0]
-pos_np[:,1] = pos_np[:,1]*(y_lims[1] - y_lims[0]) + y_lims[0]
-
-
-
-# Generate all the polychromatic PSFs
-poly_psf_list = []
-zernike_coef_list = []
-
-for it in tqdm(range(n_stars)):
- _psf, _zernike, _ = gen_poly_fieldPSF.get_poly_PSF(xv_flat=pos_np[it, 0],
- yv_flat=pos_np[it, 1],
- SED=SED_list[it])
-
- poly_psf_list.append(_psf)
- zernike_coef_list.append(_zernike)
-
-
-
-# Generate numpy arrays from the lists
-poly_psf_np = np.array(poly_psf_list)
-zernike_coef_np = np.array(zernike_coef_list)
-SED_np = np.array(SED_list)
-
-
-# Generate the noisy train stars
-
-# Copy the training stars
-noisy_train_stars = np.copy(poly_psf_np[:tot_train_stars, :, :])
-# Generate a dataset with a SNR varying randomly from 10 to 120
-rand_SNR = (np.random.rand(noisy_train_stars.shape[0]) * 100) + 10
-# Add Gaussian noise to the observations
-noisy_train_stars = np.stack([wf_psf.utils.add_noise(_im, desired_SNR=_SNR)
- for _im, _SNR in zip(noisy_train_stars, rand_SNR)], axis=0)
-
-
-
-# Save only one test dataset
-
-# Build param dictionary
-dataset_params = {'d_max':d_max, 'max_order':max_order, 'x_lims':x_lims, 'y_lims':y_lims,
- 'grid_points':grid_points, 'n_bins':n_bins, 'max_wfe_rms':max_wfe_rms,
- 'oversampling_rate':oversampling_rate, 'output_Q':output_Q,
- 'output_dim':output_dim, 'LP_filter_length':LP_filter_length,
- 'pupil_diameter':pupil_diameter, 'euclid_obsc':euclid_obsc,
- 'n_stars':n_test_stars}
-
-# Save dataset C coefficient matrix (reproducible dataset)
-C_poly = gen_poly_fieldPSF.C_poly
-
-test_psf_dataset = {'stars' : poly_psf_np[tot_train_stars:, :, :],
- 'positions' : pos_np[tot_train_stars:, :],
- 'SEDs' : SED_np[tot_train_stars:, :, :],
- 'zernike_coef' : zernike_coef_np[tot_train_stars:, :, :],
- 'C_poly' : C_poly,
- 'parameters': dataset_params}
-
-np.save(output_folder + 'test_Euclid_res_id_' + dataset_id_str + '.npy',
- test_psf_dataset, allow_pickle=True)
-
-
-
-# Save the different train datasets
-
-
-for it_glob in range(len(n_star_list)):
-
- n_train_stars = n_star_list[it_glob]
-
-    # Build param dictionary
- dataset_params = {'d_max':d_max, 'max_order':max_order, 'x_lims':x_lims, 'y_lims':y_lims,
- 'grid_points':grid_points, 'n_bins':n_bins, 'max_wfe_rms':max_wfe_rms,
- 'oversampling_rate':oversampling_rate, 'output_Q':output_Q,
- 'output_dim':output_dim, 'LP_filter_length':LP_filter_length,
- 'pupil_diameter':pupil_diameter, 'euclid_obsc':euclid_obsc,
- 'n_stars':n_train_stars}
-
- train_psf_dataset = {'stars' : poly_psf_np[:n_train_stars, :, :],
- 'noisy_stars': noisy_train_stars[:n_train_stars, :, :],
- 'positions' : pos_np[:n_train_stars, :],
- 'SEDs' : SED_np[:n_train_stars, :, :],
- 'zernike_coef' : zernike_coef_np[:n_train_stars, :, :],
- 'C_poly' : C_poly,
- 'parameters': dataset_params}
-
-
- np.save(output_folder + 'train_Euclid_res_' + str(n_train_stars) + '_TrainStars_id_' + dataset_id_str + '.npy',
- train_psf_dataset, allow_pickle=True)
-
-
-
\ No newline at end of file
diff --git a/long-runs/LR-PSF-field-generation.py b/long-runs/LR-PSF-field-generation.py
deleted file mode 100644
index ab75925c..00000000
--- a/long-runs/LR-PSF-field-generation.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# In[1]:
-
-
-import wf_psf
-from wf_psf import SimPSFToolkit
-from wf_psf import GenPolyFieldPSF
-import scipy.io as sio
-import numpy as np
-import matplotlib.pyplot as plt
-
-# get_ipython().run_line_magic('pylab', 'inline')
-
-import time
-from tqdm import tqdm
-
-
-# In[2]:
-
-
-def add_noise(image, desired_SNR):
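-    # Noise std chosen so that SNR = sum(image**2) / (sigma_noise**2 * n_pixels)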
- sigma_noise = np.sqrt((np.sum(image**2))/(desired_SNR * image.shape[0] * image.shape[1]))
- noisy_image = image + np.random.standard_normal(image.shape) * sigma_noise
- return noisy_image
-
-
-# In[3]:
-
-
-# Zcube_path = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/data/PA-zernike-cubes/Zernike45.mat'
-Zcube_path = '/local/home/tliaudat/data/Zernike45.mat'
-Zcube = sio.loadmat(Zcube_path)
-
-
-# In[4]:
-
-
-# Parameters
-d_max = 2
-max_order = 45
-x_lims = [0, 1e3]
-y_lims = [0, 1e3]
-grid_points = [4, 4]
-n_bins = 20
-verbose = True
-
-oversampling_rate = 3.
-output_Q = 3.
-
-max_wfe_rms=0.1
-output_dim=32
-LP_filter_length=2
-euclid_obsc = True
-pupil_diameter = 256
-
-
-zernikes = []
-for it in range(45):
- zernikes.append(wf_psf.utils.downsample_im(input_im=Zcube['Zpols'][0,it][5], output_dim=pupil_diameter))
-
-
-# Initialize PSF simulator
-sim_PSF_toolkit = SimPSFToolkit.SimPSFToolkit(
- zernikes, max_order=max_order, max_wfe_rms=max_wfe_rms, oversampling_rate=oversampling_rate,
- output_Q=output_Q, output_dim=output_dim, pupil_diameter=pupil_diameter, euclid_obsc=euclid_obsc,
- LP_filter_length=LP_filter_length)
-
-# Initialize PSF field
-gen_poly_fieldPSF = GenPolyFieldPSF.GenPolyFieldPSF(sim_PSF_toolkit, d_max=d_max,
- grid_points=grid_points, max_order=max_order,
- x_lims=x_lims, y_lims=y_lims, n_bins=n_bins,
- lim_max_wfe_rms=max_wfe_rms, verbose=verbose)
-
-
-# Load the SEDs
-
-# SED_path = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/notebooks/explore_SEDs/save_SEDs/'
-SED_path = '/local/home/tliaudat/data/'
-
-stellar_SEDs = np.load(SED_path + 'SEDs.npy', allow_pickle=True)
-stellar_lambdas = np.load(SED_path + 'lambdas.npy', allow_pickle=True)
-
-
-
-
-n_star_list = [200, 500, 1000, 2000, 4000, 8000]
-# n_star_list = [20, 40, 50]
-
-for it_glob in range(len(n_star_list)):
-
- n_stars = n_star_list[it_glob]
- train_lim = int(n_stars*0.8)
-
-
-    # Build param dictionary
- dataset_params = {'d_max':d_max, 'max_order':max_order, 'x_lims':x_lims, 'y_lims':y_lims,
- 'grid_points':grid_points, 'n_bins':n_bins, 'max_wfe_rms':max_wfe_rms,
- 'oversampling_rate':oversampling_rate, 'output_Q':output_Q,
- 'output_dim':output_dim, 'LP_filter_length':LP_filter_length,
- 'pupil_diameter':pupil_diameter, 'euclid_obsc':euclid_obsc,
- 'n_stars':n_stars, 'train_lim':train_lim}
-
-
-
- SED_list = []
-
- # Select SEDs
- for it in range(n_stars):
- selected_id_SED = np.random.randint(low=0, high=13)
- concat_SED_wv = np.concatenate((stellar_lambdas.reshape(-1,1),
- stellar_SEDs[selected_id_SED,:].reshape(-1,1)), axis=1)
- SED_list.append(concat_SED_wv)
-
-
-
- # In[7]:
-
-
- # First we choose the locations (randomly)
- pos_np = np.random.rand(n_stars,2)
-
- pos_np[:,0] = pos_np[:,0]*(x_lims[1] - x_lims[0]) + x_lims[0]
- pos_np[:,1] = pos_np[:,1]*(y_lims[1] - y_lims[0]) + y_lims[0]
-
-
-
-
-
- # In[ ]:
-
-
- # Generate the PSFs
- poly_psf_list = []
- zernike_coef_list = []
-
- for it in tqdm(range(n_stars)):
- _psf, _zernike, _ = gen_poly_fieldPSF.get_poly_PSF(xv_flat=pos_np[it, 0],
- yv_flat=pos_np[it, 1],
- SED=SED_list[it])
-
- poly_psf_list.append(_psf)
- zernike_coef_list.append(_zernike)
-
-
- # In[ ]:
-
-
- # gen_poly_fieldPSF.show_WFE_RMS(save_img=True)
-
-
- # In[ ]:
-
-
- # Generate numpy arrays from the lists
- poly_psf_np = np.array(poly_psf_list)
- zernike_coef_np = np.array(zernike_coef_list)
- SED_np = np.array(SED_list)
-
-
- # In[ ]:
-
-
- print(poly_psf_np.shape)
- print(zernike_coef_np.shape)
- print(SED_np.shape)
- print(pos_np.shape)
-
-
-
- # Generate the noisy train stars
-
- # Copy the training stars
- noisy_train_stars = np.copy(poly_psf_np[:train_lim, :, :])
-
- # Generate a dataset with a SNR varying randomly from 10 to 80
- rand_SNR = (np.random.rand(noisy_train_stars.shape[0]) * 70) + 10
-
- noisy_train_stars = np.stack([add_noise(_im, desired_SNR=_SNR) for _im, _SNR in zip(noisy_train_stars, rand_SNR)], axis=0)
-
-
-
- # In[ ]:
-
-
- # Generate dictionary and save
-
- C_poly = gen_poly_fieldPSF.C_poly
-
- train_psf_dataset = {'stars' : poly_psf_np[:train_lim, :, :],
- 'noisy_stars': noisy_train_stars,
- 'positions' : pos_np[:train_lim, :],
- 'SEDs' : SED_np[:train_lim, :, :],
- 'zernike_coef' : zernike_coef_np[:train_lim, :, :],
- 'C_poly' : C_poly,
- 'parameters': dataset_params}
-
- test_psf_dataset = {'stars' : poly_psf_np[train_lim:, :, :],
- 'positions' : pos_np[train_lim:, :],
- 'SEDs' : SED_np[train_lim:, :, :],
- 'zernike_coef' : zernike_coef_np[train_lim:, :, :],
- 'C_poly' : C_poly,
- 'parameters': dataset_params}
-
-
- # In[ ]:
-
-
- # output_folder = '/Users/tliaudat/Documents/PhD/codes/WF_PSF/notebooks/psf_field_datasets/Euclid_resolution/'
- output_folder = '/local/home/tliaudat/psf-datasets/'
-
- np.save(output_folder + 'train_Euclid_res_' + str(n_stars) + '_stars_dim256.npy', train_psf_dataset, allow_pickle=True)
- np.save(output_folder + 'test_Euclid_res_' + str(n_stars) + '_stars_dim256.npy', test_psf_dataset, allow_pickle=True)
diff --git a/long-runs/README.md b/long-runs/README.md
deleted file mode 100644
index 885e16b8..00000000
--- a/long-runs/README.md
+++ /dev/null
@@ -1,273 +0,0 @@
-# Use of the WaveDiff model
-
-The model should be installed as a Python package as detailed in the main [README](https://github.com/tobias-liaudat/wf-psf) file. The `wf-psf` package includes functions to train, evaluate and plot the results of the model. In this file we detail how to make use of these functions.
-
-For more information on the input datasets see [Input_data.md](https://github.com/tobias-liaudat/wf-psf/blob/main/long-runs/Input_data.md).
-
-## Auxiliary functions
-
-The package has two main functions for running tests with the WaveDiff model: one runs the model's optimisation process, and the other performs the evaluation and saves the results. A third function generates plots once the evaluation is finished. These functions can be found in the [`script_utils.py`](https://github.com/tobias-liaudat/wf-psf/blob/main/wf_psf/script_utils.py) file.
-
-- `train_model(**args)`: trains the PSF model.
-- `evaluate_model(**args)`: evaluates the trained PSF model.
-- `plot_metrics(**args)`: plots the results of multiple models.
-
-The arguments of these functions are detailed below.
-
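-A minimal sketch of calling them directly from Python instead of through the script (the `wf_psf.script_utils` import path and the partial `args` dictionary are illustrative; every option listed below must be present):
-
-```
-from wf_psf import script_utils
-
-args = {
-    'model': 'poly',
-    'id_name': '-coherent_euclid_200stars',
-    # ... plus every other option documented in the sections below
-}
-
-script_utils.train_model(**args)
-script_utils.evaluate_model(**args)
-script_utils.plot_metrics(**args)
-```
-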
-## Training script
-For running the previous functions one can use the [`train_eval_plot_script_click.py`](https://github.com/tobias-liaudat/wf-psf/blob/main/long-runs/train_eval_plot_script_click.py) script. This script calls each of the above functions and serves as an interface to collect the `**args` options from command line arguments using the [`click`](https://click.palletsprojects.com/en/8.1.x/) package.
-
-To run the script one should input the desired parameters as command line arguments as in the following example:
-
-```
-./train_eval_plot_script_click.py \
- --train_opt True \
- --eval_opt True \
- --plot_opt True \
- --model poly \
- --id_name your_id \
- --suffix_id_name _1k_stars --suffix_id_name _2k_stars \
- --id_name your_id_1k_stars --id_name your_id_2k_stars \
-    --star_numbers 1000 --star_numbers 2000 \
- --base_path your/base/path/wf-outputs/ \
- ...
- --pupil_diameter 256 \
- --n_epochs_param_multi_cycle "15 15" \
- --n_epochs_non_param_multi_cycle "100 50" \
- --l_rate_non_param_multi_cycle "0.1 0.06" \
- --l_rate_param_multi_cycle "0.01 0.004" \
- ...
-```
-
-The options that remain unset will take the default values defined in the [`train_eval_plot_script_click.py`](https://github.com/tobias-liaudat/wf-psf/blob/main/long-runs/train_eval_plot_script_click.py) script.
-
-# Running the model on SLURM clusters
-A job scheduler example can be found in the `./examples` folder. That script runs several experiments (model training and evaluation) in parallel on a computing cluster using SLURM directives. Each model can use different parameters, datasets or optimisation strategies.
-
-Notes:
-- The `--star_numbers` option sets the final plot's x-axis. It does not always represent the number of stars, but it needs to be an integer.
-- `--id_name` = `--base_id_name` + `--suffix_id_name` (e.g. `-coherent_euclid_` + `200stars` gives `-coherent_euclid_200stars`).
-
-# Output folders
-
-For the auxiliary functions to work properly several folders must be created before running the experiments. The output file structure is shown below:
-
-```
-wf-outputs
-├── chkp
-| ├── model_1
-| ├── ...
-| └── model_N
-├── log-files
-├── metrics
-├── optim-hist
-└── plots
-```
-
-One checkpoint folder is recommended for each model in a multi-model parallel training. This simplifies the organisation of the checkpoints from multiple training cycles. These folders should coincide with their corresponding script options.
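-
-A minimal sketch to create this structure beforehand (the base path is illustrative):
-
-```
-import os
-
-base_path = 'your/base/path/wf-outputs/'
-for folder in ['chkp/model_1', 'log-files', 'metrics', 'optim-hist', 'plots']:
-    os.makedirs(os.path.join(base_path, folder), exist_ok=True)
-```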
-
-# Script options
-Here we detail every option or argument used in the WaveDiff model auxiliary script.
-
-## Script options
-
-- `--train_opt`, default=`True`, type: `bool`
- - Train the model.
-
-- `--eval_opt`, default=`True`, type: `bool`
- - Evaluate the model.
-
-- `--plot_opt`, default=`True`, type: `bool`
-  - Plot the model results.
-## Training options
-### Model definition
-
-- `--model`, default="poly", type: str
-  - Model type. Options are: 'mccd', 'graph', 'poly', 'param', 'poly_physical'.
-
-- `--id_name`, default="-coherent_euclid_200stars", type: str
- - Model saving id.
-### Saving paths
-
-- `--base_path`, default="/gpfswork/rech/ynx/ulx23va/wf-outputs/", type: str
- - Base path for saving files.
-
-- `--log_folder`, default="log-files/", type: str
- - Folder name to save log files.
-
-- `--model_folder`, default="chkp/", type: str
- - Folder name to save trained models.
-
-- `--optim_hist_folder`, default="optim-hist/", type: str
- - Folder name to save optimisation history files.
-
-- `--chkp_save_path`, default="/gpfsscratch/rech/ynx/ulx23va/wf-outputs/chkp/", type: str
- - Path to save model checkpoints during training.
-
-- `--plots_folder`, default="plots/", type: str
- - Folder name to save the generated plots.
-### Input dataset paths
-
-- `--dataset_folder`, default="/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/", type: str
- - Folder path of datasets.
-
-- `--train_dataset_file`, default="train_Euclid_res_200_TrainStars_id_001.npy", type: str
- - Train dataset file name.
-
-- `--test_dataset_file`, default="test_Euclid_res_id_001.npy", type: str
- - Test dataset file name.
-## Model parameters
-
-- `--n_zernikes`, default=15, type: int
- - Zernike polynomial modes to use on the parametric part.
-
-- `--pupil_diameter`, default=256, type: int
- - Dimension of the OPD/Wavefront space.
-
-- `--n_bins_lda`, default=20, type: int
- - Number of wavelength bins to use to reconstruct polychromatic objects.
-
-- `--output_q`, default=3., type: float
- - Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model.
-
-- `--oversampling_rate`, default=3., type: float
- - Oversampling rate used for the OPD/WFE PSF model.
-
-- `--output_dim`, default=32, type: int
- - Dimension of the pixel PSF postage stamp.
-
-- `--d_max`, default=2, type: int
- - Max polynomial degree of the parametric part.
-
-- `--d_max_nonparam`, default=3, type: int
- - Max polynomial degree of the non-parametric part.
-
-- `--x_lims`, nargs=2, default=[0, 1e3], type: float
-  - Limits of the PSF field coordinates for the x axis.
-
-- `--y_lims`, nargs=2, default=[0, 1e3], type: float
-  - Limits of the PSF field coordinates for the y axis.
-
-- `--graph_features`, default=10, type: int
- - Number of graph-constrained features of the non-parametric part.
-
-- `--l1_rate`, default=`1e-8`, type: float
- - L1 regularisation parameter for the non-parametric part.
-
-- `--use_sample_weights`, default=`False`, type: `bool`
- - Boolean to define if we use sample weights based on the noise standard deviation estimation.
-
-- `--interpolation_type`, default="none", type: str
- - The interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'.
-## Training parameters
-
-- `--batch_size`, default=32, type: int
-  - Batch size used for the training in the stochastic gradient descent type of algorithm.
-### Old multicycle parameters for backwards compatibility.
-
-- `--l_rate_param`, default=None, type: float
- - Learning rates for the parametric parts.
-
-- `--l_rate_non_param`, default=None, type: float
- - Learning rates for the non-parametric parts.
-
-- `--n_epochs_param`, default=None, type: int
- - Number of training epochs of the parametric parts.
-
-- `--n_epochs_non_param`, default=None, type: int
- - Number of training epochs of the non-parametric parts.
-### New multicycle parameters
-
-- `--l_rate_param_multi_cycle`, default="`1e-2` `1e-2`", type: str
-  - Learning rates for the parametric parts. It should be a string where numeric values are separated by spaces (see the parsing sketch after this list).
-
-- `--l_rate_non_param_multi_cycle`, default="`1e-1` `1e-1`", type: str
- - Learning rates for the non-parametric parts. It should be a string where numeric values are separated by spaces.
-
-- `--n_epochs_param_multi_cycle`, default="20 20", type: str
- - Number of training epochs of the parametric parts. It should be a string where numeric values are separated by spaces.
-
-- `--n_epochs_non_param_multi_cycle`, default="100 120", type: str
- - Number of training epochs of the non-parametric parts. It should be a string where numeric values are separated by spaces.
-
-- `--save_all_cycles`, default=`False`, type: `bool`
- - Make checkpoint at every cycle or just save the checkpoint at the end of the training.
-
-- `--total_cycles`, default=2, type: int
- - Total amount of cycles to perform. For the moment the only available options are '1' or '2'.
-
-- `--cycle_def`, default="complete", type: str
- - Train cycle definition. It can be: 'parametric', 'non-parametric', 'complete', 'only-non-parametric' and 'only-parametric'.
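-
-A sketch of how these space-separated strings map to per-cycle values (an illustration, not the script's exact parsing code):
-
-```
-l_rate_param_multi_cycle = "1e-2 1e-2"
-l_rate_param = [float(x) for x in l_rate_param_multi_cycle.split()]  # [0.01, 0.01]
-```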
-## Evaluation flags
-### Saving paths
-
-- `--model_eval`, default="poly", type: str
- - Model used as ground truth for the evaluation. Options are: 'poly', 'physical'.
-
-- `--metric_base_path`, default="/gpfswork/rech/ynx/ulx23va/wf-outputs/metrics/", type: str
- - Base path for saving metric files.
-
-- `--saved_model_type`, default="final", type: str
-  - Type of saved model to use for the evaluation. Can be 'final' for a full model, 'checkpoint' for model weights or 'external' for a different model not saved under the same base_id as the current one.
-
-- `--saved_cycle`, default="cycle2", type: str
- - Saved cycle to use for the evaluation. Can be 'cycle1', 'cycle2', ..., 'cycleN'.
-### Evaluation parameters
-
-- `--gt_n_zernikes`, default=45, type: int
- - Zernike polynomial modes to use on the ground truth model parametric part.
-
-- `--eval_batch_size`, default=16, type: int
- - Batch size to use for the evaluation.
-
-- `--n_bins_gt`, default=20, type: int
-  - Number of bins used for the ground truth model poly PSF generation.
-
-- `--opt_stars_rel_pix_rmse`, default=`False`, type: `bool`
- - Option to get SR pixel PSF RMSE for each individual test star.
-## Specific parameters
-
-- `--l2_param`, default=0., type: float
- - Parameter for the l2 loss of the OPD.
-## Plot parameters
-
-- `--base_id_name`, default="-coherent_euclid_", type: str
- - Plot parameter. Base id_name before dataset suffix are added.
-
-- `--suffix_id_name`, default=["2c", "5c"], multiple=`True`, type: str
- - Plot parameter. Suffix needed to recreate the different id names.
-
-- `--star_numbers`, default=[200, 500], multiple=`True`, type: int
- - Plot parameter. Training star number of the different models evaluated. Needs to correspond with the `suffix_id_name`.
-## WaveDiff new features
-### Feature: SED interp
-
-- `--interp_pts_per_bin`, default=0, type: int
- - Number of points per bin to add during the interpolation process. It can take values {0,1,2,3}, where 0 means no interpolation.
-
-- `--extrapolate`, default=`True`, type: `bool`
- - Whether extrapolation is performed or not on the borders of the SED.
-
-- `--SED_interp_kind`, default="linear", type: str
- - Type of interpolation for the SED. It can be "linear", "cubic", "quadratic", etc. Check [all available options](https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp1d.html).
-
-- `--SED_sigma`, default=0, type: float
- - Standard deviation of the multiplicative SED Gaussian noise.
-### Feature: project parameters
-
-- `--project_dd_features`, default=`False`, type: `bool`
- - Project NP DD features onto parametric model.
-
-- `--eval_only_param`, default=`False`, type: `bool`
- - Use only the parametric model for evaluation.
-
-- `--reset_dd_features`, default=`False`, type: `bool`
- - Reset to random initialisation the non-parametric model after projecting the DD features.
-
-- `--pretrained_model`, default=`None`, type: str
- - Path to pretrained model checkpoint callback.
\ No newline at end of file
diff --git a/long-runs/alternative_train_eval_script.py b/long-runs/alternative_train_eval_script.py
deleted file mode 100644
index 386525f0..00000000
--- a/long-runs/alternative_train_eval_script.py
+++ /dev/null
@@ -1,937 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# PSF modelling and evaluation
-
-# Import packages
-import sys
-import numpy as np
-import time
-import wf_psf as wf
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-import click
-
-# from absl import app
-# from absl import flags
-
-@click.command()
-
-## Training options
-# Model definition
-@click.option(
- "--model",
- default="poly",
- type=str,
-    help="Model type. Options are: 'mccd', 'poly', 'param'.")
-@click.option(
- "--id_name",
- default="-coherent_euclid_200stars",
- type=str,
- help="Model saving id.")
-# Saving paths
-@click.option(
- "--base_path",
- default="/gpfswork/rech/xdy/ulx23va/wf-outputs/",
- type=str,
- help="Base path for saving files.")
-@click.option(
- "--log_folder",
- default="log-files/",
- type=str,
- help="Folder name to save log files.")
-@click.option(
- "--model_folder",
- default="chkp/",
- type=str,
- help="Folder name to save trained models.")
-@click.option(
- "--optim_hist_folder",
- default="optim-hist/",
- type=str,
- help="Folder name to save optimisation history files.")
-@click.option(
- "--chkp_save_path",
- default="/gpfsscratch/rech/xdy/ulx23va/wf-outputs/chkp/",
- type=str,
- help="Path to save model checkpoints during training.")
-# Input dataset paths
-@click.option(
- "--dataset_folder",
- default="/gpfswork/rech/xdy/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/",
- type=str,
- help="Folder path of datasets.")
-@click.option(
- "--train_dataset_file",
- default="train_Euclid_res_200_TrainStars_id_001.npy",
- type=str,
- help="Train dataset file name.")
-@click.option(
- "--test_dataset_file",
- default="test_Euclid_res_id_001.npy",
- type=str,
- help="Test dataset file name.")
-# Model parameters
-@click.option(
- "--n_zernikes",
- default=15,
- type=int,
- help="Zernike polynomial modes to use on the parametric part.")
-@click.option(
- "--pupil_diameter",
- default=256,
- type=int,
- help="Dimension of the OPD/Wavefront space.")
-@click.option(
- "--n_bins_lda",
- default=20,
- type=int,
- help="Number of wavelength bins to use to reconstruct polychromatic objects.")
-@click.option(
- "--output_q",
- default=3.,
- type=float,
- help="Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model.")
-@click.option(
- "--oversampling_rate",
- default=3.,
- type=float,
- help="Oversampling rate used for the OPD/WFE PSF model.")
-@click.option(
- "--output_dim",
- default=32,
- type=int,
- help="Dimension of the pixel PSF postage stamp.")
-@click.option(
- "--d_max",
- default=2,
- type=int,
- help="Max polynomial degree of the parametric part.")
-@click.option(
- "--d_max_nonparam",
- default=3,
- type=int,
- help="Max polynomial degree of the non-parametric part.")
-@click.option(
- "--x_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the x axis.")
-@click.option(
- "--y_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the y axis.")
-@click.option(
- "--graph_features",
- default=10,
- type=int,
- help="Number of graph-constrained features of the non-parametric part.")
-@click.option(
- "--l1_rate",
- default=1e-8,
- type=float,
- help="L1 regularisation parameter for the non-parametric part.")
-@click.option(
- "--use_sample_weights",
- default=False,
- type=bool,
- help="Boolean to define if we use sample weights based on the noise standard deviation estimation.")
-# Training parameters
-@click.option(
- "--batch_size",
- default=32,
- type=int,
-    help="Batch size used for the training in the stochastic gradient descent type of algorithm.")
-@click.option(
- "--l_rate_param",
- nargs=2,
- default=[1e-2, 1e-2],
- type=float,
- help="Learning rates for the parametric parts.")
-@click.option(
- "--l_rate_non_param",
- nargs=2,
- default=[1e-1, 1e-1],
- type=float,
- help="Learning rates for the non-parametric parts.")
-@click.option(
- "--n_epochs_param",
- nargs=2,
- default=[20, 20],
- type=int,
- help="Number of training epochs of the parametric parts.")
-@click.option(
- "--n_epochs_non_param",
- nargs=2,
- default=[100, 120],
- type=int,
- help="Number of training epochs of the non-parametric parts.")
-@click.option(
- "--total_cycles",
- default=2,
- type=int,
- help="Total amount of cycles to perform. For the moment the only available options are '1' or '2'.")
-## Evaluation flags
-# Saving paths
-@click.option(
- "--metric_base_path",
- default="/gpfswork/rech/xdy/ulx23va/wf-outputs/metrics/",
- type=str,
- help="Base path for saving metric files.")
-@click.option(
- "--saved_model_type",
- default="final",
- type=str,
- help="Type of saved model to use for the evaluation. Can be 'final' or 'checkpoint'.")
-@click.option(
- "--saved_cycle",
- default="cycle2",
- type=str,
- help="Saved cycle to use for the evaluation. Can be 'cycle1' or 'cycle2'.")
-# Evaluation parameters
-@click.option(
- "--gt_n_zernikes",
- default=45,
- type=int,
- help="Zernike polynomial modes to use on the ground truth model parametric part.")
-@click.option(
- "--eval_batch_size",
- default=16,
- type=int,
- help="Batch size to use for the evaluation.")
-
-## Specific parameters
-@click.option(
- "--l2_param",
- default=0.,
- type=float,
- help="Parameter for the l2 loss of the OPD.")
-
-def main(**args):
- print(args)
- train_model(**args)
- evaluate_model(**args)
-
-
-def train_model(**args):
-    """Train the model defined by the input arguments."""
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = args['model'] + args['id_name']
-
- # Define paths
- log_save_file = args['base_path'] + args['log_folder']
- model_save_file= args['base_path'] + args['model_folder']
- optim_hist_file = args['base_path'] + args['optim_hist_folder']
- saving_optim_hist = dict()
-
-
- # Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '_output.log','w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- # Print GPU and tensorflow info
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
- ## Prepare the inputs
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=args['n_zernikes'], wfe_dim=args['pupil_diameter'])
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
- print('Zernike cube:')
- print(tf_zernike_cube.shape)
-
-
- ## Load the dictionaries
- train_dataset = np.load(args['dataset_folder'] + args['train_dataset_file'], allow_pickle=True)[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- # train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(args['dataset_folder'] + args['test_dataset_file'], allow_pickle=True)[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- # tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
-
- ## Generate initializations
- # Prepare np input
- simPSF_np = wf.SimPSFToolkit(
- zernikes,
- max_order=args['n_zernikes'],
- pupil_diameter=args['pupil_diameter'],
- output_dim=args['output_dim'],
- oversampling_rate=args['oversampling_rate'],
- output_Q=args['output_q']
- )
- simPSF_np.gen_random_Z_coeffs(max_order=args['n_zernikes'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=args['pupil_diameter'], N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
- # Initialize the SED data list
- packed_SED_data = [
- wf.utils.generate_packed_elems(
- _sed,
- simPSF_np,
- n_bins=args['n_bins_lda']
- )
- for _sed in train_SEDs
- ]
-
-
- # Prepare the inputs for the training
- tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
- tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
- inputs = [tf_train_pos, tf_packed_SED_data]
-
- # Select the observed stars (noisy or noiseless)
- outputs = tf_noisy_train_stars
- # outputs = tf_train_stars
-
-
- ## Prepare validation data inputs
- val_SEDs = test_SEDs
- tf_val_pos = tf_test_pos
- tf_val_stars = tf_test_stars
-
- # Initialize the SED data list
- val_packed_SED_data = [
- wf.utils.generate_packed_elems(
- _sed,
- simPSF_np,
- n_bins=args['n_bins_lda']
- )
- for _sed in val_SEDs
- ]
-
- # Prepare the inputs for the validation
- tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
- tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
- # Prepare input validation tuple
- val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
- val_y_inputs = tf_val_stars
- val_data = (val_x_inputs, val_y_inputs)
-
-
- ## Select the model
- if args['model'] == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(
- obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- d_max=args['d_max_nonparam'],
- graph_features=args['graph_features']
- )
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
-
- # # Model Training
-    # Prepare the model checkpoint saving callback
- filepath_chkp_callback = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_cycle1'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error',
- verbose=1,
- save_best_only=True,
- save_weights_only=False,
- mode='min',
- save_freq='epoch',
- options=None
- )
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_param'][0])
- non_param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_non_param'][0])
-
- print('Starting cycle 1..')
- start_cycle1 = time.time()
-
- if args['model'] == 'param':
- tf_semiparam_field, hist_param = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate=args['l_rate_param'][0],
- n_epochs=args['n_epochs_param'][0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
-
- else:
- tf_semiparam_field, hist_param, hist_non_param = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate_param=args['l_rate_param'][0],
- l_rate_non_param=args['l_rate_non_param'][0],
- n_epochs_param=args['n_epochs_param'][0],
- n_epochs_non_param=args['n_epochs_non_param'][0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None,
- non_param_loss=None,
- param_metrics=None,
- non_param_metrics=None,
- param_callback=None,
- non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
-
- # Save weights
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
- end_cycle1 = time.time()
- print('Cycle1 elapsed time: %f'%(end_cycle1-start_cycle1))
-
- # Save optimisation history in the saving dict
- saving_optim_hist['param_cycle1'] = hist_param.history
- if args['model'] != 'param':
- saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
- if args['total_cycles'] >= 2:
- # Prepare to save the model as a callback
- filepath_chkp_callback = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_cycle2'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error',
- verbose=1,
- save_best_only=True,
- save_weights_only=False,
- mode='min',
- save_freq='epoch',
- options=None
- )
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_param'][1])
- non_param_optim = tfa.optimizers.RectifiedAdam(lr=args['l_rate_non_param'][1])
-
- print('Starting cycle 2..')
- start_cycle2 = time.time()
-
-
- # Compute the next cycle
- if args['model'] == 'param':
- tf_semiparam_field, hist_param_2 = wf.train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate=args['l_rate_param'][1],
- n_epochs=args['n_epochs_param'][1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
- else:
- # Compute the next cycle
- tf_semiparam_field, hist_param_2, hist_non_param_2 = wf.train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate_param=args['l_rate_param'][1],
- l_rate_non_param=args['l_rate_non_param'][1],
- n_epochs_param=args['n_epochs_param'][1],
- n_epochs_non_param=args['n_epochs_non_param'][1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None,
- non_param_loss=None,
- param_metrics=None,
- non_param_metrics=None,
- param_callback=None,
- non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
-
- # Save the weights at the end of the second cycle
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle2')
-
- end_cycle2 = time.time()
- print('Cycle2 elapsed time: %f'%(end_cycle2 - start_cycle2))
-
- # Save optimisation history in the saving dict
- saving_optim_hist['param_cycle2'] = hist_param_2.history
- if args['model'] != 'param':
- saving_optim_hist['nonparam_cycle2'] = hist_non_param_2.history
-
- # Save optimisation history dictionary
- np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
- ## Close log file
- print('\n Good bye..')
- sys.stdout = old_stdout
- log_file.close()
-
-
-def evaluate_model(**args):
- """ Evaluate the trained model."""
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = args['model'] + args['id_name']
- # Define paths
- log_save_file = args['base_path'] + args['log_folder']
-
- # Define saved model to use
- if args['saved_model_type'] == 'checkpoint':
- weights_paths = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_' + args['saved_cycle']
-
- elif args['saved_model_type'] == 'final':
- model_save_file= args['base_path'] + args['model_folder']
- weights_paths = model_save_file + 'chkp_' + run_id_name + '_' + args['saved_cycle']
-
-
- ## Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- ## Check GPU and tensorflow version
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
-
- ## Load datasets
- train_dataset = np.load(args['dataset_folder'] + args['train_dataset_file'], allow_pickle=True)[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(args['dataset_folder'] + args['test_dataset_file'], allow_pickle=True)[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
-
- ## Prepare models
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=args['n_zernikes'], wfe_dim=args['pupil_diameter'])
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # Prepare np input
- simPSF_np = wf.SimPSFToolkit(
- zernikes,
- max_order=args['n_zernikes'],
- pupil_diameter=args['pupil_diameter'],
- output_dim=args['output_dim'],
- oversampling_rate=args['oversampling_rate'],
- output_Q=args['output_q']
- )
- simPSF_np.gen_random_Z_coeffs(max_order=args['n_zernikes'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=args['pupil_diameter'], N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
- # Outputs (needed for the MCCD model)
- outputs = tf_noisy_train_stars
-
-
- ## Create the model
- ## Select the model
- if args['model'] == 'mccd':
- poly_dic, graph_dic = wf.tf_mccd_psf_field.build_mccd_spatial_dic_v2(
- obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- d_max=args['d_max_nonparam'],
- graph_features=args['graph_features']
- )
-
- spatial_dic = [poly_dic, graph_dic]
-
- # Initialize the model
- tf_semiparam_field = wf.tf_mccd_psf_field.TF_SP_MCCD_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'poly':
- # # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'param':
- # Initialize the model
- tf_semiparam_field = wf.tf_psf_field.TF_PSF_field_model(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- ## Load the model's weights
- tf_semiparam_field.load_weights(weights_paths)
-
- ## Prepare ground truth model
- # Generate Zernike maps
- zernikes = wf.utils.zernike_generator(n_zernikes=args['gt_n_zernikes'], wfe_dim=args['pupil_diameter'])
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it,:,:] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # Initialize the model
- GT_tf_semiparam_field = wf.tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- output_dim=args['output_dim'],
- n_zernikes=args['gt_n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- # For the Ground truth model
- GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
- _ = GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat.assign(
- np.zeros_like(GT_tf_semiparam_field.tf_np_poly_opd.alpha_mat)
- )
-
-
- ## Metric evaluation on the test dataset
- print('\n***\nMetric evaluation on the test dataset\n***\n')
-
- # Polychromatic star reconstructions
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=args['n_bins_lda'],
- batch_size=args['eval_batch_size']
- )
-
- poly_metric = {
- 'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
- lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list
- )
-
- mono_metric = {
- 'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
- # OPD metrics
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=args['eval_batch_size']
- )
-
- opd_metric = {
- 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
- # Shape metrics
- shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=args['n_bins_lda'],
- output_Q=1,
- output_dim=64,
- batch_size=args['eval_batch_size']
- )
-
- # Save metrics
- test_metrics = {
- 'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
-
- ## Metric evaluation on the train dataset
- print('\n***\nMetric evaluation on the train dataset\n***\n')
-
- # Polychromatic star reconstructions
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf.metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=args['n_bins_lda'],
- batch_size=args['eval_batch_size']
- )
-
- train_poly_metric = {
- 'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
- lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf.metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list
- )
-
- train_mono_metric = {
- 'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
-
- # OPD metrics
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf.metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=args['eval_batch_size']
- )
-
- train_opd_metric = {
- 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
-
- # Shape metrics
- train_shape_results_dict = wf.metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=args['n_bins_lda'],
- output_Q=1,
- output_dim=64,
- batch_size=args['eval_batch_size']
- )
-
- # Save metrics into dictionary
- train_metrics = {
- 'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
-
-
- ## Save results
- metrics = {
- 'test_metrics': test_metrics,
- 'train_metrics': train_metrics
- }
- output_path = args['metric_base_path'] + 'metrics-' + run_id_name
- np.save(output_path, metrics, allow_pickle=True)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f'%(final_time - starting_time))
-
-
- ## Close log file
- print('\n Good bye..')
- sys.stdout = old_stdout
- log_file.close()
-
-
-if __name__ == "__main__":
- main()
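The metrics produced by the deleted evaluation block are saved as a single nested dictionary through `np.save(..., allow_pickle=True)`, which pickles the dict inside a 0-d object array and appends `.npy` to the output path built above. A round-trip sketch (the file name is illustrative):

```python
import numpy as np

metrics = {
    'test_metrics': {'poly_metric': {'rmse': 0.01, 'rel_rmse': 1.2}},
    'train_metrics': {'poly_metric': {'rmse': 0.008, 'rel_rmse': 0.9}},
}

# Save: the dict is pickled inside a 0-d numpy object array.
np.save('metrics-example_run', metrics, allow_pickle=True)

# Load: allow_pickle is required, and .item() unwraps the 0-d array.
loaded = np.load('metrics-example_run.npy', allow_pickle=True).item()
print(loaded['test_metrics']['poly_metric']['rmse'])
```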
diff --git a/long-runs/examples/job_parallel_training_example.sh b/long-runs/examples/job_parallel_training_example.sh
deleted file mode 100644
index 7c364850..00000000
--- a/long-runs/examples/job_parallel_training_example.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-#SBATCH --job-name=5_cycles_256 # job name
-##SBATCH --partition=gpu_p2 # uncomment for the gpu_p2 partition
-#SBATCH --ntasks=1 # total number of MPI tasks (= total number of GPUs)
-#SBATCH --ntasks-per-node=1 # number of MPI tasks per node (= number of GPUs per node)
-#SBATCH --gres=gpu:1 # number of GPUs per node (max 8 with gpu_p2)
-#SBATCH --cpus-per-task=10 # number of CPU cores per task (a quarter of the node here)
-#SBATCH -C v100-32g
-# /!\ Caution: "multithread" refers to hyperthreading in Slurm terminology
-#SBATCH --hint=nomultithread # hyperthreading disabled
-#SBATCH --time=20:00:00 # maximum requested runtime (HH:MM:SS)
-#SBATCH --output=5_cycles_256%j.out # output file name
-#SBATCH --error=5_cycles_256%j.err # error file name (shared with the output here)
-#SBATCH -A ynx@gpu # specify the project
-##SBATCH --qos=qos_gpu-dev # using the dev queue, as this is only for profiling
-#SBATCH --array=0-3
-
-# purge modules loaded interactively or inherited by default
-module purge
-
-# load the required modules
-module load tensorflow-gpu/py3/2.7.0
-
-# echo the launched commands
-set -x
-
-# n_bins ---> number of points per SED (n_bins + 1)
-opt[0]="--id_name _5_cycles_256_proj_d2_45z --project_dd_features True --d_max 2 --n_zernikes 45"
-opt[1]="--id_name _5_cycles_256_no_proj_d2_45z --project_dd_features False --d_max 2 --n_zernikes 45"
-opt[2]="--id_name _5_cycles_256_proj_d5_45z --project_dd_features True --d_max 5 --n_zernikes 45"
-opt[3]="--id_name _5_cycles_256_proj_d2_60z --project_dd_features True --d_max 2 --n_zernikes 60"
-
-
-cd $WORK/repos/wf-SEDs/HD_projected_optimisation/scripts/
-
-srun python -u ./train_project_click_multi_cycle.py \
- --train_opt True \
- --eval_opt True \
- --plot_opt True \
- --model poly \
- --base_path /gpfswork/rech/ynx/uds36vp/repos/wf-SEDs/HD_projected_optimisation/wf-outputs/ \
- --log_folder log-files/ \
- --model_folder chkp/8_bins/ \
- --optim_hist_folder optim-hist/ \
- --chkp_save_path /gpfswork/rech/ynx/uds36vp/repos/wf-SEDs/HD_projected_optimisation/wf-outputs/chkp/8_bins/ \
- --plots_folder plots/ \
- --dataset_folder /gpfswork/rech/ynx/uds36vp/datasets/interp_SEDs/ \
- --train_dataset_file train_Euclid_res_2000_TrainStars_id_009_8_bins_sigma_0.npy \
- --test_dataset_file test_Euclid_res_id_009_8_bins.npy \
- --pupil_diameter 256 \
- --n_bins_lda 8 \
- --output_q 3. \
- --oversampling_rate 3. \
- --output_dim 32 \
- --d_max_nonparam 5 \
- --use_sample_weights True \
- --interpolation_type none \
- --l_rate_param_multi_cycle "0.01 0.0085 0.007 0.0055 0.004" \
- --l_rate_non_param_multi_cycle "0.1 0.09 0.08 0.07 0.06" \
- --n_epochs_param_multi_cycle "10 5 5 5 15" \
- --n_epochs_non_param_multi_cycle "25 25 25 25 50" \
- --save_all_cycles True \
- --total_cycles 5 \
- --cycle_def complete \
- --model_eval poly \
- --metric_base_path /gpfswork/rech/ynx/uds36vp/repos/wf-SEDs/HD_projected_optimisation/wf-outputs/metrics/ \
- --saved_model_type checkpoint \
- --saved_cycle cycle5 \
- --eval_batch_size 16 \
- --n_bins_gt 8 \
- --opt_stars_rel_pix_rmse True \
- --l2_param 0. \
- --base_id_name _5_cycles_256_ \
- --suffix_id_name proj_d2_45z --suffix_id_name no_proj_d2_45z --suffix_id_name proj_d5_45z --suffix_id_name proj_d2_60z \
- --star_numbers 1 --star_numbers 2 --star_numbers 3 --star_numbers 4 \
- ${opt[$SLURM_ARRAY_TASK_ID]} \
-
-## --star_numbers is for the final plot's x-axis. It does not always represent the number of stars.
-## --id_name = --base_id_name + --suffix_id_name
\ No newline at end of file
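The deleted job script runs four model variants from one submission: `--array=0-3` spawns one task per entry of `opt`, each task appends its own flag set via `${opt[$SLURM_ARRAY_TASK_ID]}`, and `--id_name` is the concatenation of `--base_id_name` and the matching `--suffix_id_name`. A hedged Python rendering of that selection logic (the dictionary layout is illustrative, not part of the deleted script):

```python
import os

base_id_name = '_5_cycles_256_'
# One flag set per array task, mirroring the opt[0..3] entries above.
opts = [
    {'suffix': 'proj_d2_45z', 'project_dd_features': True, 'd_max': 2, 'n_zernikes': 45},
    {'suffix': 'no_proj_d2_45z', 'project_dd_features': False, 'd_max': 2, 'n_zernikes': 45},
    {'suffix': 'proj_d5_45z', 'project_dd_features': True, 'd_max': 5, 'n_zernikes': 45},
    {'suffix': 'proj_d2_60z', 'project_dd_features': True, 'd_max': 2, 'n_zernikes': 60},
]

# SLURM exports the array index to every task of a job array.
task_id = int(os.environ.get('SLURM_ARRAY_TASK_ID', '0'))
cfg = opts[task_id]
id_name = base_id_name + cfg['suffix']  # --id_name = --base_id_name + --suffix_id_name
print(f'Task {task_id}: --id_name {id_name}')
```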
diff --git a/long-runs/train_eval_plot_script_click.py b/long-runs/train_eval_plot_script_click.py
deleted file mode 100644
index eaa6d788..00000000
--- a/long-runs/train_eval_plot_script_click.py
+++ /dev/null
@@ -1,376 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# PSF modelling and evaluation
-
-import wf_psf as wf
-import click
-
-# from absl import app
-# from absl import flags
-
-
-@click.command()
-## Script options
-@click.option("--train_opt", default=True, type=bool, help="Train the model.")
-@click.option("--eval_opt", default=True, type=bool, help="Evaluate the model.")
-@click.option("--plot_opt", default=True, type=bool, help="Plot the model results")
-## Training options
-# Model definition
-@click.option(
- "--model",
- default="poly",
- type=str,
- help="Model type. Options are: 'mccd', 'graph', 'poly, 'param', 'poly_physical'."
-)
-@click.option(
- "--id_name",
- default="-coherent_euclid_200stars",
- type=str,
- help="Model saving id."
-)
-# Saving paths
-@click.option(
- "--base_path",
- default="/gpfswork/rech/ynx/ulx23va/wf-outputs/",
- type=str,
- help="Base path for saving files."
-)
-@click.option(
- "--log_folder",
- default="log-files/",
- type=str,
- help="Folder name to save log files."
-)
-@click.option(
- "--model_folder",
- default="chkp/",
- type=str,
- help="Folder name to save trained models."
-)
-@click.option(
- "--optim_hist_folder",
- default="optim-hist/",
- type=str,
- help="Folder name to save optimisation history files."
-)
-@click.option(
- "--chkp_save_path",
- default="/gpfsscratch/rech/ynx/ulx23va/wf-outputs/chkp/",
- type=str,
- help="Path to save model checkpoints during training."
-)
-@click.option(
- "--plots_folder",
- default="plots/",
- type=str,
- help="Folder name to save the generated plots."
-)
-# Input dataset paths
-@click.option(
- "--dataset_folder",
- default="/gpfswork/rech/ynx/ulx23va/repo/wf-psf/data/coherent_euclid_dataset/",
- type=str,
- help="Folder path of datasets."
-)
-@click.option(
- "--train_dataset_file",
- default="train_Euclid_res_200_TrainStars_id_001.npy",
- type=str,
- help="Train dataset file name."
-)
-@click.option(
- "--test_dataset_file",
- default="test_Euclid_res_id_001.npy",
- type=str,
- help="Test dataset file name."
-)
-# Model parameters
-@click.option(
- "--n_zernikes",
- default=15,
- type=int,
- help="Zernike polynomial modes to use on the parametric part."
-)
-@click.option(
- "--pupil_diameter",
- default=256,
- type=int,
- help="Dimension of the OPD/Wavefront space."
-)
-@click.option(
- "--n_bins_lda",
- default=20,
- type=int,
- help="Number of wavelength bins to use to reconstruct polychromatic objects."
-)
-@click.option(
- "--output_q",
- default=3.,
- type=float,
- help="Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model."
-)
-@click.option(
- "--oversampling_rate",
- default=3.,
- type=float,
- help="Oversampling rate used for the OPD/WFE PSF model."
-)
-@click.option(
- "--output_dim",
- default=32,
- type=int,
- help="Dimension of the pixel PSF postage stamp."
-)
-@click.option(
- "--d_max",
- default=2,
- type=int,
- help="Max polynomial degree of the parametric part."
-)
-@click.option(
- "--d_max_nonparam",
- default=3,
- type=int,
- help="Max polynomial degree of the non-parametric part."
-)
-@click.option(
- "--x_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the x axis."
-)
-@click.option(
- "--y_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the y axis."
-)
-@click.option(
- "--graph_features",
- default=10,
- type=int,
- help="Number of graph-constrained features of the non-parametric part."
-)
-@click.option(
- "--l1_rate",
- default=1e-8,
- type=float,
- help="L1 regularisation parameter for the non-parametric part."
-)
-@click.option(
- "--use_sample_weights",
- default=False,
- type=bool,
- help="Boolean to define if we use sample weights based on the noise standard deviation estimation."
-)
-@click.option(
- "--interpolation_type",
- default="none",
- type=str,
- help="The interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'."
-)
-# Training parameters
-@click.option(
- "--batch_size",
- default=32,
- type=int,
- help="Batch size used for the trainingin the stochastic gradient descend type of algorithm."
-)
-# Old multicycle parameters for backwards compatibility.
-@click.option(
- "--l_rate_param",
- nargs=2,
- default=None,
- type=float,
- help="Learning rates for the parametric parts."
-)
-@click.option(
- "--l_rate_non_param",
- nargs=2,
- default=None,
- type=float,
- help="Learning rates for the non-parametric parts."
-)
-@click.option(
- "--n_epochs_param",
- nargs=2,
- default=None,
- type=int,
- help="Number of training epochs of the parametric parts."
-)
-@click.option(
- "--n_epochs_non_param",
- nargs=2,
- default=None,
- type=int,
- help="Number of training epochs of the non-parametric parts."
-)
-# New multicycle parameters
-@click.option(
- "--l_rate_param_multi_cycle",
- default="1e-2 1e-2",
- type=str,
- help="Learning rates for the parametric parts. It should be a strign where numeric values are separated by spaces."
-)
-@click.option(
- "--l_rate_non_param_multi_cycle",
- default="1e-1 1e-1",
- type=str,
- help="Learning rates for the non-parametric parts. It should be a strign where numeric values are separated by spaces."
-)
-@click.option(
- "--n_epochs_param_multi_cycle",
- default="20 20",
- type=str,
- help="Number of training epochs of the parametric parts. It should be a strign where numeric values are separated by spaces."
-)
-@click.option(
- "--n_epochs_non_param_multi_cycle",
- default="100 120",
- type=str,
- help="Number of training epochs of the non-parametric parts. It should be a strign where numeric values are separated by spaces."
-)
-@click.option(
- "--save_all_cycles",
- default=False,
- type=bool,
- help="Make checkpoint at every cycle or just save the checkpoint at the end of the training."
-)
-@click.option(
- "--total_cycles",
- default=2,
- type=int,
- help="Total amount of cycles to perform. For the moment the only available options are '1' or '2'."
-)
-@click.option(
- "--cycle_def",
- default="complete",
- type=str,
- help="Train cycle definition. It can be: 'parametric', 'non-parametric', 'complete', 'only-non-parametric' and 'only-parametric'."
-)
-## Evaluation flags
-# Saving paths
-@click.option(
- "--model_eval",
- default="poly",
- type=str,
- help="Model used as ground truth for the evaluation. Options are: 'poly', 'physical'."
-)
-@click.option(
- "--metric_base_path",
- default="/gpfswork/rech/ynx/ulx23va/wf-outputs/metrics/",
- type=str,
- help="Base path for saving metric files."
-)
-@click.option(
- "--saved_model_type",
- default="final",
- type=str,
- help="Type of saved model to use for the evaluation. Can be 'final' or 'checkpoint'."
-)
-@click.option(
- "--saved_cycle",
- default="cycle2",
- type=str,
- help="Saved cycle to use for the evaluation. Can be 'cycle1' or 'cycle2'."
-)
-# Evaluation parameters
-@click.option(
- "--gt_n_zernikes",
- default=45,
- type=int,
- help="Zernike polynomial modes to use on the ground truth model parametric part."
-)
-@click.option(
- "--eval_batch_size",
- default=16,
- type=int,
- help="Batch size to use for the evaluation."
-)
-@click.option(
- "--n_bins_gt",
- default=20,
- type=int,
- help="Number of bins used for the ground truth model poly PSF generation."
-)
-@click.option(
- "--opt_stars_rel_pix_rmse",
- default=False,
- type=bool,
- help="Save RMS error for each super resolved PSF in the test dataset in addition to the mean across the FOV."
-)
-## Specific parameters
-@click.option(
- "--l2_param",
- default=0.,
- type=float,
- help="Parameter for the l2 loss of the OPD."
-)
-## Plot parameters
-@click.option(
- "--base_id_name",
- default="-coherent_euclid_",
- type=str,
- help="Plot parameter. Base id_name before dataset suffix are added."
-)
-@click.option(
- "--suffix_id_name",
- default=["2c", "5c"],
- multiple=True,
- type=str,
- help="Plot parameter. Suffix needed to recreate the different id names."
-)
-@click.option(
- "--star_numbers",
- default=[200, 500],
- multiple=True,
- type=int,
- help="Plot parameter. Training star number of the different models evaluated. Needs to correspond with the `suffix_id_name`."
-)
-# Feature: SED interp
-@click.option(
- "--interp_pts_per_bin",
- default=0,
- type=int,
- help="Number of points per bin to add during the interpolation process. It can take values {0,1,2,3}, where 0 means no interpolation."
-)
-@click.option(
- "--extrapolate",
- default=True,
- type=bool,
- help="Whether extrapolation is performed or not on the borders of the SED."
-)
-@click.option(
- "--SED_interp_kind",
- default="linear",
- type=str,
- help="Type of interpolation for the SED."
-)
-# Feature: project parameters
-@click.option(
- "--project_dd_features",
- default=False,
- type=bool,
- help="Project NP DD features onto parametric model."
-)
-
-
-def main(**args):
- print(args)
- if args['train_opt']:
- print('Training...')
- wf.script_utils.train_model(**args)
- if args['eval_opt']:
- print('Evaluation...')
- wf.script_utils.evaluate_model(**args)
- if args['plot_opt']:
- print('Plotting...')
- wf.script_utils.plot_metrics(**args)
- wf.script_utils.plot_optimisation_metrics(**args)
-
-
-if __name__ == "__main__":
- main()
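The deleted `main` illustrates the dispatch pattern shared by the `long-runs` scripts: every stage is gated by a boolean click option, and the full option dictionary is forwarded to each stage with `**args`. A stripped-down, self-contained sketch of the same pattern (the print calls stand in for the `wf.script_utils` functions):

```python
import click

@click.command()
@click.option("--train_opt", default=True, type=bool, help="Train the model.")
@click.option("--eval_opt", default=True, type=bool, help="Evaluate the model.")
@click.option("--id_name", default="-coherent_euclid_200stars", type=str, help="Model saving id.")
def main(**args):
    # click gathers every option into a single kwargs dict, so each
    # stage receives the complete configuration unchanged.
    if args['train_opt']:
        print('Training...')    # e.g. wf.script_utils.train_model(**args)
    if args['eval_opt']:
        print('Evaluation...')  # e.g. wf.script_utils.evaluate_model(**args)

if __name__ == "__main__":
    main()
```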
diff --git a/long-runs/train_eval_plot_script_click_multi_cycle.py b/long-runs/train_eval_plot_script_click_multi_cycle.py
deleted file mode 100644
index 4ca2cb52..00000000
--- a/long-runs/train_eval_plot_script_click_multi_cycle.py
+++ /dev/null
@@ -1,426 +0,0 @@
-#!/usr/bin/env python3
-# coding: utf-8
-
-# PSF modelling and evaluation
-"""
-This script is a second version of the script 'train_eval_plot_script_click.py' in the same folder.
-
-This script allows for a multi-cycle optimisation where the number of cycles can be greater than 2.
-
-For backwards compatibility the old click parameters concerning multiple cycles are preserved.
-
-"""
-import wf_psf as wf
-import click
-
-# from absl import app
-# from absl import flags
-
-
-@click.command()
-## Script options
-@click.option("--train_opt", default=True, type=bool, help="Train the model.")
-@click.option("--eval_opt", default=True, type=bool, help="Evaluate the model.")
-@click.option("--plot_opt", default=True, type=bool, help="Plot the model results")
-## Training options
-# Model definition
-@click.option(
- "--model",
- default="poly",
- type=str,
- help="Model type. Options are: 'mccd', 'graph', 'poly, 'param', 'poly_physical'."
-)
-@click.option(
- "--id_name",
- default="-coherent_euclid_2000stars",
- type=str,
- help="Model saving id."
-)
-# Saving paths
-@click.option(
- "--base_path",
- default="/home/ecentofanti/wf-SEDs/WFE_sampling_test/wf-outputs/",
- type=str,
- help="Base path for saving files."
-)
-@click.option(
- "--log_folder",
- default="log-files/",
- type=str,
- help="Folder name to save log files."
-)
-@click.option(
- "--model_folder",
- default="chkp/",
- type=str,
- help="Folder name to save trained models."
-)
-@click.option(
- "--optim_hist_folder",
- default="optim-hist/",
- type=str,
- help="Folder name to save optimisation history files."
-)
-@click.option(
- "--chkp_save_path",
- default="/home/ecentofanti/wf-SEDs/WFE_sampling_test/wf-outputs/chkp/",
- type=str,
- help="Path to save model checkpoints during training."
-)
-@click.option(
- "--plots_folder",
- default="plots/",
- type=str,
- help="Folder name to save the generated plots."
-)
-# Input dataset paths
-@click.option(
- "--dataset_folder",
- default="/n05data/ecentofanti/WFE_sampling_test/multires_dataset/",
- type=str,
- help="Folder path of datasets."
-)
-@click.option(
- "--train_dataset_file",
- default="train_Euclid_res_2000_TrainStars_id_004_wfeRes_256.npy",
- type=str,
- help="Train dataset file name."
-)
-@click.option(
- "--test_dataset_file",
- default="test_Euclid_res_id_004_wfeRes_256.npy",
- type=str,
- help="Test dataset file name."
-)
-# Model parameters
-@click.option(
- "--n_zernikes",
- default=15,
- type=int,
- help="Zernike polynomial modes to use on the parametric part."
-)
-@click.option(
- "--pupil_diameter",
- default=256,
- type=int,
- help="Dimension of the OPD/Wavefront space."
-)
-@click.option(
- "--n_bins_lda",
- default=20,
- type=int,
- help="Number of wavelength bins to use to reconstruct polychromatic objects."
-)
-@click.option(
- "--output_q",
- default=3.,
- type=float,
- help="Downsampling rate to match the specified telescope's sampling from the oversampling rate used in the model."
-)
-@click.option(
- "--oversampling_rate",
- default=3.,
- type=float,
- help="Oversampling rate used for the OPD/WFE PSF model."
-)
-@click.option(
- "--output_dim",
- default=32,
- type=int,
- help="Dimension of the pixel PSF postage stamp."
-)
-@click.option(
- "--d_max",
- default=2,
- type=int,
- help="Max polynomial degree of the parametric part."
-)
-@click.option(
- "--d_max_nonparam",
- default=3,
- type=int,
- help="Max polynomial degree of the non-parametric part."
-)
-@click.option(
- "--x_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the x axis."
-)
-@click.option(
- "--y_lims",
- nargs=2,
- default=[0, 1e3],
- type=float,
- help="Limits of the PSF field coordinates for the y axis."
-)
-@click.option(
- "--graph_features",
- default=10,
- type=int,
- help="Number of graph-constrained features of the non-parametric part."
-)
-@click.option(
- "--l1_rate",
- default=1e-8,
- type=float,
- help="L1 regularisation parameter for the non-parametric part."
-)
-@click.option(
- "--use_sample_weights",
- default=False,
- type=bool,
- help="Boolean to define if we use sample weights based on the noise standard deviation estimation."
-)
-@click.option(
- "--interpolation_type",
- default="none",
- type=str,
- help="The interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'."
-)
-# Training parameters
-@click.option(
- "--batch_size",
- default=32,
- type=int,
- help="Batch size used for the trainingin the stochastic gradient descend type of algorithm."
-)
-# Old multicycle parameters for backwards compatibility.
-@click.option(
- "--l_rate_param",
- nargs=2,
- default=None,
- type=float,
- help="Learning rates for the parametric parts."
-)
-@click.option(
- "--l_rate_non_param",
- nargs=2,
- default=None,
- type=float,
- help="Learning rates for the non-parametric parts."
-)
-@click.option(
- "--n_epochs_param",
- nargs=2,
- default=None,
- type=int,
- help="Number of training epochs of the parametric parts."
-)
-@click.option(
- "--n_epochs_non_param",
- nargs=2,
- default=None,
- type=int,
- help="Number of training epochs of the non-parametric parts."
-)
-# New multicycle parameters
-@click.option(
- "--l_rate_param_multi_cycle",
- default="1e-2 1e-2",
- type=str,
- help="Learning rates for the parametric parts. It should be a string where numeric values are separated by spaces."
-)
-@click.option(
- "--l_rate_non_param_multi_cycle",
- default="1e-1 1e-1",
- type=str,
- help="Learning rates for the non-parametric parts. It should be a string where numeric values are separated by spaces."
-)
-@click.option(
- "--n_epochs_param_multi_cycle",
- default="20 20",
- type=str,
- help="Number of training epochs of the parametric parts. It should be a string where integer values are separated by spaces."
-)
-@click.option(
- "--n_epochs_non_param_multi_cycle",
- default="100 120",
- type=str,
- help="Number of training epochs of the non-parametric parts. It should be a string where integer values are separated by spaces."
-)
-@click.option(
- "--save_all_cycles",
- default=False,
- type=bool,
- help="Make checkpoint at every cycle or just save the checkpoint at the end of the training."
-)
-@click.option(
- "--total_cycles",
- default=2,
- type=int,
- help="Total amount of cycles to perform."
-)
-@click.option(
- "--cycle_def",
- default="complete",
- type=str,
- help="Train cycle definition. It can be: 'parametric', 'non-parametric', 'complete', 'only-non-parametric' and 'only-parametric'."
-)
-## Evaluation flags
-# Saving paths
-@click.option(
- "--model_eval",
- default="poly",
- type=str,
- help="Model used as ground truth for the evaluation. Options are: 'poly', 'physical'."
-)
-@click.option(
- "--metric_base_path",
- default="/home/ecentofanti/wf-SEDs/WFE_sampling_test/wf-outputs/metrics/",
- type=str,
- help="Base path for saving metric files."
-)
-@click.option(
- "--saved_model_type",
- default="final",
- type=str,
- help="Type of saved model to use for the evaluation. Can be 'final' for a full model, 'checkpoint' for model weights or 'external' for a different model not saved under the same base_id as the current one."
-)
-@click.option(
- "--saved_cycle",
- default="cycle2",
- type=str,
- help="Saved cycle to use for the evaluation. Can be 'cycle1' or 'cycle2'."
-)
-# Evaluation parameters
-@click.option(
- "--gt_n_zernikes",
- default=45,
- type=int,
- help="Zernike polynomial modes to use on the ground truth model parametric part."
-)
-@click.option(
- "--eval_batch_size",
- default=16,
- type=int,
- help="Batch size to use for the evaluation."
-)
-@click.option(
- "--n_bins_gt",
- default=20,
- type=int,
- help="Number of bins used for the ground truth model poly PSF generation."
-)
-@click.option(
- "--opt_stars_rel_pix_rmse",
- default=False,
- type=bool,
- help="Save RMS error for each super resolved PSF in the test dataset in addition to the mean across the FOV."
-)
-@click.option(
- "--eval_mono_metric_rmse",
- default=True,
- type=bool,
- help="Evaluate the monchromatic RMSE metric."
-)
-@click.option(
- "--eval_opd_metric_rmse",
- default=True,
- type=bool,
- help="Evaluate the OPD RMSE metric."
-)
-@click.option(
- "--eval_train_shape_sr_metric_rmse",
- default=True,
- type=bool,
- help="Evaluate the super-resolution and the shape RMSE metrics for the train dataset."
-)
-## Specific parameters
-@click.option(
- "--l2_param",
- default=0.,
- type=float,
- help="Parameter for the l2 loss of the OPD."
-)
-## Plot parameters
-@click.option(
- "--base_id_name",
- default="-multires_euclid_",
- type=str,
- help="Plot parameter. Base id_name before dataset suffix are added."
-)
-@click.option(
- "--suffix_id_name",
- default=["2c", "5c"],
- multiple=True,
- type=str,
- help="Plot parameter. Suffix needed to recreate the different id names."
-)
-@click.option(
- "--star_numbers",
- default=[200, 500],
- multiple=True,
- type=int,
- help="Plot parameter. Training star number of the different models evaluated. Needs to correspond with the `suffix_id_name`."
-)
-# Feature: SED interp
-@click.option(
- "--interp_pts_per_bin",
- default=0,
- type=int,
- help="Number of points per bin to add during the interpolation process. It can take values {0,1,2,3}, where 0 means no interpolation."
-)
-@click.option(
- "--extrapolate",
- default=True,
- type=bool,
- help="Whether extrapolation is performed or not on the borders of the SED."
-)
-@click.option(
- "--sed_interp_kind",
- default="linear",
- type=str,
- help="Type of interpolation for the SED."
-)
-@click.option(
- "--sed_sigma",
- default=0,
- type=float,
- help="Standard deviation of the multiplicative SED Gaussian noise."
-)
-# Feature: project parameters
-@click.option(
- "--project_dd_features",
- default=False,
- type=bool,
- help="Project NP DD features onto parametric model."
-)
-@click.option(
- "--eval_only_param",
- default=False,
- type=bool,
- help="Use only the parametric model for evaluation."
-)
-@click.option(
- "--reset_dd_features",
- default=False,
- type=bool,
- help="Reset to random initialisation the non-parametric model after projecting."
-)
-@click.option(
- "--pretrained_model",
- default=None,
- type=str,
- help="Path to pretrained model checkpoint callback."
-)
-
-
-def main(**args):
- args = wf.utils.load_multi_cycle_params_click(args)
- print(args)
- if args['train_opt']:
- print('Training...')
- wf.script_utils.train_model(**args)
- if args['eval_opt']:
- print('Evaluation...')
- wf.script_utils.evaluate_model(**args)
- if args['plot_opt']:
- print('Plotting...')
- wf.script_utils.plot_metrics(**args)
- wf.script_utils.plot_optimisation_metrics(**args)
-
-
-if __name__ == "__main__":
- main()
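The multi-cycle options above are plain strings of space-separated values, and `wf.utils.load_multi_cycle_params_click` converts them into per-cycle lists before training; its implementation is not part of this diff. A plausible minimal sketch of that conversion (key names follow the old two-cycle options, which the docstring says are preserved for backwards compatibility):

```python
def parse_multi_cycle(args):
    """Hypothetical stand-in for wf.utils.load_multi_cycle_params_click:
    split the space-separated multi-cycle strings into per-cycle lists."""
    args = dict(args)
    args['l_rate_param'] = [float(v) for v in args['l_rate_param_multi_cycle'].split()]
    args['l_rate_non_param'] = [float(v) for v in args['l_rate_non_param_multi_cycle'].split()]
    args['n_epochs_param'] = [int(v) for v in args['n_epochs_param_multi_cycle'].split()]
    args['n_epochs_non_param'] = [int(v) for v in args['n_epochs_non_param_multi_cycle'].split()]
    return args

example = parse_multi_cycle({
    'l_rate_param_multi_cycle': '0.01 0.0085 0.007',
    'l_rate_non_param_multi_cycle': '0.1 0.09 0.08',
    'n_epochs_param_multi_cycle': '10 5 5',
    'n_epochs_non_param_multi_cycle': '25 25 25',
})
print(example['l_rate_param'])  # [0.01, 0.0085, 0.007]
```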
diff --git a/method-comparison/config_files/default.conv b/method-comparison/config_files/default.conv
deleted file mode 100644
index 2590b9cb..00000000
--- a/method-comparison/config_files/default.conv
+++ /dev/null
@@ -1,5 +0,0 @@
-CONV NORM
-# 3x3 ``all-ground'' convolution mask with FWHM = 2 pixels.
-1 2 1
-2 4 2
-1 2 1
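`default.conv` defines the detection filter that SExtractor convolves with the image before thresholding; `CONV NORM` tells it to renormalize the mask on load. A sketch of the equivalent operation in Python with the same coefficients:

```python
import numpy as np
from scipy.signal import convolve2d

# 3x3 "all-ground" mask with FWHM = 2 pixels, as in default.conv.
mask = np.array([[1., 2., 1.],
                 [2., 4., 2.],
                 [1., 2., 1.]])
mask /= mask.sum()  # NORM: renormalize so the filter preserves flux

image = np.random.randn(64, 64)  # stand-in for a background-subtracted image
filtered = convolve2d(image, mask, mode='same', boundary='symm')
```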
diff --git a/method-comparison/config_files/default.nnw b/method-comparison/config_files/default.nnw
deleted file mode 100644
index 4b521bd4..00000000
--- a/method-comparison/config_files/default.nnw
+++ /dev/null
@@ -1,28 +0,0 @@
-NNW
-# Neural Network Weights for the SExtractor star/galaxy classifier (V1.3)
-# inputs: 9 for profile parameters + 1 for seeing.
-# outputs: ``Stellarity index'' (0.0 to 1.0)
-# Seeing FWHM range: from 0.025 to 5.5'' (images must have 1.5 < FWHM < 5 pixels)
-# Optimized for Moffat profiles with 2<= beta <= 4.
-
- 3 10 10 1
-
--1.56604e+00 -2.48265e+00 -1.44564e+00 -1.24675e+00 -9.44913e-01 -5.22453e-01 4.61342e-02 8.31957e-01 2.15505e+00 2.64769e-01
- 3.03477e+00 2.69561e+00 3.16188e+00 3.34497e+00 3.51885e+00 3.65570e+00 3.74856e+00 3.84541e+00 4.22811e+00 3.27734e+00
-
--3.22480e-01 -2.12804e+00 6.50750e-01 -1.11242e+00 -1.40683e+00 -1.55944e+00 -1.84558e+00 -1.18946e-01 5.52395e-01 -4.36564e-01 -5.30052e+00
- 4.62594e-01 -3.29127e+00 1.10950e+00 -6.01857e-01 1.29492e-01 1.42290e+00 2.90741e+00 2.44058e+00 -9.19118e-01 8.42851e-01 -4.69824e+00
--2.57424e+00 8.96469e-01 8.34775e-01 2.18845e+00 2.46526e+00 8.60878e-02 -6.88080e-01 -1.33623e-02 9.30403e-02 1.64942e+00 -1.01231e+00
- 4.81041e+00 1.53747e+00 -1.12216e+00 -3.16008e+00 -1.67404e+00 -1.75767e+00 -1.29310e+00 5.59549e-01 8.08468e-01 -1.01592e-02 -7.54052e+00
- 1.01933e+01 -2.09484e+01 -1.07426e+00 9.87912e-01 6.05210e-01 -6.04535e-02 -5.87826e-01 -7.94117e-01 -4.89190e-01 -8.12710e-02 -2.07067e+01
--5.31793e+00 7.94240e+00 -4.64165e+00 -4.37436e+00 -1.55417e+00 7.54368e-01 1.09608e+00 1.45967e+00 1.62946e+00 -1.01301e+00 1.13514e-01
- 2.20336e-01 1.70056e+00 -5.20105e-01 -4.28330e-01 1.57258e-03 -3.36502e-01 -8.18568e-02 -7.16163e+00 8.23195e+00 -1.71561e-02 -1.13749e+01
- 3.75075e+00 7.25399e+00 -1.75325e+00 -2.68814e+00 -3.71128e+00 -4.62933e+00 -2.13747e+00 -1.89186e-01 1.29122e+00 -7.49380e-01 6.71712e-01
--8.41923e-01 4.64997e+00 5.65808e-01 -3.08277e-01 -1.01687e+00 1.73127e-01 -8.92130e-01 1.89044e+00 -2.75543e-01 -7.72828e-01 5.36745e-01
--3.65598e+00 7.56997e+00 -3.76373e+00 -1.74542e+00 -1.37540e-01 -5.55400e-01 -1.59195e-01 1.27910e-01 1.91906e+00 1.42119e+00 -4.35502e+00
-
--1.70059e+00 -3.65695e+00 1.22367e+00 -5.74367e-01 -3.29571e+00 2.46316e+00 5.22353e+00 2.42038e+00 1.22919e+00 -9.22250e-01 -2.32028e+00
-
-
- 0.00000e+00
- 1.00000e+00
diff --git a/method-comparison/config_files/default.param b/method-comparison/config_files/default.param
deleted file mode 100644
index 0c0845b2..00000000
--- a/method-comparison/config_files/default.param
+++ /dev/null
@@ -1,11 +0,0 @@
-NUMBER
-XWIN_IMAGE
-YWIN_IMAGE
-FLUX_AUTO
-FLUXERR_AUTO
-FLUX_RADIUS
-FLAGS
-FWHM_IMAGE
-VIGNET(32,32)
-SNR_WIN
-ELONGATION
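`default.param` asks SExtractor to write, per detection, windowed positions, fluxes and a 32×32 postage stamp (`VIGNET(32,32)`) into a `FITS_LDAC` catalog. A sketch of reading the stamps back with `astropy`, assuming the standard LDAC layout where the measurements live in the `LDAC_OBJECTS` extension (the file name is illustrative):

```python
from astropy.io import fits

with fits.open('sexcat-0000000-0.fits') as hdul:
    objects = hdul['LDAC_OBJECTS'].data
    vignets = objects['VIGNET']  # shape: (n_detections, 32, 32)
    positions = list(zip(objects['XWIN_IMAGE'], objects['YWIN_IMAGE']))

print(vignets.shape, len(positions))
```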
diff --git a/method-comparison/config_files/default.sex b/method-comparison/config_files/default.sex
deleted file mode 100644
index 6d7a391d..00000000
--- a/method-comparison/config_files/default.sex
+++ /dev/null
@@ -1,132 +0,0 @@
-## Default configuration file for SExtractor 2.19.5
-# EB 2017-11-30
-#
-
-#-------------------------------- Catalog ------------------------------------
-
-CATALOG_TYPE FITS_LDAC
-
-PARAMETERS_NAME /Users/tliaudat/Documents/PhD/codes/WF_PSF/comparison-PSF-methods/output-datasets/psfex/config_files/default.param
-
-#------------------------------- Extraction ----------------------------------
-
-DETECT_TYPE CCD # CCD (linear) or PHOTO (with gamma correction)
-DETECT_MINAREA 3 # min. # of pixels above threshold
-DETECT_MAXAREA 0 # max. # of pixels above threshold (0=unlimited)
-THRESH_TYPE RELATIVE # threshold type: RELATIVE (in sigmas)
- # or ABSOLUTE (in ADUs)
-DETECT_THRESH 3.0 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
-ANALYSIS_THRESH 1.5 # <sigmas> or <threshold>,<ZP> in mag.arcsec-2
-
-FILTER N # apply filter for detection (Y or N)?
-FILTER_NAME /Users/tliaudat/Documents/PhD/codes/WF_PSF/comparison-PSF-methods/output-datasets/psfex/config_files/default.conv
-FILTER_NAME gauss_2.0_5x5.conv # name of the file containing the filter
-# FILTER_THRESH # Threshold[s] for retina filtering
-
-DEBLEND_NTHRESH 32 # Number of deblending sub-thresholds
-DEBLEND_MINCONT 1 # Minimum contrast parameter for deblending
-
-CLEAN N # Clean spurious detections? (Y or N)?
-CLEAN_PARAM 1.0 # Cleaning efficiency
-
-MASK_TYPE NONE # type of detection MASKing: can be one of
- # NONE, BLANK or CORRECT
-
-#-------------------------------- WEIGHTing ----------------------------------
-
-WEIGHT_TYPE NONE # type of WEIGHTing: NONE, BACKGROUND,
- # MAP_RMS, MAP_VAR or MAP_WEIGHT
-# RESCALE_WEIGHTS Y # Rescale input weights/variances (Y/N)?
-# WEIGHT_IMAGE weight.fits # weight-map filename
-# WEIGHT_GAIN Y # modulate gain (E/ADU) with weights? (Y/N)
-# WEIGHT_THRESH # weight threshold[s] for bad pixels
-
-#-------------------------------- FLAGging -----------------------------------
-
-FLAG_IMAGE flag.fits # filename for an input FLAG-image
-FLAG_TYPE OR # flag pixel combination: OR, AND, MIN, MAX
- # or MOST
-
-#------------------------------ Photometry -----------------------------------
-
-PHOT_APERTURES 5 # MAG_APER aperture diameter(s) in pixels
-PHOT_AUTOPARAMS 2.5, 3.5 # MAG_AUTO parameters: <Kron_fact>,<min_radius>
-PHOT_PETROPARAMS 2.0, 3.5 # MAG_PETRO parameters: <Petrosian_fact>,
- # <min_radius>
-PHOT_AUTOAPERS 0.0,0.0 # <estimation>,<measurement> minimum apertures
- # for MAG_AUTO and MAG_PETRO
-PHOT_FLUXFRAC 0.5 # flux fraction[s] used for FLUX_RADIUS
-
-SATUR_KEY SATURATE # keyword for saturation level (in ADUs)
-
-MAG_ZEROPOINT 30.0 # magnitude zero-point
-MAG_GAMMA 4.0 # gamma of emulsion (for photographic scans)
-GAIN 0.0 # detector gain in e-/ADU
-GAIN_KEY GAIN # keyword for detector gain in e-/ADU
-PIXEL_SCALE 1. # size of pixel in arcsec (0=use FITS WCS info)
-
-#------------------------- Star/Galaxy Separation ----------------------------
-
-SEEING_FWHM 2.2 # stellar FWHM in arcsec
-STARNNW_NAME /Users/tliaudat/Documents/PhD/codes/WF_PSF/comparison-PSF-methods/output-datasets/psfex/config_files/default.nnw
-
-#------------------------------ Background -----------------------------------
-
-# BACK_TYPE MANUAL # AUTO or MANUAL
-BACK_TYPE AUTO # AUTO or MANUAL
-BACK_VALUE 0.0 # Default background value in MANUAL mode
-BACK_SIZE 64 # Background mesh: <size> or <width>,<height>
-BACK_FILTERSIZE 3 # Background filter: <size> or <width>,<height>
-
-BACKPHOTO_TYPE GLOBAL # can be GLOBAL or LOCAL
-BACKPHOTO_THICK 24 # thickness of the background LOCAL annulus
-BACK_FILTTHRESH 0.0 # Threshold above which the background-
- # map filter operates
-
-#------------------------------ Check Image ----------------------------------
-
-CHECKIMAGE_TYPE NONE #BACKGROUND_RMS,BACKGROUND
-# can be NONE, BACKGROUND, BACKGROUND_RMS,
- # MINIBACKGROUND, MINIBACK_RMS, -BACKGROUND,
- # FILTERED, OBJECTS, -OBJECTS, SEGMENTATION,
- # or APERTURES
-# CHECKIMAGE_NAME check.fits,back.fits
-# Filename for the check-image
-
-#--------------------- Memory (change with caution!) -------------------------
-
-MEMORY_OBJSTACK 3000 # number of objects in stack
-MEMORY_PIXSTACK 300000 # number of pixels in stack
-MEMORY_BUFSIZE 1024 # number of lines in buffer
-
-#------------------------------- ASSOCiation ---------------------------------
-
-ASSOC_NAME sky.list # name of the ASCII file to ASSOCiate
-ASSOC_DATA 2,3,4 # columns of the data to replicate (0=all)
-ASSOC_PARAMS 2,3,4 # columns of xpos,ypos[,mag]
-ASSOCCOORD_TYPE PIXEL # ASSOC coordinates: PIXEL or WORLD
-ASSOC_RADIUS 2.0 # cross-matching radius (pixels)
-ASSOC_TYPE NEAREST # ASSOCiation method: FIRST, NEAREST, MEAN,
- # MAG_MEAN, SUM, MAG_SUM, MIN or MAX
-ASSOCSELEC_TYPE MATCHED # ASSOC selection type: ALL, MATCHED or -MATCHED
-
-#----------------------------- Miscellaneous ---------------------------------
-
-VERBOSE_TYPE FULL # can be QUIET, NORMAL or FULL
-HEADER_SUFFIX .head # Filename extension for additional headers
-WRITE_XML N # Write XML file (Y/N)?
-
-NTHREADS 1 # 1 single thread
-
-FITS_UNSIGNED N # Treat FITS integer values as unsigned (Y/N)?
-INTERP_MAXXLAG 16 # Max. lag along X for 0-weight interpolation
-INTERP_MAXYLAG 16 # Max. lag along Y for 0-weight interpolation
-INTERP_TYPE ALL # Interpolation type: NONE, VAR_ONLY or ALL
-
-#--------------------------- Experimental Stuff -----------------------------
-
-#PSF_NAME default.psf # File containing the PSF model
-#PSF_NMAX 1 # Max.number of PSFs fitted simultaneously
-#PATTERN_TYPE RINGS-HARMONIC # can RINGS-QUADPOLE, RINGS-OCTOPOLE,
- # RINGS-HARMONICS or GAUSS-LAGUERRE
-#SOM_NAME default.som # File containing Self-Organizing Map weights
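The deleted `default.sex` hard-codes absolute paths for `PARAMETERS_NAME` and `STARNNW_NAME`; since any configuration key can be overridden on the SExtractor command line, a portable run can keep the config generic and inject paths at call time. A sketch of driving such a run from Python (all paths are illustrative):

```python
import subprocess

cmd = [
    'sex', 'exposure.fits',
    '-c', 'config_files/default.sex',
    # Override the hard-coded absolute paths from the config file.
    '-PARAMETERS_NAME', 'config_files/default.param',
    '-FILTER_NAME', 'config_files/gauss_2.0_5x5.conv',
    '-STARNNW_NAME', 'config_files/default.nnw',
    '-CATALOG_NAME', 'sexcat-0000000-0.fits',
]
subprocess.run(cmd, check=True)
```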
diff --git a/method-comparison/config_files/gauss_2.0_5x5.conv b/method-comparison/config_files/gauss_2.0_5x5.conv
deleted file mode 100644
index 09e2c682..00000000
--- a/method-comparison/config_files/gauss_2.0_5x5.conv
+++ /dev/null
@@ -1,7 +0,0 @@
-CONV NORM
-# 5x5 convolution mask of a gaussian PSF with FWHM = 2.0 pixels.
-0.006319 0.040599 0.075183 0.040599 0.006319
-0.040599 0.260856 0.483068 0.260856 0.040599
-0.075183 0.483068 0.894573 0.483068 0.075183
-0.040599 0.260856 0.483068 0.260856 0.040599
-0.006319 0.040599 0.075183 0.040599 0.006319
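The 5×5 mask above can be regenerated, up to an overall scale that `CONV NORM` removes anyway, by integrating a FWHM = 2.0 pixel Gaussian over each pixel. A sketch of the reconstruction; it matches the tabulated coefficients to within roughly a percent:

```python
import numpy as np
from scipy.special import erf

fwhm = 2.0
sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))

# 1D pixel-integrated Gaussian weights on a 5-pixel grid.
x = np.arange(-2, 3)
w = 0.5 * (erf((x + 0.5) / (sigma * np.sqrt(2.0)))
           - erf((x - 0.5) / (sigma * np.sqrt(2.0))))

mask = np.outer(w, w)  # the 2D mask is separable
mask /= mask.sum()     # normalize, as SExtractor does for CONV NORM
print(np.round(mask / mask.max(), 6))  # compare against gauss_2.0_5x5.conv
```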
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id08.ini b/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id08.ini
deleted file mode 100644
index b06bf634..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id08.ini
+++ /dev/null
@@ -1,149 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id08/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id08/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 3.00
-KSIG_GLOB = 3.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-UPFACT = 3
-
-[FIT]
-LOC_MODEL = rca
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id08/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id08/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id08/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# ie sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low as a hihg quantity of low quality
-# stars will be rejected. ie 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model.
-# ie 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model.
-# ie 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated we perform an outlier check on the training
-# stars. We divide each star in two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE error
-# of the residual divided by the noise level is over the RMSE_THRESH,
-# the star will be considered an outlier. A perfect reconstruction
-# would have RMSE_THRESH equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD is bigger than
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. For the moment the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use, it can be: 'rca',
-# 'poly' or 'hybrid'.
-# When the poly model is used, N_COMP_LOC should be used
-# as the D_LOC (max degree of the poly model)
-# PSF_SIZE : First guess of the PSF size. A size estimation is done anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2, sigma
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory where to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
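The MCCD configs follow a fixed four-section INI layout (`[INPUTS]`, `[INSTANCE]`, `[FIT]`, `[VALIDATION]`). The MCCD package has its own reader; a minimal standard-library sketch for sanity-checking such a file before submitting a job:

```python
import configparser

config = configparser.ConfigParser()
config.read('config_MCCD_SR_wf_exp_id08.ini')

# Check that the sections the MCCD runner expects are present.
for section in ('INPUTS', 'INSTANCE', 'FIT', 'VALIDATION'):
    if section not in config:
        raise KeyError(f'Missing [{section}] section')

# Typed access to a few of the parameters documented above.
min_n_stars = config.getint('INPUTS', 'MIN_N_STARS')
ksig_loc = config.getfloat('INSTANCE', 'KSIG_LOC')
loc_model = config.get('FIT', 'LOC_MODEL')
print(min_n_stars, ksig_loc, loc_model)
```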
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id09.ini b/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id09.ini
deleted file mode 100644
index 50bb47c5..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id09.ini
+++ /dev/null
@@ -1,149 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id09/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id09/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 1.00
-KSIG_GLOB = 1.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-UPFACT = 3
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id09/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id09/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id09/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# ie sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low as a hihg quantity of low quality
-# stars will be rejected. ie 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model.
-# ie 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model.
-# ie 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated we perform an outlier check on the training
-# stars. We divide each star in two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE error
-# of the residual divided by the noise level is over the RMSE_THRESH,
-# the star will be considered an outlier. A perfect reconstruction
-# would have RMSE_THRESH equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD is bigger than
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. For the moment the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use, it can be: 'rca',
-# 'poly' or 'hybrid'.
-# When the poly model is used, N_COMP_LOC should be used
-# as the D_LOC (max degree of the poly model)
-# PSF_SIZE : First guess of the PSF size. A size estimation is done anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2, sigma
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory where to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
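The `RMSE_THRESH` / `CCD_STAR_THRESH` description above specifies a two-stage rejection: a star is an outlier when the RMSE of its model residual over the inner region exceeds `RMSE_THRESH` times the noise level estimated from the outer region, and a CCD is dropped when the outlier fraction exceeds `CCD_STAR_THRESH`. A hedged numpy sketch of that criterion (stamp shapes and the circular split are illustrative, not MCCD's actual implementation):

```python
import numpy as np

def flag_outlier_stars(stars, models, inner_radius, rmse_thresh=10.0):
    """Flag stars whose inner-region residual RMSE exceeds
    rmse_thresh times the outer-region noise level."""
    n_stars, h, w = stars.shape
    yy, xx = np.mgrid[:h, :w]
    r = np.hypot(yy - (h - 1) / 2.0, xx - (w - 1) / 2.0)
    inner, outer = r <= inner_radius, r > inner_radius

    residuals = stars - models
    noise = np.std(stars[:, outer], axis=1)  # background noise per star
    rmse = np.sqrt(np.mean(residuals[:, inner] ** 2, axis=1))
    return rmse / noise > rmse_thresh

def reject_ccd(outlier_flags, ccd_star_thresh=0.9):
    # Drop the CCD when too large a fraction of its stars are outliers.
    return outlier_flags.mean() > ccd_star_thresh
```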
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id10.ini b/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id10.ini
deleted file mode 100644
index 6477b5e2..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id10.ini
+++ /dev/null
@@ -1,149 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id10/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id10/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 3.00
-KSIG_GLOB = 3.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-UPFACT = 3
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id10/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id10/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id10/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# ie sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low as a hihg quantity of low quality
-# stars will be rejected. ie 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model.
-# ie 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model.
-# ie 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated we perform an outlier check on the training
-# stars. We divide each star in two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE error
-# of the residual divided by the noise level is over the RMSE_THRESH,
-# the star will be considered an outlier. A perfect reconstruction
-# would have RMSE_THRESH equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD is bigger than
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. For the moment the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use, it can be: 'rca',
-# 'poly' or 'hybrid'.
-# When the poly model is used, N_COMP_LOC should be used
-# as the D_LOC (max degree of the poly model)
-# PSF_SIZE : First guess of the PSF size. A size estimation is done anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2, sigma
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory where to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp.ini
deleted file mode 100644
index fbac33b7..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /home/tliaudat/github/mccd_develop/mccd/wf_exps/data/rca/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /home/tliaudat/github/mccd_develop/mccd/wf_exps/data/mccd/inputs/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd/models/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 0.00
-KSIG_GLOB = 0.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd/models/
-VAL_DATA_INPUT_DIR = /home/tliaudat/github/mccd_develop/mccd/wf_exps/data/rca/test_stars/
-VAL_PREPROCESSED_OUTPUT_DIR = /home/tliaudat/github/mccd_develop/mccd/wf_exps/data/mccd/inputs/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd/validation/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
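
For reference, files like the one above can be read with Python's standard configparser, and the INPUT_SEPARATOR convention (sexcat[SEP]catalog_id[SEP]CCD_id.fits) applied to split the catalogue filenames. A minimal sketch, assuming the file above is on disk; the example filename is hypothetical.

from configparser import ConfigParser
from pathlib import Path

config = ConfigParser()
config.optionxform = str  # keep the upper-case key names used above
config.read("config_MCCD_wf_exp.ini")

separator = config.get("INPUTS", "INPUT_SEPARATOR")  # '-' in this file

def parse_star_filename(filename, sep):
    # 'train_stars-<catalog_id>-<CCD_id>.fits' -> its three fields.
    prefix, catalog_id, ccd_id = Path(filename).stem.split(sep)
    return prefix, catalog_id, int(ccd_id)

print(parse_star_filename("train_stars-0000000-12.fits", separator))
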
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id01.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id01.ini
deleted file mode 100644
index 67acd84d..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id01.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id01/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id01/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 4
-KSIG_LOC = 0.00
-KSIG_GLOB = 0.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id01/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id01/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id01/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id02.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id02.ini
deleted file mode 100644
index 9e746f65..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id02.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id02/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id02/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 0.00
-KSIG_GLOB = 0.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id02/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id02/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id02/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id03.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id03.ini
deleted file mode 100644
index b60cd3ef..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id03.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id03/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id03/
-
-[INSTANCE]
-N_COMP_LOC = 6
-D_COMP_GLOB = 8
-KSIG_LOC = 0.00
-KSIG_GLOB = 0.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id03/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id03/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id03/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id04.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id04.ini
deleted file mode 100644
index 85425253..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id04.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id04/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id04/
-
-[INSTANCE]
-N_COMP_LOC = 8
-D_COMP_GLOB = 10
-KSIG_LOC = 0.00
-KSIG_GLOB = 0.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id04/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id04/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id04/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id05.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id05.ini
deleted file mode 100644
index 873a1a37..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id05.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id05/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id05/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 1.00
-KSIG_GLOB = 1.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id05/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id05/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id05/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id06.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id06.ini
deleted file mode 100644
index f5cc8361..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id06.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id06/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id06/
-
-[INSTANCE]
-N_COMP_LOC = 4
-D_COMP_GLOB = 6
-KSIG_LOC = 3.00
-KSIG_GLOB = 3.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = hybrid
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id06/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id06/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id06/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
diff --git a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id07.ini b/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id07.ini
deleted file mode 100644
index f7668768..00000000
--- a/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id07.ini
+++ /dev/null
@@ -1,148 +0,0 @@
-# Configuration file for the MCCD method
-
-[INPUTS]
-INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/all_stars/
-INPUT_REGEX_FILE_PATTERN = train_stars-*-*.fits
-INPUT_SEPARATOR = -
-MIN_N_STARS = 2
-OUTLIER_STD_MAX = 100.
-USE_SNR_WEIGHTS = False
-PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id07/
-OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id07/
-
-[INSTANCE]
-N_COMP_LOC = 6
-D_COMP_GLOB = 6
-KSIG_LOC = 1.00
-KSIG_GLOB = 1.00
-FILTER_PATH = None
-D_HYB_LOC = 2
-MIN_D_COMP_GLOB = None
-RMSE_THRESH = 10.
-CCD_STAR_THRESH = 0.9
-FP_GEOMETRY = EUCLID
-
-[FIT]
-LOC_MODEL = rca
-PSF_SIZE = 4.
-PSF_SIZE_TYPE = R2
-N_EIGENVECTS = 5
-N_ITER_RCA = 1
-N_ITER_GLOB = 2
-N_ITER_LOC = 2
-NB_SUBITER_S_LOC = 300
-NB_SUBITER_A_LOC = 400
-NB_SUBITER_S_GLOB = 100
-NB_SUBITER_A_GLOB = 200
-
-[VALIDATION]
-VAL_MODEL_INPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/models/model_id07/
-VAL_DATA_INPUT_DIR = /n05data/tliaudat/wf_exps/datasets/rca_shifts/test/
-VAL_PREPROCESSED_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/inputs/model_id07/
-VAL_REGEX_FILE_PATTERN = test_stars-*-*.fits
-VAL_SEPARATOR = -
-VAL_OUTPUT_DIR = /n05data/tliaudat/wf_exps/outputs/mccd_shifts/validation/model_id07/
-APPLY_DEGRADATION = True
-MCCD_DEBUG = False
-GLOBAL_POL_INTERP = False
-
-
-# Parameter description:
-#
-#
-# [INPUTS]
-# INPUT_DIR : (Required) Must be a valid directory containing the input
-# MCCD files.
-# INPUT_REGEX_FILE_PATTERN : File pattern of the input files to use. It should
-# follow regex (regular expression) standards.
-# INPUT_SEPARATOR : Separator of the different fields in the filename,
-# i.e. sexcat[SEP]catalog_id[SEP]CCD_id.fits
-# MIN_N_STARS : Minimum number of stars to keep a CCD for the training.
-# OUTLIER_STD_MAX : Maximum standard deviation used for the outlier rejection.
-# Should not be too low, as a high quantity of low-quality
-# stars will be rejected, i.e. 9 is a conservative rejection.
-# USE_SNR_WEIGHTS : Boolean to determine if the SNR weighting strategy will
-# be used.
-# For now, it needs the SNR estimations from SExtractor.
-# PREPROCESSED_OUTPUT_DIR : (Required) Must be a valid directory to write the
-# preprocessed input files.
-# OUTPUT_DIR : (Required) Must be a valid directory to write the output files.
-# The constructed models will be saved.
-#
-#
-# [INSTANCE]
-# N_COMP_LOC : Number of components of the Local model. If LOC_MODEL is poly,
-# will be the max degree D of the polynomial.
-# D_COMP_GLOB : Max degree of the global polynomial model.
-# KSIG_LOC : Denoising parameter of the local model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# KSIG_GLOB : Denoising parameter of the global model,
-# i.e. 1 is a normal denoising, 3 is a hard denoising.
-# FILTER_PATH : Path for predefined filters.
-# D_HYB_LOC : Degree of the polynomial component for the local part in case
-# the LOC_MODEL used is 'hybrid'.
-# MIN_D_COMP_GLOB : The minimum degree of the polynomial for the global
-# component. For example, if the parameter is set to 1, the
-# polynomials of degree 0 and 1 will be excluded from the
-# global polynomial variations. ``None`` means that we are
-# not excluding any degree.
-# RMSE_THRESH : Parameter concerning the CCD outlier rejection. Once the PSF
-# model is calculated, we perform an outlier check on the training
-# stars. We divide each star into two parts with a given circle.
-# The inner part corresponds to most of the PSF/star energy,
-# while the outer part corresponds to the observation background.
-# The outer part is used to calculate the noise level and the inner
-# part to calculate the model residual
-# (star observation - PSF model reconstruction). If the RMSE
-# of the residual divided by the noise level exceeds RMSE_THRESH,
-# the star is considered an outlier. A perfect reconstruction
-# would give a ratio equal to 1.
-# CCD_STAR_THRESH : Parameter concerning the CCD outlier rejection. If the
-# percentage of outlier stars in a single CCD exceeds
-# CCD_STAR_THRESH, the CCD is considered to be an outlier.
-# In this case, the CCD is rejected from the PSF model.
-# A value lower than 0 means that no outlier rejection
-# will be done.
-# FP_GEOMETRY : Defines the geometry of the focal plane. At present, the two
-# available options are 'CFIS' and 'EUCLID'. If the parameter is
-# not specified, it defaults to 'CFIS'.
-#
-#
-#
-# [FIT]
-# LOC_MODEL : Defines the type of local model to use; it can be 'rca',
-# 'poly' or 'hybrid'.
-# When the 'poly' model is used, N_COMP_LOC is used
-# as D_LOC (the max degree of the polynomial model).
-# PSF_SIZE : First guess of the PSF size. A size estimation is performed anyway.
-# PSF_SIZE_TYPE : Type of the size information. It can be: fwhm, R2 or sigma.
-# N_EIGENVECTS : Number of eigenvectors to keep for the graph constraint
-# construction.
-# N_ITER_RCA : Number of global epochs in the algorithm. Alternation between
-# global and local estimations.
-# N_ITER_GLOB : Number of epochs for each global optimization. Alternations
-# between A_GLOB and S_GLOB.
-# N_ITER_LOC : Number of epochs for each local optimization. Alternations
-# between the different A_LOC and S_LOC.
-# NB_SUBITER_S_LOC : Iterations for the optimization algorithm over S_LOC.
-# NB_SUBITER_A_LOC : Iterations for the optimization algorithm over A_LOC.
-# NB_SUBITER_S_GLOB : Iterations for the optimization algorithm over S_GLOB.
-# NB_SUBITER_A_GLOB : Iterations for the optimization algorithm over A_GLOB.
-#
-#
-# [VALIDATION]
-# VAL_MODEL_INPUT_DIR : (Required) Must be a valid directory which contains the
-# saved trained models.
-# VAL_DATA_INPUT_DIR : (Required) Must be a valid directory which contains the
-# validation input data (test dataset).
-# VAL_PREPROCESSED_OUTPUT_DIR : Same as PREPROCESSED_OUTPUT_DIR but for
-# validation.
-# VAL_REGEX_FILE_PATTERN : Same as INPUT_REGEX_FILE_PATTERN but for validation.
-# VAL_SEPARATOR : Same as INPUT_SEPARATOR but for validation.
-# VAL_OUTPUT_DIR : (Required) Must be a valid directory in which to save the
-# validation outputs, test PSFs and interpolated PSFs.
-# APPLY_DEGRADATION : Whether the PSF models should be degraded
-# (sampling/shifts/flux) to match stars; use True if you
-# plan on making pixel-based comparisons (residuals etc.).
-# MCCD_DEBUG : Debug mode. Returns the local and global contributions.
-# GLOBAL_POL_INTERP : Uses polynomial interpolation for the global model
-# instead of RBF kernel interpolation.
-#
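
The config_MCCD_wf_exp_id01.ini to id07.ini files above differ from the base configuration only in N_COMP_LOC, D_COMP_GLOB, KSIG_LOC/KSIG_GLOB, LOC_MODEL and the per-run paths. A hedged sketch of how such a grid of variants could be generated programmatically instead of hand-editing eight copies; this script is illustrative, not part of the repository's tooling, and the per-run input/output paths would need the same substitution.

from configparser import ConfigParser

variants = {
    "id01": {"N_COMP_LOC": "4", "D_COMP_GLOB": "4"},
    "id02": {"N_COMP_LOC": "4", "D_COMP_GLOB": "6"},
    "id03": {"N_COMP_LOC": "6", "D_COMP_GLOB": "8"},
    "id04": {"N_COMP_LOC": "8", "D_COMP_GLOB": "10"},
    "id05": {"KSIG_LOC": "1.00", "KSIG_GLOB": "1.00"},
    "id06": {"KSIG_LOC": "3.00", "KSIG_GLOB": "3.00"},
    "id07": {"N_COMP_LOC": "6", "KSIG_LOC": "1.00",
             "KSIG_GLOB": "1.00", "LOC_MODEL": "rca"},
}

for run_id, overrides in variants.items():
    config = ConfigParser()
    config.optionxform = str               # preserve upper-case keys
    config.read("config_MCCD_wf_exp.ini")  # base file shown earlier
    for key, value in overrides.items():
        section = "FIT" if key == "LOC_MODEL" else "INSTANCE"
        config.set(section, key, value)
    with open(f"config_MCCD_wf_exp_{run_id}.ini", "w") as handle:
        config.write(handle)
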
diff --git a/method-comparison/config_files/new_SR_default.psfex b/method-comparison/config_files/new_SR_default.psfex
deleted file mode 100644
index e66fccc4..00000000
--- a/method-comparison/config_files/new_SR_default.psfex
+++ /dev/null
@@ -1,64 +0,0 @@
-# Default configuration file for PSFEx 3.17.1
-# EB 2017-02-04
-#
-
-#-------------------------------- PSF model ----------------------------------
-
-BASIS_TYPE PIXEL # _AUTO # NONE, PIXEL, GAUSS-LAGUERRE or FILE
-BASIS_NUMBER 25 # Basis number or parameter
-PSF_SAMPLING .333 # Sampling step in pixel units (0.0 = auto)
-PSF_ACCURACY 0.01 # Accuracy to expect from PSF "pixel" values
-PSF_SIZE 64,64 # Image size of the PSF model
-
-#------------------------- Point source measurements -------------------------
-
-CENTER_KEYS XWIN_IMAGE,YWIN_IMAGE # Catalogue parameters for source pre-centering
-PHOTFLUX_KEY FLUX_AUTO # Catalogue parameter for photometric norm.
-PHOTFLUXERR_KEY FLUXERR_AUTO # Catalogue parameter for photometric error
-
-#----------------------------- PSF variability -------------------------------
-
-PSFVAR_KEYS XWIN_IMAGE,YWIN_IMAGE # Catalogue or FITS (preceded by :) params
-PSFVAR_GROUPS 1,1 # Group tag for each context key
-PSFVAR_DEGREES 2 # Polynom degree for each group
-PSFVAR_NSNAP 12 # Number of PSF snapshots per axis
-
-#----------------------------- Sample selection ------------------------------
-
-SAMPLE_AUTOSELECT N # Automatically select the FWHM (Y/N) ?
-SAMPLEVAR_TYPE NONE # File-to-file PSF variability: NONE or SEEING
-SAMPLE_FWHMRANGE 0.0001,1000 # Allowed FWHM range
-SAMPLE_VARIABILITY 10 # Allowed FWHM variability (1.0 = 100%)
-SAMPLE_MINSN .000001 # Minimum S/N for a source to be used
-SAMPLE_MAXELLIP 1 # Maximum (A-B)/(A+B) for a source to be used
-
-#------------------------------- Check-plots ----------------------------------
-
-CHECKPLOT_DEV PNG # NULL, XWIN, TK, PS, PSC, XFIG, PNG,
- # JPEG, AQT, PDF or SVG
-CHECKPLOT_TYPE FWHM,ELLIPTICITY,COUNTS, COUNT_FRACTION, CHI2, RESIDUALS
- # or NONE
-CHECKPLOT_NAME fwhm, ellipticity, counts, countfrac, chi2, resi
-
-#------------------------------ Check-Images ---------------------------------
-
-CHECKIMAGE_CUBE Y
-CHECKIMAGE_TYPE #PROTOTYPES #,SAMPLES,RESIDUALS,SNAPSHOTS,BASIS
- # or MOFFAT,-MOFFAT,-SYMMETRICAL
-CHECKIMAGE_NAME #proto.fits #,samp.fits,resi.fits,snap.fits,basis.fits
- # Check-image filenames
-
-#----------------------------- Miscellaneous ---------------------------------
-
-#PSF_DIR # Where to write PSFs (empty=same as input)
-VERBOSE_TYPE NORMAL # can be QUIET,NORMAL,LOG or FULL
-WRITE_XML N # Write XML file (Y/N)?
-XML_NAME psfex.xml # Filename for XML output
-NTHREADS 1 # Number of simultaneous threads for
- # the SMP version of PSFEx
- # 0 = automatic
-
-#----------------------------- Output catalogs -------------------------------
-
-OUTCAT_TYPE FITS_LDAC # NONE, ASCII_HEAD, ASCII, FITS_LDAC
-# OUTCAT_NAME psfex_out.cat # Output catalog filename
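
As we read the model parameters above, PSF_SAMPLING = .333 requests a model sampled at a third of an image pixel, i.e. a roughly 3x super-resolved PSF, and with PSF_SIZE = 64,64 the model grid then covers about 21x21 image pixels. A small bookkeeping sketch; the values are taken from the config above, the interpretation is ours.

psf_sampling = 0.333   # model pixel size, in image pixels
psf_size = (64, 64)    # model grid, in model pixels

upsampling = 1.0 / psf_sampling                               # ~3.0
footprint = tuple(round(n * psf_sampling) for n in psf_size)  # (21, 21)
print(upsampling, footprint)
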
diff --git a/method-comparison/config_files/new_default.psfex b/method-comparison/config_files/new_default.psfex
deleted file mode 100644
index 4775ff11..00000000
--- a/method-comparison/config_files/new_default.psfex
+++ /dev/null
@@ -1,65 +0,0 @@
-# Default configuration file for PSFEx 3.17.1
-# EB 2017-02-04
-#
-
-#-------------------------------- PSF model ----------------------------------
-
-BASIS_TYPE PIXEL #_AUTO # NONE, PIXEL, GAUSS-LAGUERRE or FILE
-BASIS_NUMBER 25 # Basis number or parameter
-# PSF_SAMPLING 1. # Sampling step in pixel units (0.0 = auto)
-PSF_ACCURACY 0.01 # Accuracy to expect from PSF "pixel" values
-# PSF_SIZE 32,32 # Image size of the PSF model
-
-#------------------------- Point source measurements -------------------------
-
-CENTER_KEYS XWIN_IMAGE,YWIN_IMAGE # Catalogue parameters for source pre-centering
-PHOTFLUX_KEY FLUX_AUTO # Catalogue parameter for photometric norm.
-PHOTFLUXERR_KEY FLUXERR_AUTO # Catalogue parameter for photometric error
-
-#----------------------------- PSF variability -------------------------------
-
-PSFVAR_KEYS XWIN_IMAGE,YWIN_IMAGE # Catalogue or FITS (preceded by :) params
-PSFVAR_GROUPS 1,1 # Group tag for each context key
-# PSFVAR_DEGREES 2              # Polynomial degree for each group
-PSFVAR_NSNAP 12 # Number of PSF snapshots per axis
-
-#----------------------------- Sample selection ------------------------------
-
-SAMPLE_AUTOSELECT N # Automatically select the FWHM (Y/N) ?
-SAMPLEVAR_TYPE NONE # File-to-file PSF variability: NONE or SEEING
-SAMPLE_FWHMRANGE 0.0001,1000 # Allowed FWHM range
-SAMPLE_VARIABILITY 10 # Allowed FWHM variability (1.0 = 100%)
-SAMPLE_MINSN .000001 # Minimum S/N for a source to be used
-SAMPLE_MAXELLIP 1 # Maximum (A-B)/(A+B) for a source to be used
-
-#------------------------------- Check-plots ----------------------------------
-
-CHECKPLOT_DEV PNG # NULL, XWIN, TK, PS, PSC, XFIG, PNG,
- # JPEG, AQT, PDF or SVG
-CHECKPLOT_TYPE FWHM,ELLIPTICITY,COUNTS, COUNT_FRACTION, CHI2, RESIDUALS
- # or NONE
-CHECKPLOT_NAME fwhm, ellipticity, counts, countfrac, chi2, resi
-
-#------------------------------ Check-Images ---------------------------------
-
-CHECKIMAGE_CUBE Y
-CHECKIMAGE_TYPE #PROTOTYPES #,SAMPLES,RESIDUALS,SNAPSHOTS,BASIS
- # or MOFFAT,-MOFFAT,-SYMMETRICAL
-CHECKIMAGE_NAME #proto.fits #,samp.fits,resi.fits,snap.fits,basis.fits
- # Check-image filenames
-
-#----------------------------- Miscellaneous ---------------------------------
-
-#PSF_DIR # Where to write PSFs (empty=same as input)
-VERBOSE_TYPE NORMAL # can be QUIET,NORMAL,LOG or FULL
-WRITE_XML N # Write XML file (Y/N)?
-XML_NAME psfex.xml # Filename for XML output
-NTHREADS 1 # Number of simultaneous threads for
- # the SMP version of PSFEx
- # 0 = automatic
-
-#----------------------------- Output catalogs -------------------------------
-
-OUTCAT_TYPE FITS_LDAC # NONE, ASCII_HEAD, ASCII, FITS_LDAC
-# OUTCAT_NAME psfex_out.cat # Output catalog filename
-
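-# Note: the keys commented out above (PSF_SAMPLING, PSF_SIZE, PSFVAR_DEGREES)
-# fall back to the PSFEx built-in defaults; the run scripts in
-# method-comparison/jobs/ pass them to psfex_script.py instead
-# (--psf_sampling, --psf_size, --psfvar_degrees), which presumably forwards
-# them to PSFEx as command-line overrides.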
diff --git a/method-comparison/jobs/mccd_SR_job_id08.sh b/method-comparison/jobs/mccd_SR_job_id08.sh
deleted file mode 100755
index 729dc6d9..00000000
--- a/method-comparison/jobs/mccd_SR_job_id08.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_SR_id08
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=04:hasgpu
-
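-# Submission sketch (assuming the CANDIDE PBS/Torque scheduler):
-#   qsub mccd_SR_job_id08.sh
-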
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script_SR.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id08.ini \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id mccd_SR_id08 \
- --psf_out_dim 64 \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_SR_job_id08_test.sh b/method-comparison/jobs/mccd_SR_job_id08_test.sh
deleted file mode 100755
index f4bc01f9..00000000
--- a/method-comparison/jobs/mccd_SR_job_id08_test.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_SR_id08_test
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=04:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./test_mccd_script_SR.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id08.ini \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id mccd_SR_id08 \
- --psf_out_dim 64 \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_SR_job_id09.sh b/method-comparison/jobs/mccd_SR_job_id09.sh
deleted file mode 100755
index 8448e97b..00000000
--- a/method-comparison/jobs/mccd_SR_job_id09.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_SR_id09
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=04:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script_SR.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id09.ini \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id mccd_SR_id09 \
- --psf_out_dim 64 \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_SR_job_id10.sh b/method-comparison/jobs/mccd_SR_job_id10.sh
deleted file mode 100755
index 89b9af24..00000000
--- a/method-comparison/jobs/mccd_SR_job_id10.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_SR_id10
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n16:ppn=04:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script_SR.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_SR_wf_exp_id10.ini \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id mccd_SR_id10 \
- --psf_out_dim 64 \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_data_gen_candide.sh b/method-comparison/jobs/mccd_data_gen_candide.sh
deleted file mode 100755
index beae3a80..00000000
--- a/method-comparison/jobs/mccd_data_gen_candide.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_data_gen
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n16:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/dataset-conversion-MCCD.py \
- --rca_data_path /n05data/tliaudat/wf_exps/datasets/rca/ \
- --mccd_saving_path /n05data/tliaudat/wf_exps/datasets/mccd/ \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_data_gen_local.sh b/method-comparison/jobs/mccd_data_gen_local.sh
deleted file mode 100755
index e50e9976..00000000
--- a/method-comparison/jobs/mccd_data_gen_local.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-python /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/scripts/dataset-conversion-MCCD.py \
- --rca_data_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca_shifts/ \
- --mccd_saving_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/mccd_shifts/ \
diff --git a/method-comparison/jobs/mccd_data_gen_shifts_candide.sh b/method-comparison/jobs/mccd_data_gen_shifts_candide.sh
deleted file mode 100755
index ea985bc4..00000000
--- a/method-comparison/jobs/mccd_data_gen_shifts_candide.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_data_gen_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/dataset-conversion-MCCD.py \
- --rca_data_path /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
- --mccd_saving_path /n05data/tliaudat/wf_exps/datasets/mccd_shifts/ \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job.sh b/method-comparison/jobs/mccd_job.sh
deleted file mode 100755
index 2b925bf7..00000000
--- a/method-comparison/jobs/mccd_job.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=04
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/mccd_develop/mccd/wf_exps/log_files/
-
-python ./../scripts/mccd_script.py \
- --config_file /home/tliaudat/github/mccd_develop/mccd/wf_exps/config_files/config_MCCD_wf_exp.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id01.sh b/method-comparison/jobs/mccd_job_id01.sh
deleted file mode 100755
index 934198a3..00000000
--- a/method-comparison/jobs/mccd_job_id01.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id01
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id01.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id02.sh b/method-comparison/jobs/mccd_job_id02.sh
deleted file mode 100755
index 874b3edf..00000000
--- a/method-comparison/jobs/mccd_job_id02.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id02
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id02.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id03.sh b/method-comparison/jobs/mccd_job_id03.sh
deleted file mode 100755
index 73be6d75..00000000
--- a/method-comparison/jobs/mccd_job_id03.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id03
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id03.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id04.sh b/method-comparison/jobs/mccd_job_id04.sh
deleted file mode 100755
index c4a2c4ee..00000000
--- a/method-comparison/jobs/mccd_job_id04.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id04
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id04.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id05.sh b/method-comparison/jobs/mccd_job_id05.sh
deleted file mode 100755
index 5440b0a5..00000000
--- a/method-comparison/jobs/mccd_job_id05.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id05
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id05.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id06.sh b/method-comparison/jobs/mccd_job_id06.sh
deleted file mode 100755
index 5c1c42ae..00000000
--- a/method-comparison/jobs/mccd_job_id06.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id06
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id06.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/mccd_job_id07.sh b/method-comparison/jobs/mccd_job_id07.sh
deleted file mode 100755
index e1ee0669..00000000
--- a/method-comparison/jobs/mccd_job_id07.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N mccd_wf_exp_id07
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-python ./mccd_script.py \
- --config_file /home/tliaudat/github/wf-psf/method-comparison/config_files/mccd_configs/config_MCCD_wf_exp_id07.ini
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_SR_run_d2_candide.sh b/method-comparison/jobs/psfex_SR_run_d2_candide.sh
deleted file mode 100755
index 78b27549..00000000
--- a/method-comparison/jobs/psfex_SR_run_d2_candide.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_SR_run_d2_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=02:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/psfex_script_SR.py \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/psfex_SR_d2_shifts/ \
- --dataset_path /n05data/tliaudat/wf_exps/datasets/psfex_shifts/ \
- --psfvar_degrees 2 \
- --psf_sampling 0.33 \
- --psf_size 64 \
- --run_id psfex_SR_run_d2 \
- --exec_path psfex \
- --verbose 1 \
-
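-# Note: --psf_sampling 0.33 with --psf_size 64 corresponds to a roughly 3x
-# super-resolved PSF model, compared with the standard runs that use a
-# sampling of 1.0 and 32-pixel models.
-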
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_SR_run_d3_candide.sh b/method-comparison/jobs/psfex_SR_run_d3_candide.sh
deleted file mode 100755
index 4921454c..00000000
--- a/method-comparison/jobs/psfex_SR_run_d3_candide.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_SR_run_d3_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=02:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/psfex_script_SR.py \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --saving_dir /n05data/tliaudat/wf_exps/test_outputs/psfex_SR_d3_shifts/ \
- --dataset_path /n05data/tliaudat/wf_exps/datasets/psfex_shifts/ \
- --psfvar_degrees 3 \
- --psf_sampling 0.33 \
- --psf_size 64 \
- --run_id psfex_SR_run_d3 \
- --exec_path psfex \
- --verbose 1 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_data_gen_candide.sh b/method-comparison/jobs/psfex_data_gen_candide.sh
deleted file mode 100755
index b0d407fa..00000000
--- a/method-comparison/jobs/psfex_data_gen_candide.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_data_gen
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n16:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/dataset-conversion-PSFEx.py \
- --base_repo_path /home/tliaudat/github/wf-psf/ \
- --sex_exec_path sex \
- --psfex_saving_path /n05data/tliaudat/wf_exps/datasets/psfex/ \
- --verbose 1 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_data_gen_local.sh b/method-comparison/jobs/psfex_data_gen_local.sh
deleted file mode 100755
index 9f05d5e0..00000000
--- a/method-comparison/jobs/psfex_data_gen_local.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-
-python /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/scripts/dataset-conversion-PSFEx.py \
- --base_repo_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/ \
- --sex_exec_path sex \
- --psfex_saving_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/psfex_shifts/ \
- --verbose 1 \
- --rca_input_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca_shifts/ \
-
diff --git a/method-comparison/jobs/psfex_data_gen_shifts_candide.sh b/method-comparison/jobs/psfex_data_gen_shifts_candide.sh
deleted file mode 100755
index d68df8e9..00000000
--- a/method-comparison/jobs/psfex_data_gen_shifts_candide.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_data_gen_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/dataset-conversion-PSFEx.py \
- --base_repo_path /home/tliaudat/github/wf-psf/ \
- --sex_exec_path sex \
- --psfex_saving_path /n05data/tliaudat/wf_exps/datasets/psfex_shifts/ \
- --verbose 1 \
- --rca_input_path /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_run_candide.sh b/method-comparison/jobs/psfex_run_candide.sh
deleted file mode 100755
index 224fef6a..00000000
--- a/method-comparison/jobs/psfex_run_candide.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_run_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/psfex_script.py \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/psfex_d2_shifts/ \
- --dataset_path /n05data/tliaudat/wf_exps/datasets/psfex_shifts/ \
- --psfvar_degrees 2 \
- --psf_sampling 1. \
- --psf_size 32 \
- --run_id psfex_run_r1_d2 \
- --exec_path psfex \
- --verbose 1 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_run_d3_candide.sh b/method-comparison/jobs/psfex_run_d3_candide.sh
deleted file mode 100755
index df76fcfb..00000000
--- a/method-comparison/jobs/psfex_run_d3_candide.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_run_d3_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/psfex_script.py \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/psfex_d3_shifts/ \
- --dataset_path /n05data/tliaudat/wf_exps/datasets/psfex_shifts/ \
- --psfvar_degrees 3 \
- --psf_sampling 1. \
- --psf_size 32 \
- --run_id psfex_run_r1_d3 \
- --exec_path psfex \
- --verbose 1 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/psfex_run_d4_candide.sh b/method-comparison/jobs/psfex_run_d4_candide.sh
deleted file mode 100755
index a901c2ee..00000000
--- a/method-comparison/jobs/psfex_run_d4_candide.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N psfex_run_d4_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/psfex_script.py \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/psfex_d4_shifts/ \
- --dataset_path /n05data/tliaudat/wf_exps/datasets/psfex_shifts/ \
- --psfvar_degrees 4 \
- --psf_sampling 1. \
- --psf_size 32 \
- --run_id psfex_run_r1_d4 \
- --exec_path psfex \
- --verbose 1 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_SR_job.sh b/method-comparison/jobs/rca_SR_job.sh
deleted file mode 100755
index d9b73cfe..00000000
--- a/method-comparison/jobs/rca_SR_job.sh
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_SR
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=04:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
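-# Caveat: $NSLOTS is a Grid Engine variable; under PBS/Torque the requested
-# core count is usually exposed as $PBS_NUM_PPN instead, so this variable may
-# be empty here.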
-
-# n8
-python ./rca_script_SR.py \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id rca_SR_shifts_n8_up3_k3 \
- --n_comp 8 \
- --upfact 3 \
- --ksig 3. \
- --psf_out_dim 64 \
-
-# n12
-python ./rca_script_SR.py \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id rca_SR_shifts_n12_up3_k3 \
- --n_comp 12 \
- --upfact 3 \
- --ksig 3. \
- --psf_out_dim 64 \
-
-# n16
-python ./rca_script_SR.py \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id rca_SR_shifts_n16_up3_k3 \
- --n_comp 16 \
- --upfact 3 \
- --ksig 3. \
- --psf_out_dim 64 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_SR_job_n4.sh b/method-comparison/jobs/rca_SR_job_n4.sh
deleted file mode 100755
index db6d5afe..00000000
--- a/method-comparison/jobs/rca_SR_job_n4.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_SR_n4
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=02:hasgpu
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script_SR.py \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
- --repo_base_path /home/tliaudat/github/wf-psf/ \
- --run_id rca_SR_shifts_n4_up3_k3 \
- --n_comp 4 \
- --upfact 3 \
- --ksig 3. \
- --psf_out_dim 64 \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_data_gen_candide.sh b/method-comparison/jobs/rca_data_gen_candide.sh
deleted file mode 100755
index c9335e68..00000000
--- a/method-comparison/jobs/rca_data_gen_candide.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_data_gen
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n16:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/dataset-conversion-RCA.py \
- --base_repo_path /home/tliaudat/github/wf-psf/ \
- --rca_saving_path /n05data/tliaudat/wf_exps/datasets/rca/ \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_data_gen_local.sh b/method-comparison/jobs/rca_data_gen_local.sh
deleted file mode 100755
index 79ea5b1e..00000000
--- a/method-comparison/jobs/rca_data_gen_local.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/bash
-
-python /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/scripts/dataset-conversion-RCA-intrapixshifts.py \
- --base_repo_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/ \
- --rca_saving_path /Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca_shifts/ \
diff --git a/method-comparison/jobs/rca_data_gen_shifts_candide.sh b/method-comparison/jobs/rca_data_gen_shifts_candide.sh
deleted file mode 100755
index 2b97ac92..00000000
--- a/method-comparison/jobs/rca_data_gen_shifts_candide.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_data_gen_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n03:ppn=02
-
-# Activate conda environment
-# module load intelpython/3-2020.1
-module load tensorflow/2.4
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/
-
-python ./method-comparison/scripts/dataset-conversion-RCA-intrapixshifts.py \
- --base_repo_path /home/tliaudat/github/wf-psf/ \
- --rca_saving_path /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n12.sh b/method-comparison/jobs/rca_job_n12.sh
deleted file mode 100755
index db523a09..00000000
--- a/method-comparison/jobs/rca_job_n12.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n12
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n12_up1_k3 \
- --n_comp 12 \
- --upfact 1 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n16.sh b/method-comparison/jobs/rca_job_n16.sh
deleted file mode 100755
index fcd2a2be..00000000
--- a/method-comparison/jobs/rca_job_n16.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n16_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n16_up1_k3 \
- --n_comp 16 \
- --upfact 1 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n4.sh b/method-comparison/jobs/rca_job_n4.sh
deleted file mode 100755
index 92966371..00000000
--- a/method-comparison/jobs/rca_job_n4.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n4_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n4_up1_k3 \
- --n_comp 4 \
- --upfact 1 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n4_up1_k0.sh b/method-comparison/jobs/rca_job_n4_up1_k0.sh
deleted file mode 100755
index 02013595..00000000
--- a/method-comparison/jobs/rca_job_n4_up1_k0.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n4_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n4_up1_k0 \
- --n_comp 4 \
- --upfact 1 \
- --ksig 0. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n4_up1_k1.sh b/method-comparison/jobs/rca_job_n4_up1_k1.sh
deleted file mode 100755
index a44fc65e..00000000
--- a/method-comparison/jobs/rca_job_n4_up1_k1.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n4_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n4_up1_k1 \
- --n_comp 4 \
- --upfact 1 \
- --ksig 1. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n4_up2.sh b/method-comparison/jobs/rca_job_n4_up2.sh
deleted file mode 100755
index feb936ad..00000000
--- a/method-comparison/jobs/rca_job_n4_up2.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n4_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n4_up2_k3 \
- --n_comp 4 \
- --upfact 2 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n4_up3.sh b/method-comparison/jobs/rca_job_n4_up3.sh
deleted file mode 100755
index 9dabf4ad..00000000
--- a/method-comparison/jobs/rca_job_n4_up3.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n4_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n4_up3_k3 \
- --n_comp 4 \
- --upfact 3 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/rca_job_n8.sh b/method-comparison/jobs/rca_job_n8.sh
deleted file mode 100755
index a4525eb7..00000000
--- a/method-comparison/jobs/rca_job_n8.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n8_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n02:ppn=02
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n8_up1_k3 \
- --n_comp 8 \
- --upfact 1 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/test_rca_job.sh b/method-comparison/jobs/test_rca_job.sh
deleted file mode 100755
index ea955bb5..00000000
--- a/method-comparison/jobs/test_rca_job.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N test_rca
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n05:ppn=04
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Here OMP_NUM_THREADS uses all of the requested cores for this test script
-export OMP_NUM_THREADS=$NSLOTS
-# while the low-level BLAS threading is kept to a single thread
-export OPENBLAS_NUM_THREADS=1
-
-python ./rca_script.py \
- --run_id test_rca_shifts_n8_up1_k3 \
- --n_comp 8 \
- --upfact 1 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/test_rca_job_n8.sh b/method-comparison/jobs/test_rca_job_n8.sh
deleted file mode 100755
index c3cc7741..00000000
--- a/method-comparison/jobs/test_rca_job_n8.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-##########################
-# SMP Script for CANDIDE #
-##########################
-
-# Receive email when job finishes or aborts
-#PBS -M tobias.liaudat@cea.fr
-#PBS -m ea
-# Set a name for the job
-#PBS -N rca_wf_exp_n8_shifts
-# Join output and errors in one file
-#PBS -j oe
-# Set maximum computing time (e.g. 5min)
-#PBS -l walltime=99:00:00
-# Request number of cores
-#PBS -l nodes=n01:ppn=04
-
-# Activate conda environment
-module load intelpython/3-2020.1
-module load intel/19.0/2
-source activate new_shapepipe
-
-cd /home/tliaudat/github/wf-psf/method-comparison/scripts/
-
-# Just 1 for OMP_NUM_THREADS for this Python script
-export OMP_NUM_THREADS=1
-# And let the low-level threading use all of the requested cores
-export OPENBLAS_NUM_THREADS=$NSLOTS
-
-python ./rca_script.py \
- --run_id rca_shifts_n8_up1_k3 \
- --n_comp 8 \
- --upfact 1 \
- --ksig 3. \
- --saving_dir /n05data/tliaudat/wf_exps/outputs/rca_shifts/ \
- --input_data_dir /n05data/tliaudat/wf_exps/datasets/rca_shifts/ \
-
-# Return exit code
-exit 0
diff --git a/method-comparison/jobs/test_script.py b/method-comparison/jobs/test_script.py
deleted file mode 100644
index 4047a45d..00000000
--- a/method-comparison/jobs/test_script.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-
-import numpy as np
-from astropy.io import fits
-import rca
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--n_comp",
- default=4,
- type=int,
- help="RCA number of eigenPSFs.")
-@click.option(
- "--upfact",
- default=1,
- type=int,
- help="Upsampling factor.")
-@click.option(
- "--ksig",
- default=3.,
- type=float,
- help="Denoising parameter K.")
-@click.option(
- "--run_id",
- default="rca",
- type=str,
- help="Id used for the saved models and validations.")
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=4):
- results = Parallel(n_jobs=1)(
- delayed(rca_procedure)(**args) for i in range(1)
- )
-
-def rca_procedure(**args):
- # Model parameters
- n_comp = args['n_comp']
- upfact = args['upfact']
- ksig = args['ksig']
-
- run_id = args['run_id']
-
- print('n_comp ', n_comp)
- print('upfact ', upfact)
- print('ksig ', ksig)
- print('run_id ', run_id)
-
-
-if __name__ == "__main__":
- main()
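-
-# Usage sketch (every option has a default, so all flags are optional):
-#   python test_script.py --n_comp 8 --upfact 2 --ksig 3. --run_id test_run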
diff --git a/method-comparison/logs/slurm_output_logs.md b/method-comparison/logs/slurm_output_logs.md
deleted file mode 100644
index c033fecc..00000000
--- a/method-comparison/logs/slurm_output_logs.md
+++ /dev/null
@@ -1,2 +0,0 @@
-slurm output logs
-
\ No newline at end of file
diff --git a/method-comparison/scripts/dataset-conversion-MCCD.py b/method-comparison/scripts/dataset-conversion-MCCD.py
deleted file mode 100644
index 1956d6a3..00000000
--- a/method-comparison/scripts/dataset-conversion-MCCD.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import numpy as np
-import mccd
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--rca_data_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca/",
- type=str,
- help="RCA dataset directory path. Should have /test and /train folders inside.")
-@click.option(
- "--mccd_saving_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/mccd/",
- type=str,
- help="MCCD dataset directory saving path. Should have /test and /train folders inside.")
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(mccd_dataset_conversion)(**args) for i in range(1)
- )
-
-
-
-def mccd_dataset_conversion(**args):
- ### Create MCCD catalogs
- # This should be done after the RCA catalogs have been built
-    # as we will be transforming those into MCCD format.
-
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- euclid_CCD_n = loc2glob.ccd_tot
-
- base_input_path = args['rca_data_path']
- base_output_path = args['mccd_saving_path']
-
-
- # Train dataset
- input_folder_path = base_input_path + 'train/'
- output_path = base_output_path + 'train/'
-
- mccd.auxiliary_fun.mccd_preprocessing(
- input_folder_path,
- output_path,
- min_n_stars=1,
- file_pattern='train_stars-*-*.fits',
- separator='-',
- CCD_id_filter_list=np.arange(euclid_CCD_n),
- outlier_std_max=100.,
- save_masks=False,
- save_name='train_star_selection',
- save_extension='.fits',
- verbose=True,
- loc2glob=mccd.mccd_utils.Loc2Glob_EUCLID_sim(),
- fits_tb_pos=1)
-
-
- # Test dataset
- input_folder_path = base_input_path + 'test/'
- output_path = base_output_path + 'test/'
-
- mccd.auxiliary_fun.mccd_preprocessing(
- input_folder_path,
- output_path,
- min_n_stars=1,
- file_pattern='test_stars-*-*.fits',
- separator='-',
- CCD_id_filter_list=np.arange(euclid_CCD_n),
- outlier_std_max=100.,
- save_masks=False,
- save_name='test_star_selection',
- save_extension='.fits',
- verbose=True,
- loc2glob=mccd.mccd_utils.Loc2Glob_EUCLID_sim(),
- fits_tb_pos=1)
-
-
-if __name__ == "__main__":
- main()
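-
-# Usage sketch (paths are illustrative; both directories need /test and /train
-# subfolders, as the option help strings note):
-#   python dataset-conversion-MCCD.py \
-#       --rca_data_path /path/to/compatible-datasets/rca/ \
-#       --mccd_saving_path /path/to/compatible-datasets/mccd/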
diff --git a/method-comparison/scripts/dataset-conversion-PSFEx.py b/method-comparison/scripts/dataset-conversion-PSFEx.py
deleted file mode 100644
index 833d326d..00000000
--- a/method-comparison/scripts/dataset-conversion-PSFEx.py
+++ /dev/null
@@ -1,327 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import os
-import subprocess
-import platform
-
-import numpy as np
-from astropy.io import fits
-import mccd
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--base_repo_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Base wf-psf repository path.")
-@click.option(
- "--sex_exec_path",
- default="sex",
- type=str,
- help="Sextractor executable path.")
-@click.option(
- "--psfex_saving_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/psfex/",
- type=str,
- help="PSFEx dataset directory saving path. Should have /test, /train and /tmp folders inside.")
-@click.option(
- "--rca_input_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca/",
- type=str,
- help="[optional] RCA dataset path. To be used to add the intra pixel shifts.")
-@click.option(
- "--psf_size",
- default=1.25,
- type=float,
- help="PSF/star size in sigma.")
-@click.option(
- "--verbose",
- default=0,
- type=int,
- help="Verbose mode.")
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(psfex_dataset_conversion)(**args) for i in range(1)
- )
-
-
-def psfex_dataset_conversion(**args):
-
- python_var_tuple = platform.python_version_tuple()
-
- psf_size = args['psf_size']
-
- # Sextractor parameters
- exec_path = args['sex_exec_path']
-
- config_files_dir = args['base_repo_path'] + 'method-comparison/config_files/'
- dot_sex = config_files_dir + 'default.sex'
- dot_param = config_files_dir + 'default.param'
- dot_conv = config_files_dir + 'default.conv'
-
-
- # Saving parameters
-
- # Temporary folder for Sextractor intermediate products
- base_sex_path = args['psfex_saving_path'] + 'tmp/'
-
- # Input dataset dir
- dataset_path = args['base_repo_path'] + 'data/coherent_euclid_dataset/'
-
- # Initialize parameters
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- euclid_CCD_n = loc2glob.ccd_tot # 36
- image_size = 32
- verbose = args['verbose']
-
-
- catalog_ids = [200, 500, 1000, 2000, 0]
- train_cat = [True, True, True, True, False]
-
- for catalog_id, train_bool in zip(catalog_ids, train_cat):
-
- if train_bool:
- folder_path = 'train/'
- save_name = 'train_stars'
- datafile_path = 'train_Euclid_res_%d_TrainStars_id_001.npy'%(catalog_id)
- else:
- folder_path = 'test/'
- save_name = 'test_stars'
- datafile_path = 'test_Euclid_res_id_001.npy'
-
- output_dir = args['psfex_saving_path'] + folder_path
-
- # Load dataset
- dataset = np.load(dataset_path + datafile_path, allow_pickle=True)[()]
- if train_bool:
- data_stars = dataset['noisy_stars']
- else:
- data_stars = dataset['stars'] # Noiseless stars for the test
- train_pos = dataset['positions']
-
- # Convert positions and calculate CCD_n
- # Need to switch these coordinates to the global coordinates
-        # Each CCD is 4096x4096 and the origin is the bottom-left corner of CCD 15.
-
- # Shift origin to the global positions
- global_pos = train_pos * (4096*6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
-
- ccd_n_list = local_pos[:,0].astype(int)
- local_pos = local_pos[:,1:]
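-        # Worked example (sketch): the input positions appear to span [0, 1000]
-        # on each axis, so a star at train_pos = (500, 500) maps to
-        # global_pos = (0, 0), the centre of the 6x6 mosaic of 4096-pixel CCDs
-        # spanning [-12288, 12288].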
-
- # CCD list
- CCD_ids = np.arange(euclid_CCD_n)
-
- CCD_w = loc2glob.x_npix
- CCD_h = loc2glob.y_npix
-
- half_size = np.floor(image_size/2).astype(int)
-
-
- for ccd_it in CCD_ids:
- # Select the CCD
-
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_loc = local_pos[ccd_mask, 0]
- y_loc = local_pos[ccd_mask, 1]
- obs_stars = data_stars[ccd_mask, :, :]
- GT_stars = dataset['stars'][ccd_mask, :, :]
-
- # Try to add the intra pixel shift if available
- try:
- # First recover the corresponding shifts
- if train_bool:
- rca_cat_id = catalog_id
- else:
- rca_cat_id = catalog_ids[0]
- rca_data_dir = args['rca_input_path'] + folder_path
- rca_cat = fits.open(
- rca_data_dir + save_name + '-%07d-%02d.fits'%(rca_cat_id, ccd_it),
- memmap=False
- )
- rand_shifts = rca_cat[1].data['SHIFTS']
-
- # Get the noise realisation
- if train_bool:
- noise_realisations = obs_stars - GT_stars
-
- # Let's add intra-pixel shifts
- # First we calculate the shift needed to center the stars to the
- # postage stamp centroid
- cents = [mccd.utils.CentroidEstimator(star, sig=psf_size) for star in GT_stars]
- req_shifts = np.array([ce.return_shifts() for ce in cents])
-
- # This is required as we shift the star to the postage stamp centroid
- # and not the other way around
- req_shifts *= -1
-
- shift_kernels, _ = mccd.utils.shift_ker_stack(
- shifts=req_shifts,
- upfact=1,
- lanc_rad=4
- )
- shift_kernels = mccd.utils.reg_format(shift_kernels)
- shifted_stars = np.array([
- mccd.utils.degradation_op(star, ker, D=1)
- for star, ker in zip(GT_stars, shift_kernels)
- ])
-                # Now that we have the centred stars, we can proceed to add a random intra-pixel shift.
-                # We do it in two steps to avoid introducing a possibly large shift.
-
- # We already recovered the random shifts from the RCA catalog
- # We generate the shift kernels
- shift_kernels, _ = mccd.utils.shift_ker_stack(
- shifts=rand_shifts,
- upfact=1,
- lanc_rad=4
- )
- # Change to regular format (first dimension is the batch)
- shift_kernels = mccd.utils.reg_format(shift_kernels)
- # Shift the noiseless images and add the noise realisation
- shifted_stars = np.array([
- mccd.utils.degradation_op(star, ker, D=1)
- for star, ker in zip(shifted_stars, shift_kernels)
- ])
-                # If these are the training stars, we add the noise realisation
- if save_name == 'train_stars':
- shifted_stars += noise_realisations
-
-                # Now let's use the shifted stars as the observed stars
- obs_stars = shifted_stars
-
-            except Exception:
-                # Shift information (RCA catalogue) unavailable for this CCD: skip it
-                continue
-
- # Check the borders
- for k in range(x_loc.shape[0]):
- if x_loc[k]-half_size < 0:
- print('Modif border x low. id=%d, x=%f'%(k, x_loc[k]))
- x_loc[k] -= x_loc[k]-half_size
- if x_loc[k]+half_size+1 > CCD_w:
- print('Modif border x high. id=%d, x=%f'%(k, x_loc[k]))
- x_loc[k] -= x_loc[k]+half_size+1-CCD_w
- if y_loc[k]-half_size < 0:
- print('Modif border y low. id=%d, y=%f'%(k, y_loc[k]))
- y_loc[k] -= y_loc[k]-half_size
- if y_loc[k]+half_size+1 > CCD_h:
- print('Modif border y high. id=%d, y=%f'%(k, y_loc[k]))
- y_loc[k] -= y_loc[k]+half_size+1-CCD_h
-
- # Number of stars in the CCD
- star_num = x_loc.shape[0]
-
- # Create big pictures
- Im_vig = np.zeros((CCD_w, CCD_h))
- Im_pos = np.zeros((CCD_w, CCD_h))
-
- # No need to add noise as the obs_stars already contain noise
- # And as we are using the Im_pos to force the detection
- # sigma_noise = 1e-10
- # noise = (sigma_noise * np.random.randn(CCD_w * CCD_h)).reshape((CCD_w,CCD_h))
- # Im_vig += noise
- # Im_pos += noise
-
-
- # Add the stars to the big picture
- for k in range(obs_stars.shape[0]):
- Im_vig[
- int(x_loc[k]-half_size):int(x_loc[k]+half_size),
- int(y_loc[k]-half_size):int(y_loc[k]+half_size)
- ] += obs_stars[k,:,:].T
-
- Im_pos[
- int(x_loc[k]-1):int(x_loc[k]+2),
- int(y_loc[k]-1):int(y_loc[k]+2)
- ] += 10
-
- # Define the header and save the big images
- hdr = fits.Header()
- hdr['OWNER'] = 'WF_PSF package'
-
- overwrite = False
-
- data_im = Im_vig.T
- data_pos = Im_pos.T
-
- sex_im_savepath = base_sex_path + '/tmp_ccd_image.fits'
- sex_pos_savepath = base_sex_path + '/tmp_ccd_pos.fits'
-
-
- if os.path.isfile(sex_im_savepath):
- os.remove(sex_im_savepath)
- if os.path.isfile(sex_pos_savepath):
- os.remove(sex_pos_savepath)
-
- fits.PrimaryHDU(data_im, hdr).writeto(sex_im_savepath, overwrite=overwrite)
- fits.PrimaryHDU(data_pos, hdr).writeto(sex_pos_savepath, overwrite=overwrite)
-
-
-        # Prepare the command and launch SExtractor
- detection_image_path = sex_pos_savepath
- measurement_image = sex_im_savepath
- output_file_path = output_dir + '%s_psfex-%07d-%02d.fits'%(save_name, catalog_id, ccd_it)
-
- # Base arguments for SExtractor
- command_line_base = (
- '{0} {1},{2} -c {3} -PARAMETERS_NAME {4} -FILTER_NAME {5} '
- '-CATALOG_NAME {6} -WEIGHT_TYPE None -CHECKIMAGE_TYPE NONE -CHECKIMAGE_NAME none'.format(
- exec_path,
- detection_image_path,
- measurement_image,
- dot_sex,
- dot_param,
- dot_conv,
- output_file_path
- )
- )
-
- # For >=python3.7
- if int(python_var_tuple[0]) == 3 and int(python_var_tuple[1]) >= 7:
- process_output = subprocess.run(command_line_base, shell=True, check=True, capture_output=True)
- if verbose > 0:
- print('STDERR:\n', process_output.stderr.decode("utf-8"))
- print('STDOUT:\n', process_output.stdout.decode("utf-8"))
- elif int(python_var_tuple[0]) == 3 and int(python_var_tuple[1]) < 7:
- # For python3.6
- process_output = subprocess.run(
- command_line_base,
- shell=True,
- check=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT
- )
- if verbose > 0:
- print('STDOUT:\n', process_output.stdout.decode("utf-8"))
-
- sexcat = fits.open(output_file_path, memmap=False)
- print(
- 'Catalog %s, CCD %d, \t Detected stars: %d,\t Diff (inserted - detected) = %d'%(
- catalog_id,
- ccd_it,
- sexcat[2].data['VIGNET'].shape[0],
- star_num - sexcat[2].data['VIGNET'].shape[0]
- )
- )
- sexcat.close()
-
- if os.path.isfile(sex_im_savepath):
- os.remove(sex_im_savepath)
- if os.path.isfile(sex_pos_savepath):
- os.remove(sex_pos_savepath)
-
-if __name__ == "__main__":
- main()
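
For context, the script above forces SExtractor detections by pasting star vignettes into blank CCD-sized images and clamping each star centre so that a full postage stamp fits on the chip. A minimal standalone sketch of that clamping step (the helper name `clamp_to_ccd` is ours, not from the script):

    import numpy as np

    def clamp_to_ccd(x, half_size, ccd_npix):
        """Shift x so the stamp [x - half_size, x + half_size + 1) stays inside [0, ccd_npix)."""
        if x - half_size < 0:
            x -= x - half_size                 # push away from the lower border
        if x + half_size + 1 > ccd_npix:
            x -= x + half_size + 1 - ccd_npix  # push away from the upper border
        return x

    half_size = int(np.floor(64 / 2))           # image_size = 64, as in the script
    print(clamp_to_ccd(10.0, half_size, 4096))  # -> 32.0: the stamp now starts at pixel 0
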
diff --git a/method-comparison/scripts/dataset-conversion-RCA-intrapixshifts.py b/method-comparison/scripts/dataset-conversion-RCA-intrapixshifts.py
deleted file mode 100644
index 65e92f5d..00000000
--- a/method-comparison/scripts/dataset-conversion-RCA-intrapixshifts.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-"""
-## Create RCA catalogs with intra-pixel shifts
-
-This script generates RCA-compatible datasets with random
-intra-pixel shifts. The inputs are the wf_psf coherent datasets.
-
-The intra-pixel shift is added in this script.
-
-Required FITS table names:
-'VIGNET' : observed vignets
-'XWIN_IMAGE' : X local coordinate
-'YWIN_IMAGE' : Y local coordinate
-
-The following entries are optional:
-'SNR_WIN' : SNR estimation
-'XWIN_WORLD' : X WCS global coordinate (RA)
-'YWIN_WORLD' : Y WCS global coordinate (DEC)
-
-Name example:
-An example filename would be: ``star_selection-1234567-04.fits``
-    Where the exposure ID is: ``1234567``
-    Where the CCD ID is: ``04`` (using 2 digits)
-"""
-
-import numpy as np
-import mccd
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--base_repo_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Base wf-psf repository path.")
-@click.option(
- "--rca_saving_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca/",
- type=str,
- help="RCA dataset saving path. Should have /test and /train folders inside.")
-@click.option(
- "--psf_size",
- default=1.25,
- type=float,
- help="PSF/star size in sigma.")
-@click.option(
- "--shift_rand_seed",
- default=10,
- type=int,
- help="Seed for the random shifts.")
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(rca_dataset_conversion)(**args) for i in range(1)
- )
-
-
-
-def rca_dataset_conversion(**args):
-
- # Seed for random generator
- seed = args['shift_rand_seed']
- np.random.seed(seed)
-
- # Loading dir
- dataset_path = args['base_repo_path'] + 'data/coherent_euclid_dataset/'
-
- # Saving parameters
- rca_base_path = args['rca_saving_path']
-
- psf_size = args['psf_size']
-
- separator = '-'
- save_extension = '.fits'
-
-
- catalog_ids = [200, 500, 1000, 2000]
- train_cat = [True, False]
-
- for catalog_id in catalog_ids:
- for train_bool in train_cat:
-
- if train_bool:
- save_name = 'train_stars'
- folder_path = 'train/'
- load_file = 'train_Euclid_res_%d_TrainStars_id_001.npy'%(catalog_id)
- else:
- save_name = 'test_stars'
- folder_path = 'test/'
- load_file = 'test_Euclid_res_id_001.npy'
-
- # Load dataset
- dataset = np.load(dataset_path + load_file, allow_pickle=True)[()]
- data_stars = dataset['stars']
- data_pos = dataset['positions']
- if save_name == 'train_stars':
- noisy_train_stars = dataset['noisy_stars']
- noise_realisations = noisy_train_stars - data_stars
-
- # Let's add intra-pixel shifts
-            # First we calculate the shift needed to center the stars on the
-            # postage stamp centroid
- cents = [mccd.utils.CentroidEstimator(star, sig=psf_size) for star in data_stars]
- req_shifts = np.array([ce.return_shifts() for ce in cents])
-
- # This is required as we shift the star to the postage stamp centroid
- # and not the other way around
- req_shifts *= -1
-
- shift_kernels, _ = mccd.utils.shift_ker_stack(
- shifts=req_shifts,
- upfact=1,
- lanc_rad=4
- )
- shift_kernels = mccd.utils.reg_format(shift_kernels)
- shifted_stars = np.array([
- mccd.utils.degradation_op(star, ker, D=1)
- for star, ker in zip(data_stars, shift_kernels)
- ])
-            # Now that we have the centred stars, we can proceed to add a random intra-pixel shift.
-            # We do it in two steps to avoid a possibly large shift
-
- # We first calculate random shift uniformly distributed in [-0.4, 0.4]
- rand_shifts = (np.random.rand(data_stars.shape[0], 2) - 0.5) * 0.8
- # We generate the shift kernels
- shift_kernels, _ = mccd.utils.shift_ker_stack(
- shifts=rand_shifts,
- upfact=1,
- lanc_rad=4
- )
- # Change to regular format (first dimension is the batch)
- shift_kernels = mccd.utils.reg_format(shift_kernels)
- # Shift the noiseless images and add the noise realisation
- shifted_stars = np.array([
- mccd.utils.degradation_op(star, ker, D=1)
- for star, ker in zip(shifted_stars, shift_kernels)
- ])
-            # If these are the training stars, we add the noise realisation
- if save_name == 'train_stars':
- shifted_stars += noise_realisations
-
-            # Convert positions and calculate CCD_n
-            # We need to switch these coordinates to global coordinates
-            # Each CCD is 4096x4096 and the global origin is the bottom-left corner of CCD 15.
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- euclid_CCD_n = loc2glob.ccd_tot
-
- # Shift origin to the global positions
- global_pos = data_pos * (4096*6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
-
- ccd_n_list = local_pos[:,0].astype(int)
- local_pos = local_pos[:,1:]
-
- # Save the local CCDs on fits catalogs
-        for ccd_it in range(euclid_CCD_n):
-
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_local = local_pos[ccd_mask, 0]
- y_local = local_pos[ccd_mask, 1]
- GT_stars = data_stars[ccd_mask, :, :]
- obs_stars = shifted_stars[ccd_mask, :, :]
- ccd_rand_shifts = rand_shifts[ccd_mask, :]
-
- saving_dic = {
- 'VIGNET': obs_stars,
- 'GT_VIGNET': GT_stars,
- 'XWIN_IMAGE': x_local,
- 'YWIN_IMAGE': y_local,
- 'SHIFTS': ccd_rand_shifts,
- }
-
- ccd_str = '%02d'%ccd_it
- catalog_str = '%07d'%catalog_id
-
- saving_path = rca_base_path + folder_path + save_name + separator \
- + catalog_str + separator + ccd_str + save_extension
-
- mccd.mccd_utils.save_to_fits(saving_dic, saving_path)
-
-
-if __name__ == "__main__":
- main()
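
The key step in this script is the two-stage shifting: the stars are first re-centred on the postage stamp centroid and only then given a small random sub-pixel shift, so no single large shift is ever applied. A condensed sketch using the same `mccd.utils` calls as above; the wrapper name `add_intra_pixel_shifts` is ours, and `stars` is assumed to be a `(n_stars, h, w)` stack of noiseless vignettes:

    import numpy as np
    import mccd

    def add_intra_pixel_shifts(stars, psf_size=1.25, max_shift=0.4, seed=10):
        rng = np.random.RandomState(seed)
        # Stage 1: centre each star (sign flipped, as we shift the star, not the stamp)
        cents = [mccd.utils.CentroidEstimator(star, sig=psf_size) for star in stars]
        req_shifts = -np.array([ce.return_shifts() for ce in cents])
        kers, _ = mccd.utils.shift_ker_stack(shifts=req_shifts, upfact=1, lanc_rad=4)
        kers = mccd.utils.reg_format(kers)
        centred = np.array([
            mccd.utils.degradation_op(star, ker, D=1)
            for star, ker in zip(stars, kers)
        ])
        # Stage 2: random shifts uniformly distributed in [-max_shift, max_shift]
        rand_shifts = (rng.rand(stars.shape[0], 2) - 0.5) * 2 * max_shift
        kers, _ = mccd.utils.shift_ker_stack(shifts=rand_shifts, upfact=1, lanc_rad=4)
        kers = mccd.utils.reg_format(kers)
        shifted = np.array([
            mccd.utils.degradation_op(star, ker, D=1)
            for star, ker in zip(centred, kers)
        ])
        return shifted, rand_shifts
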
diff --git a/method-comparison/scripts/dataset-conversion-RCA.py b/method-comparison/scripts/dataset-conversion-RCA.py
deleted file mode 100644
index 01237899..00000000
--- a/method-comparison/scripts/dataset-conversion-RCA.py
+++ /dev/null
@@ -1,135 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-import numpy as np
-import mccd
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--base_repo_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Base wf-psf repository path.")
-@click.option(
- "--rca_saving_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/rca/",
- type=str,
- help="RCA dataset saving path. Should have /test and /train folders inside.")
-
-
-### Create RCA catalogs
-
-# Required FITS table names:
-# 'VIGNET' : observed vignets
-# 'XWIN_IMAGE' : X local coordinate
-# 'YWIN_IMAGE' : Y local coordinate
-
-# The following entries are optional:
-# 'SNR_WIN' : SNR estimation
-# 'XWIN_WORLD' : X WCS global coordinate (RA)
-# 'YWIN_WORLD' : Y WCS global coordinate (DEC)
-
-# Name example:
-# An example filename would be: ``star_selection-1234567-04.fits``
-# Where the exposure ID is: ``1234567``
-# Where the CCD ID is: ``04`` (using 2 digits)
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(rca_dataset_conversion)(**args) for i in range(1)
- )
-
-
-
-def rca_dataset_conversion(**args):
-
- # Loading dir
- dataset_path = args['base_repo_path'] + 'data/coherent_euclid_dataset/'
-
- # Saving parameters
- rca_base_path = args['rca_saving_path']
-
- separator = '-'
- save_extension = '.fits'
-
-
- catalog_ids = [200, 500, 1000, 2000]
- train_cat = [True, False]
-
- for catalog_id in catalog_ids:
- for train_bool in train_cat:
-
- if train_bool:
- save_name = 'train_stars'
- folder_path = 'train/'
- load_file = 'train_Euclid_res_%d_TrainStars_id_001.npy'%(catalog_id)
- else:
- save_name = 'test_stars'
- folder_path = 'test/'
- load_file = 'test_Euclid_res_id_001.npy'
-
- # Load dataset
- dataset = np.load(dataset_path + load_file, allow_pickle=True)[()]
- data_stars = dataset['stars']
- if save_name == 'train_stars':
- noisy_train_stars = dataset['noisy_stars']
- data_pos = dataset['positions']
-
-            # Convert positions and calculate CCD_n
-            # We need to switch these coordinates to global coordinates
-            # Each CCD is 4096x4096 and the global origin is the bottom-left corner of CCD 15.
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- euclid_CCD_n = loc2glob.ccd_tot
-
- # Shift origin to the global positions
- global_pos = data_pos * (4096*6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
-
- ccd_n_list = local_pos[:,0].astype(int)
- local_pos = local_pos[:,1:]
-
- # Save the local CCDs on fits catalogs
-        for ccd_it in range(euclid_CCD_n):
-
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_local = local_pos[ccd_mask, 0]
- y_local = local_pos[ccd_mask, 1]
- GT_stars = data_stars[ccd_mask, :, :]
-
- if save_name == 'train_stars':
- obs_stars = noisy_train_stars[ccd_mask, :, :]
- elif save_name == 'test_stars':
- obs_stars = GT_stars
-
- saving_dic = {
- 'VIGNET': obs_stars,
- 'GT_VIGNET': GT_stars,
- 'XWIN_IMAGE': x_local,
- 'YWIN_IMAGE': y_local,
- }
-
- ccd_str = '%02d'%ccd_it
- catalog_str = '%07d'%catalog_id
-
- saving_path = rca_base_path + folder_path + save_name + separator \
- + catalog_str + separator + ccd_str + save_extension
-
- mccd.mccd_utils.save_to_fits(saving_dic, saving_path)
-
-
-if __name__ == "__main__":
- main()
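
Both conversion scripts use the same position mapping: dataset positions in [0, 1000]^2 are rescaled to global focal-plane coordinates spanning 6 CCDs of 4096 pixels per axis (centred on zero), then resolved into a CCD number and local pixel coordinates. A short sketch with hypothetical positions:

    import numpy as np
    import mccd

    pos = np.array([[0.0, 500.0], [1000.0, 1000.0]])   # hypothetical dataset positions
    global_pos = pos * (4096 * 6 / 1000) - (4096 * 3)  # maps [0, 1000] -> [-12288, 12288]

    loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
    # glob2loc_img_coord returns (ccd_id, x_local, y_local) for each global position
    local = np.array([
        loc2glob.glob2loc_img_coord(x, y) for x, y in global_pos
    ])
    ccd_ids, local_xy = local[:, 0].astype(int), local[:, 1:]
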
diff --git a/method-comparison/scripts/mccd_script.py b/method-comparison/scripts/mccd_script.py
deleted file mode 100644
index bc2148bd..00000000
--- a/method-comparison/scripts/mccd_script.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/python
-
-import mccd
-import numpy as np
-from astropy.io import fits
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--config_file",
- default="/home/tliaudat/github/mccd_develop/mccd/wf_exps/config_files/config_MCCD_wf_exp.ini",
- type=str,
- help="MCCD configuration file path.")
-
-
-def mccd_procedure(**args):
- # Run MCCD
- run_mccd = mccd.auxiliary_fun.RunMCCD(
- args['config_file'],
- fits_table_pos=1,
- verbose=True
- )
- run_mccd.run_MCCD()
- # run_mccd.validate_MCCD_models()
-
- # Calculate metrics
- val_output_dir = run_mccd.param_parser.get_extra_kw(
- 'val_model_output_dir'
- )
-
- rmse_list = []
- rel_rmse_list = []
- catalogs = [200, 500, 1000, 2000]
-
- for it in range(len(catalogs)):
-
- # Load validation data
- file_name = 'validation_psf-%07d.fits'%(catalogs[it])
- mccd_val_dic = fits.open(val_output_dir + file_name)
-
- exp_psfs = mccd_val_dic[1].data['PSF_VIGNET_LIST']
- exp_stars = mccd_val_dic[1].data['VIGNET_LIST']
-
- # Calculate residuals
- residuals = np.sqrt(np.mean((exp_psfs - exp_stars)**2, axis=(1,2)))
- GT_star_mean = np.sqrt(np.mean((exp_stars)**2, axis=(1,2)))
-
- # RMSE calculations
- rmse = np.mean(residuals)
- rel_rmse = 100. * np.mean(residuals/GT_star_mean)
-
- rmse_list.append(rmse)
- rel_rmse_list.append(rel_rmse)
-
- rmse_list = np.array(rmse_list)
- rel_rmse_list = np.array(rel_rmse_list)
-
- metrics_dict = {
- 'rmse': rmse_list,
- 'rel_rmse': rel_rmse_list
- }
- metrics_save_name = 'mccd_metrics.npy'
- np.save(val_output_dir + metrics_save_name, metrics_dict, allow_pickle=True)
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(mccd_procedure)(**args) for i in range(1)
- )
-
-if __name__ == "__main__":
- main()
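
The metric computed above is a per-star pixel RMSE averaged over all validation stars, plus a relative version in percent normalised by each star's RMS value. A standalone sketch of the same formulas (the function name is ours):

    import numpy as np

    def pixel_rmse(psfs, stars):
        """psfs, stars: arrays of shape (n_stars, h, w)."""
        residuals = np.sqrt(np.mean((psfs - stars) ** 2, axis=(1, 2)))
        star_norm = np.sqrt(np.mean(stars ** 2, axis=(1, 2)))
        rmse = np.mean(residuals)
        rel_rmse = 100.0 * np.mean(residuals / star_norm)
        return rmse, rel_rmse
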
diff --git a/method-comparison/scripts/mccd_script_SR.py b/method-comparison/scripts/mccd_script_SR.py
deleted file mode 100644
index 53909dd2..00000000
--- a/method-comparison/scripts/mccd_script_SR.py
+++ /dev/null
@@ -1,188 +0,0 @@
-#!/bin/python
-
-import traceback
-import numpy as np
-import mccd
-from wf_psf import method_comp_utils as comp_utils
-from wf_psf import metrics as metrics
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--config_file",
- default="/home/tliaudat/github/mccd_develop/mccd/wf_exps/config_files/config_MCCD_wf_exp.ini",
- type=str,
- help="MCCD configuration file path.")
-@click.option(
- "--psf_out_dim",
- default=64,
- type=int,
- help="Image dimension of the PSF vignet. Like the PSFEx variable (psf_size).")
-@click.option(
- "--run_id",
- default="mccd_SR",
- type=str,
- help="Id used for the saved SR validations and metrics.")
-@click.option(
- "--repo_base_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Path of the wf-psf repository.")
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(mccd_procedure)(**args) for i in range(1)
- )
-
-def mccd_procedure(**args):
-
- output_dim = [args['psf_out_dim'], args['psf_out_dim']]
- # Validation upfact
- upfact = 1
- run_id = args['run_id']
-
- # Generate instance of MCCD runner
- run_mccd = mccd.auxiliary_fun.RunMCCD(
- args['config_file'],
- fits_table_pos=1,
- verbose=True
- )
- # Train/fit the models
- run_mccd.fit_MCCD_models()
- run_mccd.preprocess_val_inputs()
-
- # Prepare validation
-
- # Generate GT SR psfs
- test_wf_file_path = args['repo_base_path'] + 'data/coherent_euclid_dataset/test_Euclid_res_id_001.npy'
- GT_predictions, wf_test_pos, _ = metrics.gen_GT_wf_model(
- test_wf_file_path,
- pred_output_Q=1,
- pred_output_dim=args['psf_out_dim']
- )
-
- # Need to change positions to local coordinates
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- ccd_tot = loc2glob.ccd_tot # 36
-
-    # Shift origin to the global positions
- global_pos = wf_test_pos * (4096 * 6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
- # CCD list
- ccd_n_list = local_pos[:,0].astype(int)
- # Local positions
- local_pos = local_pos[:,1:]
-
-
- # Fit model input dir
- fit_model_input_dir = run_mccd.param_parser.get_extra_kw(
- 'val_model_input_dir'
- )
- # Validation output dir
- val_output_dir = run_mccd.param_parser.get_extra_kw(
- 'val_model_output_dir'
- )
-
- catalog_ids = [200, 500, 1000, 2000]
- metrics_list = []
-
- for catalog_id in catalog_ids:
-
- # Fitted model path
- fit_model_path = fit_model_input_dir + \
- run_mccd.fitting_model_saving_name + run_mccd.separator + \
- '%07d'%catalog_id + '.npy'
-
- mccd_inst = mccd.mccd_quickload(fit_model_path)
- fit_upfact = mccd_inst.upfact
-
- # Create lists for exposure metrics
- star_list = []
- psf_SR_list = []
-
- # Iterate over the ccds
- for ccd_it in range(ccd_tot):
-
- try:
- # Extract test catalog
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_loc = local_pos[ccd_mask, 0].reshape(-1, 1)
- y_loc = local_pos[ccd_mask, 1].reshape(-1, 1)
-
- ccd_test_pos = np.concatenate((x_loc, y_loc), axis=1)
- ccd_test_stars = GT_predictions[ccd_mask, :, :]
-
- # Recover the PSFs from the model
- interp_psfs_SR = run_mccd.recover_MCCD_PSFs(
- fit_model_path,
- positions=ccd_test_pos,
- ccd_id=ccd_it,
- local_pos=True,
- upfact=upfact
- )
-
- # Match SR psfs to SR stars
- # Crop to output dim
- interp_psfs_SR = np.array([
- comp_utils.crop_at_center(_psf_SR, output_dim) for _psf_SR in interp_psfs_SR
- ])
-
- # Match to SR stars
- matched_SR_psfs = comp_utils.match_psfs(
- interp_psfs_SR,
- ccd_test_stars,
- psf_size=1.25 * (fit_upfact / upfact)
- )
-
- # Save validation PSFs and Ground truth stars
- val_dict = {
- 'PSF_VIGNETS_SR': matched_SR_psfs,
- 'GT_VIGNET': ccd_test_stars,
- 'POS_LOCAL': ccd_test_pos,
- 'CCD_ID': np.ones(ccd_test_pos.shape[0]) * ccd_it,
- }
- val_save_name = run_id + '_validation-%07d-%02d.npy'%(catalog_id, ccd_it)
- np.save(val_output_dir + val_save_name, val_dict, allow_pickle=True)
-
- # Add images to lists
- psf_SR_list.append(np.copy(matched_SR_psfs))
- star_list.append(np.copy(ccd_test_stars))
-
- except Exception:
- traceback.print_exc()
- print('Problem with catalog: train_stars_psfex-%07d-%02d.psf'%(catalog_id, ccd_it))
-
-
- # Concatenate all the test stars in one array
- exp_psfs = np.concatenate(psf_SR_list)
- exp_stars = np.concatenate(star_list)
-        # Calculate pixel and shape errors
- metrics_dic = comp_utils.shape_pix_metrics(exp_psfs, exp_stars)
- # Add dictionary to saving list
- metrics_list.append(metrics_dic)
-
-
- # Save the metrics
- metrics_dict = {
- 'metrics_dics': metrics_list,
- 'catalog_ids': catalog_ids
- }
- metrics_save_name = run_id + '_metrics.npy'
- np.save(val_output_dir + metrics_save_name, metrics_dict, allow_pickle=True)
-
- print('Good bye!')
-
-if __name__ == "__main__":
- main()
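
Before matching, the super-resolved PSFs are cropped to the requested output dimension. A plausible numpy equivalent of `comp_utils.crop_at_center`, written as our own reimplementation and assuming the input stamp is at least as large as the requested output:

    import numpy as np

    def crop_at_center(img, output_dim):
        h, w = img.shape
        oh, ow = output_dim
        top, left = (h - oh) // 2, (w - ow) // 2
        return img[top:top + oh, left:left + ow]

    psf = np.random.rand(96, 96)
    print(crop_at_center(psf, [64, 64]).shape)  # -> (64, 64)
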
diff --git a/method-comparison/scripts/psfex_script.py b/method-comparison/scripts/psfex_script.py
deleted file mode 100644
index 48d3635a..00000000
--- a/method-comparison/scripts/psfex_script.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/bin/python
-
-import os
-import subprocess
-import platform
-import traceback
-import numpy as np
-from astropy.io import fits
-import mccd
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-from wf_psf import method_comp_utils as comp_utils
-
-import click
-@click.command()
-
-@click.option(
- "--psfvar_degrees",
- default=2,
- type=int,
- help="Polynomial degree for each group. PSFEx variable.")
-@click.option(
- "--psf_sampling",
- default=1.,
- type=float,
- help="Sampling step in pixel units. PSFEx variable.")
-@click.option(
- "--psf_size",
- default=32,
- type=int,
- help="Image size of the PSF model. PSFEx variable.")
-@click.option(
- "--run_id",
- default="psfex_model",
- type=str,
- help="Id used for the saved models and validations.")
-@click.option(
- "--exec_path",
- default="psfex",
- type=str,
- help="Executable path of psfex.")
-@click.option(
- "--repo_base_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Path of the wf-psf repository.")
-@click.option(
- "--dataset_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/psfex/",
- type=str,
- help="Path to the directory with the PSFEx datasets. Should include the directories /test and /train.")
-@click.option(
- "--saving_dir",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/comparison-PSF-methods/outputs/psfex/",
- type=str,
- help="Path to the saving directory. Should include the directories /models, /validation and /metrics.")
-@click.option(
- "--verbose",
- default=1,
- type=int,
- help="Verbose parameter. Bigger than 0 means verbose.")
-
-
-def main(**args):
- print(args)
- # PSFEx SMP can be controlled directly from its config file
- psfex_procedure(**args)
-
-
-def psfex_procedure(**args):
- # Python version
- python_var_tuple = platform.python_version_tuple()
-
- # Model parameters
- run_id = args['run_id']
- psfex_config_path = args['repo_base_path'] + 'method-comparison/config_files/new_default.psfex'
- psfex_exec_path = args['exec_path']
-
- # Load data
- saving_base_path = args['saving_dir']
-
- input_train_dir = args['dataset_path'] + 'train/'
- input_test_dir = args['dataset_path'] + 'test/'
-
- model_save_dir_path = saving_base_path + 'models/'
- val_save_dir_path = saving_base_path + 'validation/'
- metrics_save_dir_path = saving_base_path + 'metrics/'
-
- # Need to change positions to local coordinates
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
-
- catalog_ids = [200, 500, 1000, 2000] # [200]
- ccd_tot = loc2glob.ccd_tot # 36
-
- rmse_list = []
- rel_rmse_list = []
-
-
- # Iterate over the catalogs
- for catalog_id in catalog_ids:
- # Create lists for exposure metrics
- psf_interp_list = []
- star_list = []
-
- # Iterate over the ccds
- for ccd_it in range(ccd_tot):
-
- ## Training
- # Train data
- train_file_path = input_train_dir + 'train_stars_psfex-%07d-%02d.fits'%(catalog_id, ccd_it)
- outcat_name = model_save_dir_path + '%s-%07d-%02d.psf'%(args['run_id'], catalog_id, ccd_it)
-
- # Define psfex command line
- command_line_base = (
- '{0} {1} -c {2} -PSF_DIR {3} -OUTCAT_NAME {4} -PSFVAR_DEGREES {5} -PSF_SAMPLING {6} -PSF_SIZE {7}'.format(
- psfex_exec_path,
- train_file_path,
- psfex_config_path,
- model_save_dir_path,
- outcat_name,
- args['psfvar_degrees'],
- args['psf_sampling'],
- '%2d,%2d'%(args['psf_size'], args['psf_size'])
- )
- )
-
- try:
- # For >=python3.7
- if int(python_var_tuple[0]) == 3 and int(python_var_tuple[1]) >= 7:
- process_output = subprocess.run(command_line_base, shell=True, check=False, capture_output=True)
- if args['verbose'] > 0:
- print('STDERR:\n', process_output.stderr.decode("utf-8"))
- print('STDOUT:\n', process_output.stdout.decode("utf-8"))
- # For python3.6
- elif int(python_var_tuple[0]) == 3 and int(python_var_tuple[1]) < 7:
- process_output = subprocess.run(
- command_line_base,
- shell=True,
- check=False,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT
- )
- if args['verbose'] > 0:
- print('STDOUT:\n', process_output.stdout.decode("utf-8"))
-
- # PSFEx default model name
- psf_model_path = model_save_dir_path + 'train_stars_psfex-%07d-%02d.psf'%(catalog_id, ccd_it)
-
- # Test catalog
- test_file_path = input_test_dir + 'test_stars_psfex-%07d-%02d.fits'%(0, ccd_it)
- test_catalog = fits.open(test_file_path)
-
- test_stars = test_catalog[2].data['VIGNET']
- test_pos = np.array([
- test_catalog[2].data['XWIN_IMAGE'],
- test_catalog[2].data['YWIN_IMAGE']
- ]).T
-
- # Interpolate PSFs
- interp_psfs = comp_utils.interpsfex(psf_model_path, test_pos)
-
- # Match PSFs, flux and intrapixel shifts
- matched_psfs = comp_utils.validation_stars(
- interp_psfs,
- test_stars=test_stars,
- psf_size=1.25 / args['psf_sampling']
- )
-
-
- # Save validation PSFs and Ground truth stars
- val_dict = {
- 'PSF_VIGNETS': matched_psfs,
- 'GT_VIGNET': test_stars,
- 'POS': test_pos,
- }
-
- val_save_name = run_id + '_validation-%07d-%02d.npy'%(catalog_id, ccd_it)
- np.save(val_save_dir_path + val_save_name, val_dict, allow_pickle=True)
-
- # Add images to lists
- psf_interp_list.append(np.copy(matched_psfs))
- star_list.append(np.copy(test_stars))
- except Exception:
- traceback.print_exc()
- print('Problem with catalog: train_stars_psfex-%07d-%02d.psf'%(catalog_id, ccd_it))
-
- # Calculate RMSE metric on all the CCDs
- exp_psfs = np.concatenate(psf_interp_list)
- exp_stars = np.concatenate(star_list)
-
- # Calculate residuals
- residuals = np.sqrt(np.mean((exp_psfs - exp_stars)**2, axis=(1,2)))
- GT_star_mean = np.sqrt(np.mean((exp_stars)**2, axis=(1,2)))
-
- # RMSE calculations
- rmse = np.mean(residuals)
- rel_rmse = 100. * np.mean(residuals/GT_star_mean)
-
- rmse_list.append(rmse)
- rel_rmse_list.append(rel_rmse)
-
- rmse_list = np.array(rmse_list)
- rel_rmse_list = np.array(rel_rmse_list)
-
- # Save the metrics
- metrics_dict = {
- 'rmse': rmse_list,
- 'rel_rmse': rel_rmse_list
- }
- metrics_save_name = run_id + '_metrics.npy'
- np.save(metrics_save_dir_path + metrics_save_name, metrics_dict, allow_pickle=True)
-
- print('Good bye!')
-
-
-if __name__ == "__main__":
- main()
-
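
Both PSFEx scripts branch on the interpreter version because `subprocess.run(..., capture_output=True)` only exists from Python 3.7; on 3.6 the output is captured through PIPE instead. A compact sketch of that guard (the helper name and the example command are ours):

    import platform
    import subprocess

    def run_captured(cmd):
        major, minor = (int(v) for v in platform.python_version_tuple()[:2])
        if (major, minor) >= (3, 7):
            return subprocess.run(cmd, shell=True, check=False, capture_output=True)
        # Python 3.6 fallback: merge stderr into the captured stdout
        return subprocess.run(
            cmd, shell=True, check=False,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        )

    out = run_captured("echo hello")  # any shell command works here
    print(out.stdout.decode("utf-8"))
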
diff --git a/method-comparison/scripts/psfex_script_SR.py b/method-comparison/scripts/psfex_script_SR.py
deleted file mode 100644
index eee5ff19..00000000
--- a/method-comparison/scripts/psfex_script_SR.py
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/bin/python
-
-import os
-import subprocess
-import platform
-import traceback
-
-import numpy as np
-import mccd
-from wf_psf import method_comp_utils as comp_utils
-from wf_psf import metrics as metrics
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--psfvar_degrees",
- default=2,
- type=int,
- help="Polynomial degree for each group. PSFEx variable.")
-@click.option(
- "--psf_sampling",
- default=.33,
- type=float,
- help="Sampling step in pixel units. PSFEx variable.")
-@click.option(
- "--psf_size",
- default=64,
- type=int,
- help="Image size of the PSF model. PSFEx variable.")
-@click.option(
- "--run_id",
- default="psfex_SR_model",
- type=str,
- help="Id used for the saved models and validations.")
-@click.option(
- "--exec_path",
- default="psfex",
- type=str,
- help="Executable path of psfex.")
-@click.option(
- "--repo_base_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Path of the wf-psf repository.")
-@click.option(
- "--dataset_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/method-comparison/compatible-datasets/psfex/",
- type=str,
- help="Path to the directory with the PSFEx datasets. Should include the directory /train.")
-@click.option(
- "--saving_dir",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/comparison-PSF-methods/outputs/psfex_SR/",
- type=str,
- help="Path to the saving directory.")
-@click.option(
- "--verbose",
- default=1,
- type=int,
- help="Verbose parameter. Bigger than 0 means verbose.")
-
-
-def main(**args):
- print(args)
- # PSFEx SMP can be controlled directly from its config file
- psfex_SR_procedure(**args)
-
-
-def psfex_SR_procedure(**args):
- # Python version
- python_var_tuple = platform.python_version_tuple()
-
- # Model parameters
- run_id = args['run_id']
- psfex_config_path = args['repo_base_path'] + 'method-comparison/config_files/new_default.psfex'
- psfex_exec_path = args['exec_path']
-
- # Load data
- saving_base_path = args['saving_dir']
-
- input_train_dir = args['dataset_path'] + 'train/'
-
- model_save_dir_path = saving_base_path + 'models/'
- val_save_dir_path = saving_base_path + 'validation/'
- metrics_save_dir_path = saving_base_path + 'metrics/'
-
- if args['psf_sampling'] != 0.33:
- raise NotImplementedError("The psf_sampling should be 0.33, meaning x3 Euclid resolution.")
-
- test_wf_file_path = args['repo_base_path'] + 'data/coherent_euclid_dataset/test_Euclid_res_id_001.npy'
- GT_predictions, wf_test_pos, _ = metrics.gen_GT_wf_model(
- test_wf_file_path,
- pred_output_Q=1,
- pred_output_dim=args['psf_size']
- )
-
- # Need to change positions to local coordinates
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- ccd_tot = loc2glob.ccd_tot # 36
-
-    # Shift origin to the global positions
- global_pos = wf_test_pos * (4096 * 6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
- # CCD list
- ccd_n_list = local_pos[:,0].astype(int)
- # Local positions
- local_pos = local_pos[:,1:]
-
-
- # Iteration parameters
- catalog_ids = [200, 500, 1000, 2000] # [200]
-
- metrics_list = []
-
- # Iterate over the catalogs
- for catalog_id in catalog_ids:
- # Create lists for exposure metrics
- psf_interp_list = []
- star_list = []
-
- # Iterate over the ccds
- for ccd_it in range(ccd_tot):
-
- ## Training
- # Train data
- train_file_path = input_train_dir + 'train_stars_psfex-%07d-%02d.fits'%(catalog_id, ccd_it)
- outcat_name = model_save_dir_path + '%s-%07d-%02d.psf'%(args['run_id'], catalog_id, ccd_it)
-
- # Define psfex command line
- command_line_base = (
- '{0} {1} -c {2} -PSF_DIR {3} -OUTCAT_NAME {4} -PSFVAR_DEGREES {5} -PSF_SAMPLING {6} -PSF_SIZE {7}'.format(
- psfex_exec_path,
- train_file_path,
- psfex_config_path,
- model_save_dir_path,
- outcat_name,
- args['psfvar_degrees'],
- args['psf_sampling'],
- '%2d,%2d'%(args['psf_size'], args['psf_size'])
- )
- )
-
- try:
- # For >=python3.7
- if int(python_var_tuple[0]) == 3 and int(python_var_tuple[1]) >= 7:
- process_output = subprocess.run(command_line_base, shell=True, check=False, capture_output=True)
- if args['verbose'] > 0:
- print('STDERR:\n', process_output.stderr.decode("utf-8"))
- print('STDOUT:\n', process_output.stdout.decode("utf-8"))
- # For python3.6
- elif int(python_var_tuple[0]) == 3 and int(python_var_tuple[1]) < 7:
- process_output = subprocess.run(
- command_line_base,
- shell=True,
- check=False,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT
- )
- if args['verbose'] > 0:
- print('STDOUT:\n', process_output.stdout.decode("utf-8"))
-
- # Extract test catalog
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_loc = local_pos[ccd_mask, 0].reshape(-1, 1)
- y_loc = local_pos[ccd_mask, 1].reshape(-1, 1)
-
- ccd_test_pos = np.concatenate((x_loc, y_loc), axis=1)
- ccd_test_stars = GT_predictions[ccd_mask, :, :]
-
- # PSFEx default model name
- psf_model_path = model_save_dir_path + 'train_stars_psfex-%07d-%02d.psf'%(catalog_id, ccd_it)
-
- # Interpolate PSFs
- interp_psfs = comp_utils.interpsfex(psf_model_path, ccd_test_pos)
-
- # Match PSFs, flux and intrapixel shifts
- matched_psfs = comp_utils.match_psfs(
- interp_psfs,
- test_stars=ccd_test_stars,
- psf_size=1.25 / args['psf_sampling']
- )
-
- # Save validation PSFs and Ground truth stars
- val_dict = {
- 'PSF_VIGNETS': matched_psfs,
- 'GT_VIGNET': ccd_test_stars,
- 'POS': ccd_test_pos,
- }
-
- val_save_name = run_id + '_validation-%07d-%02d.npy'%(catalog_id, ccd_it)
- np.save(val_save_dir_path + val_save_name, val_dict, allow_pickle=True)
-
- # Add images to lists
- psf_interp_list.append(np.copy(matched_psfs))
- star_list.append(np.copy(ccd_test_stars))
- except Exception:
- traceback.print_exc()
- print('Problem with catalog: train_stars_psfex-%07d-%02d.psf'%(catalog_id, ccd_it))
-
-
- # Concatenate all the test stars in one array
- exp_psfs = np.concatenate(psf_interp_list)
- exp_stars = np.concatenate(star_list)
-        # Calculate pixel and shape errors
- metrics_dic = comp_utils.shape_pix_metrics(exp_psfs, exp_stars)
- # Add dictionary to saving list
- metrics_list.append(metrics_dic)
-
- # Save the metrics
- metrics_dict = {
- 'metrics_dics': metrics_list,
- 'catalog_ids': catalog_ids
- }
- metrics_save_name = run_id + '_metrics.npy'
- np.save(metrics_save_dir_path + metrics_save_name, metrics_dict, allow_pickle=True)
-
- print('Good bye!')
-
-
-if __name__ == "__main__":
- main()
-
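
The `NotImplementedError` above pins PSF_SAMPLING to 0.33, i.e. a model sampled at roughly 3x Euclid resolution, which is also why the matching size passed to `match_psfs` scales as 1.25 / psf_sampling. A quick numeric check:

    psf_sampling = 0.33              # model pixel size in Euclid pixels
    sigma_euclid_px = 1.25           # star size at Euclid resolution
    sigma_model_px = sigma_euclid_px / psf_sampling
    print(round(sigma_model_px, 2))  # -> 3.79 model pixels (~3x oversampling)
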
diff --git a/method-comparison/scripts/rca_script.py b/method-comparison/scripts/rca_script.py
deleted file mode 100644
index 1702c055..00000000
--- a/method-comparison/scripts/rca_script.py
+++ /dev/null
@@ -1,192 +0,0 @@
-#!/bin/python
-
-import numpy as np
-from astropy.io import fits
-import rca
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--n_comp",
- default=4,
- type=int,
- help="RCA number of eigenPSFs.")
-@click.option(
- "--upfact",
- default=1,
- type=int,
- help="Upsampling factor.")
-@click.option(
- "--ksig",
- default=3.,
- type=float,
- help="Denoising parameter K.")
-@click.option(
- "--run_id",
- default="rca",
- type=str,
- help="Id used for the saved models and validations.")
-@click.option(
- "--psf_size",
- default=1.25,
- type=float,
- help="PSF size in the type of the psf_size_type parameter.")
-@click.option(
- "--psf_size_type",
- default="sigma",
- type=str,
- help="PSF size type.")
-@click.option(
- "--saving_dir",
- default="/n05data/tliaudat/wf_exps/outputs/rca/",
- type=str,
- help="Path to the saving directory. Should include the directories /models, /validation and /metrics.")
-@click.option(
- "--input_data_dir",
- default="/n05data/tliaudat/wf_exps/datasets/rca_shifts/",
- type=str,
- help="Input dataset directory. Should have /train and /test directories.")
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(rca_procedure)(**args) for i in range(1)
- )
-
-def rca_procedure(**args):
- # Model parameters
- n_comp = args['n_comp']
- upfact = args['upfact']
- ksig = args['ksig']
-
- run_id = args['run_id']
-
-
- # Load data
- saving_base_path = args['saving_dir']
- input_base_path = args['input_data_dir']
- model_save_dir_path = saving_base_path + 'models/'
- val_save_dir_path = saving_base_path + 'validation/'
- metrics_save_dir_path = saving_base_path + 'metrics/'
-
- catalog_ids = [200, 500, 1000, 2000] # [200]
- ccd_tot = 36 # 3
-
- rmse_list = []
- rel_rmse_list = []
-
-
- # Iterate over the catalogs
- for catalog_id in catalog_ids:
- # Create lists for exposure metrics
- psf_interp_list = []
- star_list = []
-
- # Iterate over the ccds
- for ccd_it in range(ccd_tot):
-
- ## Training
- # Train data
- train_file_name = 'train/train_stars-%07d-%02d.fits'%(catalog_id, ccd_it)
- train_catalog = fits.open(input_base_path + train_file_name)
-
- # Prepare input data
- # In RCA format (batch dim at the end)
- obs_stars = rca.utils.rca_format(
- train_catalog[1].data['VIGNET']
- )
- obs_pos = np.array([
- train_catalog[1].data['XWIN_IMAGE'],
- train_catalog[1].data['YWIN_IMAGE']
- ]).T
-
- # Create RCA instance
- rca_inst = rca.RCA(
- n_comp=n_comp,
- upfact=upfact,
- ksig=ksig,
- verbose=1
- )
-
-            # If we have 2 stars or fewer, we skip the CCD
- if obs_pos.shape[0] <= 2:
- continue
-
- # fit it to stars
- _, _ = rca_inst.fit(
- obs_stars,
- obs_pos,
- psf_size=args['psf_size'],
- psf_size_type=args['psf_size_type']
- )
- # Save model
- save_path = model_save_dir_path + run_id + '_fitted_model-%07d-%02d.npy'%(catalog_id, ccd_it)
- rca_inst.quicksave(save_path)
-
-
- ## Validation
- # Test data
- test_file_name = 'test/test_stars-%07d-%02d.fits'%(catalog_id, ccd_it)
- test_catalog = fits.open(input_base_path + test_file_name)
-
- test_stars = rca.utils.rca_format(
- test_catalog[1].data['VIGNET']
- )
-
- # Test positions
- test_pos = np.array([
- test_catalog[1].data['XWIN_IMAGE'],
- test_catalog[1].data['YWIN_IMAGE']
- ]).T
-
- # Recover test PSFs
- interp_psfs_upfact1 = rca_inst.validation_stars(test_stars, test_pos, upfact=1)
- interp_psfs = rca_inst.validation_stars(test_stars, test_pos, upfact=upfact)
-
- # Save validation PSFs and Ground truth stars
- val_dict = {
- 'PSF_VIGNETS': interp_psfs,
- 'PSF_UPFACT1': interp_psfs_upfact1,
- 'GT_VIGNET': test_catalog[1].data['VIGNET'],
- 'POS': test_pos,
- }
- val_save_name = run_id + '_validation-%07d-%02d.npy'%(catalog_id, ccd_it)
- np.save(val_save_dir_path + val_save_name, val_dict, allow_pickle=True)
-
- # Add images to lists
- psf_interp_list.append(np.copy(interp_psfs))
- star_list.append(np.copy(test_catalog[1].data['VIGNET']))
-
- # Calculate RMSE metric on all the CCDs
- exp_psfs = np.concatenate(psf_interp_list)
- exp_stars = np.concatenate(star_list)
-
- # Calculate residuals
- residuals = np.sqrt(np.mean((exp_psfs - exp_stars)**2, axis=(1,2)))
- GT_star_mean = np.sqrt(np.mean((exp_stars)**2, axis=(1,2)))
-
- # RMSE calculations
- rmse = np.mean(residuals)
- rel_rmse = 100. * np.mean(residuals/GT_star_mean)
-
- rmse_list.append(rmse)
- rel_rmse_list.append(rel_rmse)
-
- rmse_list = np.array(rmse_list)
- rel_rmse_list = np.array(rel_rmse_list)
-
- # Save the metrics
- metrics_dict = {
- 'rmse': rmse_list,
- 'rel_rmse': rel_rmse_list
- }
- metrics_save_name = run_id + '_metrics.npy'
- np.save(metrics_save_dir_path + metrics_save_name, metrics_dict, allow_pickle=True)
-
-
-if __name__ == "__main__":
- main()
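
RCA expects image stacks with the batch dimension last, hence the `rca.utils.rca_format` calls above. A numpy sketch of that layout change, under the assumption that the input stack is batch-first:

    import numpy as np

    stack = np.zeros((120, 51, 51))        # (n_stars, h, w), hypothetical sizes
    rca_stack = np.moveaxis(stack, 0, -1)  # -> (51, 51, 120), batch dimension last
    print(rca_stack.shape)
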
diff --git a/method-comparison/scripts/rca_script_SR.py b/method-comparison/scripts/rca_script_SR.py
deleted file mode 100644
index 37ae91b8..00000000
--- a/method-comparison/scripts/rca_script_SR.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/bin/python
-
-import numpy as np
-from astropy.io import fits
-import rca
-import mccd
-from wf_psf import method_comp_utils as comp_utils
-from wf_psf import metrics as metrics
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--n_comp",
- default=4,
- type=int,
- help="RCA number of eigenPSFs.")
-@click.option(
- "--upfact",
- default=3,
- type=int,
- help="Upsampling factor.")
-@click.option(
- "--ksig",
- default=3.,
- type=float,
- help="Denoising parameter K. Default 3.")
-@click.option(
- "--run_id",
- default="rca_SR",
- type=str,
- help="Id used for the saved models and validations.")
-@click.option(
- "--psf_out_dim",
- default=64,
- type=int,
- help="Image dimension of the PSF vignet. Like the PSFEx variable (psf_size).")
-@click.option(
- "--psf_size",
- default=1.25,
- type=float,
- help="PSF size in the type of the psf_size_type parameter. Default 1.25 (sigma).")
-@click.option(
- "--psf_size_type",
- default="sigma",
- type=str,
- help="PSF size type. Default sigma.")
-@click.option(
- "--saving_dir",
- default="/n05data/tliaudat/wf_exps/outputs/rca_SR_shifts/",
- type=str,
- help="Path to the saving directory. Should include the directories /models, /validation and /metrics.")
-@click.option(
- "--input_data_dir",
- default="/n05data/tliaudat/wf_exps/datasets/rca_shifts/",
- type=str,
- help="Input dataset directory. Should have /train and /test directories.")
-@click.option(
- "--repo_base_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Path of the wf-psf repository.")
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(rca_procedure)(**args) for i in range(1)
- )
-
-def rca_procedure(**args):
- # Model parameters
- n_comp = args['n_comp']
- upfact = args['upfact']
- ksig = args['ksig']
-
- run_id = args['run_id']
-
- output_dim = [args['psf_out_dim'], args['psf_out_dim']]
-
- # Load data
- saving_base_path = args['saving_dir']
- input_base_path = args['input_data_dir']
- model_save_dir_path = saving_base_path + 'models/'
- val_save_dir_path = saving_base_path + 'validation/'
- metrics_save_dir_path = saving_base_path + 'metrics/'
-
- # Generate GT SR psfs
- test_wf_file_path = args['repo_base_path'] + 'data/coherent_euclid_dataset/test_Euclid_res_id_001.npy'
- GT_predictions, wf_test_pos, _ = metrics.gen_GT_wf_model(
- test_wf_file_path,
- pred_output_Q=1,
- pred_output_dim=args['psf_out_dim']
- )
-
- # Need to change positions to local coordinates
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- ccd_tot = loc2glob.ccd_tot # 36
-
-    # Shift origin to the global positions
- global_pos = wf_test_pos * (4096 * 6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
- # CCD list
- ccd_n_list = local_pos[:,0].astype(int)
- # Local positions
- local_pos = local_pos[:,1:]
-
-
- catalog_ids = [200, 500, 1000, 2000] # [200]
- ccd_tot = 36 # 3
-
- metrics_list = []
-
- # Iterate over the catalogs
- for catalog_id in catalog_ids:
- # Create lists for exposure metrics
- psf_interp_list = []
- star_list = []
- psf_SR_list = []
-
- # Iterate over the ccds
- for ccd_it in range(ccd_tot):
-
- ## Training
- # Train data
- train_file_name = 'train/train_stars-%07d-%02d.fits'%(catalog_id, ccd_it)
- train_catalog = fits.open(input_base_path + train_file_name)
-
- # Prepare input data
- # In RCA format (batch dim at the end)
- obs_stars = rca.utils.rca_format(
- train_catalog[1].data['VIGNET']
- )
- obs_pos = np.array([
- train_catalog[1].data['XWIN_IMAGE'],
- train_catalog[1].data['YWIN_IMAGE']
- ]).T
-
- # Create RCA instance
- rca_inst = rca.RCA(
- n_comp=n_comp,
- upfact=upfact,
- ksig=ksig,
- verbose=1
- )
-
-            # If we have 2 stars or fewer, we skip the CCD
- if obs_pos.shape[0] <= 2:
- continue
-
- # fit it to stars
- _, _ = rca_inst.fit(
- obs_stars,
- obs_pos,
- psf_size=args['psf_size'],
- psf_size_type=args['psf_size_type']
- )
- # Save model
- save_path = model_save_dir_path + run_id + '_fitted_model-%07d-%02d.npy'%(catalog_id, ccd_it)
- rca_inst.quicksave(save_path)
-
-
- ## Validation
-
- # Extract test catalog
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_loc = local_pos[ccd_mask, 0].reshape(-1, 1)
- y_loc = local_pos[ccd_mask, 1].reshape(-1, 1)
-
- ccd_test_pos = np.concatenate((x_loc, y_loc), axis=1)
- ccd_test_stars = GT_predictions[ccd_mask, :, :]
-
-
- # Recover test PSFs
- interp_psfs = rca_inst.validation_stars(
- rca.utils.rca_format(ccd_test_stars),
- ccd_test_pos,
- upfact=upfact
- )
- interp_psfs_SR = rca_inst.estimate_psf(ccd_test_pos, upfact=1)
-
- # Match SR psfs to SR stars
- # Crop to output dim
- interp_psfs_SR = np.array([
- comp_utils.crop_at_center(_psf_SR, output_dim) for _psf_SR in interp_psfs_SR
- ])
- # Match to SR stars
- matched_SR_psfs = comp_utils.match_psfs(
- interp_psfs_SR,
- ccd_test_stars,
- psf_size=1.25 * upfact
- )
-
- # Save validation PSFs and Ground truth stars
- val_dict = {
- 'PSF_VIGNETS': interp_psfs,
- 'PSF_VIGNETS_SR': matched_SR_psfs,
- 'GT_VIGNET': ccd_test_stars,
- 'POS': ccd_test_pos,
- }
- val_save_name = run_id + '_validation-%07d-%02d.npy'%(catalog_id, ccd_it)
- np.save(val_save_dir_path + val_save_name, val_dict, allow_pickle=True)
-
- # Add images to lists
- psf_interp_list.append(np.copy(interp_psfs))
- psf_SR_list.append(np.copy(matched_SR_psfs))
- star_list.append(np.copy(ccd_test_stars))
-
- # Calculate RMSE metric on all the CCDs
-
- # Concatenate all the test stars in one array
- exp_psfs = np.concatenate(psf_SR_list)
- exp_stars = np.concatenate(star_list)
-        # Calculate pixel and shape errors
- metrics_dic = comp_utils.shape_pix_metrics(exp_psfs, exp_stars)
- # Add dictionary to saving list
- metrics_list.append(metrics_dic)
-
-
- # Save the metrics
- metrics_dict = {
- 'metrics_dics': metrics_list,
- 'catalog_ids': catalog_ids
- }
- metrics_save_name = run_id + '_metrics.npy'
- np.save(metrics_save_dir_path + metrics_save_name, metrics_dict, allow_pickle=True)
-
- print('Good bye!')
-
-
-if __name__ == "__main__":
- main()
diff --git a/method-comparison/scripts/test_mccd_script_SR.py b/method-comparison/scripts/test_mccd_script_SR.py
deleted file mode 100644
index 31771fea..00000000
--- a/method-comparison/scripts/test_mccd_script_SR.py
+++ /dev/null
@@ -1,189 +0,0 @@
-#!/bin/python
-
-import traceback
-import numpy as np
-import mccd
-from wf_psf import method_comp_utils as comp_utils
-from wf_psf import metrics as metrics
-from joblib import Parallel, delayed, parallel_backend, cpu_count
-
-import click
-@click.command()
-
-@click.option(
- "--config_file",
- default="/home/tliaudat/github/mccd_develop/mccd/wf_exps/config_files/config_MCCD_wf_exp.ini",
- type=str,
- help="MCCD configuration file path.")
-@click.option(
- "--psf_out_dim",
- default=64,
- type=int,
- help="Image dimension of the PSF vignet. Like the PSFEx variable (psf_size).")
-@click.option(
- "--run_id",
- default="mccd_SR",
- type=str,
- help="Id used for the saved SR validations and metrics.")
-@click.option(
- "--repo_base_path",
- default="/Users/tliaudat/Documents/PhD/codes/WF_PSF/github/wf-psf/",
- type=str,
- help="Path of the wf-psf repository.")
-
-
-def main(**args):
- print(args)
- with parallel_backend("loky", inner_max_num_threads=1):
- results = Parallel(n_jobs=1)(
- delayed(mccd_procedure)(**args) for i in range(1)
- )
-
-def mccd_procedure(**args):
-
- output_dim = [args['psf_out_dim'], args['psf_out_dim']]
- # Validation upfact
- upfact = 1
- run_id = args['run_id']
-
- # Generate instance of MCCD runner
- run_mccd = mccd.auxiliary_fun.RunMCCD(
- args['config_file'],
- fits_table_pos=1,
- verbose=True
- )
- # Train/fit the models
- run_mccd.parse_config_file()
- run_mccd.preprocess_inputs()
- # run_mccd.fit_MCCD_models()
- run_mccd.preprocess_val_inputs()
-
- # Prepare validation
-
- # Generate GT SR psfs
- test_wf_file_path = args['repo_base_path'] + 'data/coherent_euclid_dataset/test_Euclid_res_id_001.npy'
- GT_predictions, wf_test_pos, _ = metrics.gen_GT_wf_model(
- test_wf_file_path,
- pred_output_Q=1,
- pred_output_dim=args['psf_out_dim']
- )
-
- # Need to change positions to local coordinates
- loc2glob = mccd.mccd_utils.Loc2Glob_EUCLID_sim()
- ccd_tot = loc2glob.ccd_tot # 36
-
-    # Shift origin to the global positions
- global_pos = wf_test_pos * (4096 * 6 /1000) - (4096 * 3)
- # Calculate local positions and CCD_n
- local_pos = np.array([
- loc2glob.glob2loc_img_coord(_x_glob, _y_glob) for _x_glob, _y_glob in global_pos
- ])
- # CCD list
- ccd_n_list = local_pos[:,0].astype(int)
- # Local positions
- local_pos = local_pos[:,1:]
-
-
- # Fit model input dir
- fit_model_input_dir = run_mccd.param_parser.get_extra_kw(
- 'val_model_input_dir'
- )
- # Validation output dir
- val_output_dir = run_mccd.param_parser.get_extra_kw(
- 'val_model_output_dir'
- )
-
- catalog_ids = [200, 500, 1000, 2000]
- metrics_list = []
-
- for catalog_id in catalog_ids:
-
- # Fitted model path
- fit_model_path = fit_model_input_dir + \
- run_mccd.fitting_model_saving_name + run_mccd.separator + \
- '%07d'%catalog_id + '.npy'
-
- mccd_inst = mccd.mccd_quickload(fit_model_path)
- fit_upfact = mccd_inst.upfact
-
- # Create lists for exposure metrics
- star_list = []
- psf_SR_list = []
-
- # Iterate over the ccds
- for ccd_it in range(ccd_tot):
- try:
- # Extract test catalog
- # Build CCD mask
- ccd_mask = (ccd_n_list == ccd_it)
-
- # Extract elements from selected CCD
- x_loc = local_pos[ccd_mask, 0].reshape(-1, 1)
- y_loc = local_pos[ccd_mask, 1].reshape(-1, 1)
-
- ccd_test_pos = np.concatenate((x_loc, y_loc), axis=1)
- ccd_test_stars = GT_predictions[ccd_mask, :, :]
-
- # Recover the PSFs from the model
- interp_psfs_SR = run_mccd.recover_MCCD_PSFs(
- fit_model_path,
- positions=ccd_test_pos,
- ccd_id=ccd_it,
- local_pos=True,
- upfact=upfact
- )
-
- # Match SR psfs to SR stars
- # Crop to output dim
- interp_psfs_SR = np.array([
- comp_utils.crop_at_center(_psf_SR, output_dim) for _psf_SR in interp_psfs_SR
- ])
-
- # Match to SR stars
- matched_SR_psfs = comp_utils.match_psfs(
- interp_psfs_SR,
- ccd_test_stars,
- psf_size=1.25 * (fit_upfact / upfact)
- )
-
- # Save validation PSFs and Ground truth stars
- val_dict = {
- 'PSF_VIGNETS_SR': matched_SR_psfs,
- 'GT_VIGNET': ccd_test_stars,
- 'POS_LOCAL': ccd_test_pos,
- 'CCD_ID': np.ones(ccd_test_pos.shape[0]) * ccd_it,
- }
- val_save_name = run_id + '_validation-%07d-%02d.npy'%(catalog_id, ccd_it)
- np.save(val_output_dir + val_save_name, val_dict, allow_pickle=True)
-
- # Add images to lists
- psf_SR_list.append(np.copy(matched_SR_psfs))
- star_list.append(np.copy(ccd_test_stars))
-
- except Exception:
- traceback.print_exc()
- print('Problem with catalog: train_stars_psfex-%07d-%02d.psf'%(catalog_id, ccd_it))
-
-
- # Concatenate all the test stars in one array
- exp_psfs = np.concatenate(psf_SR_list)
- exp_stars = np.concatenate(star_list)
-        # Calculate pixel and shape errors
- metrics_dic = comp_utils.shape_pix_metrics(exp_psfs, exp_stars)
- # Add dictionary to saving list
- metrics_list.append(metrics_dic)
-
-
- # Save the metrics
- metrics_dict = {
- 'metrics_dics': metrics_list,
- 'catalog_ids': catalog_ids
- }
- metrics_save_name = run_id + '_metrics.npy'
- np.save(val_output_dir + metrics_save_name, metrics_dict, allow_pickle=True)
-
- print('Good bye!')
-
-if __name__ == "__main__":
- main()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..e83e3653
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,87 @@
+[project]
+name = "wf_psf"
+requires-python = ">=3.9"
+authors = [
+ { "name" = "Tobias Liaudat", "email" = "tobiasliaudat@gmail.com"},
+ { "name" = "Jennifer Pollack", "email" = "jennifer.pollack@cea.fr"},
+]
+maintainers = [
+ { "name" = "Jennifer Pollack", "email" = "jennifer.pollack@cea.fr" },
+]
+
+description = 'A software framework to perform differentiable wavefront-based PSF modelling.'
+dependencies = [
+ "numpy>=1.19.2",
+ "scipy>=1.11.2",
+ "tensorflow>=2.9.1",
+ "tensorflow-addons>=0.12.1",
+ "tensorflow-estimator>=2.9.0",
+ "zernike==0.0.31",
+ "opencv-python>=4.5.1.48",
+ "pillow>=9.5.0",
+ "galsim>=2.4.11",
+ "astropy>=5.3.3",
+ "matplotlib>=3.3.2",
+ "seaborn>=0.12.2",
+]
+
+version = "2.0.0"
+
+[project.optional-dependencies]
+docs = [
+ "importlib_metadata",
+ "myst-parser",
+ "numpydoc",
+ "sphinx",
+ "sphinxcontrib-bibtex",
+ "sphinxawesome-theme",
+ "sphinx-gallery",
+ "sphinxemoji",
+ "sphinx_rtd_theme",
+
+]
+
+lint = [
+ "black",
+]
+
+release = [
+ "build",
+ "twine",
+]
+
+test = [
+ "pytest",
+ "pytest-black",
+ "pytest-cases",
+ "pytest-cov",
+ "pytest-emoji",
+ "pytest-raises",
+ "pytest-xdist",
+]
+
+# Install for development
+dev = ["wf_psf[docs,lint,release,test]"]
+
+[project.scripts]
+wavediff = "wf_psf.run:mainMethod"
+
+[tool.black]
+line-length = 88
+
+[tool.pydocstyle]
+convention = "numpy"
+
+
+[tool.pytest.ini_options]
+addopts = [
+ "--verbose",
+ "--black",
+ "--emoji",
+ "--cov=wf_psf",
+ "--cov-report=term-missing",
+ "--cov-report=xml",
+ "--junitxml=pytest.xml",
+
+]
+testpaths = ["src/wf_psf"]
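
Once the package is installed (e.g. `python -m pip install ".[dev]"`, matching the extras declared above), the metadata and the `wavediff` console script can be inspected from the standard library. A small sketch using the `entry_points` selection API available from Python 3.10:

    from importlib.metadata import entry_points, version

    print(version("wf_psf"))                     # -> "2.0.0"
    eps = entry_points(group="console_scripts")  # keyword selection: Python >= 3.10
    print([ep.value for ep in eps if ep.name == "wavediff"])  # -> ['wf_psf.run:mainMethod']
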
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 77270c5a..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-numpy>=1.19.2
-scipy>=1.5.2
-tensorflow>=2.4.1
-tensorflow-addons>=0.12.1
-zernike==0.0.31
-opencv-python>=4.5.1.48
-pillow>=8.1.0
-galsim>=2.3.1
-astropy>=4.1
-matplotlib>=3.3.2
-seaborn>=0.11
diff --git a/setup.py b/setup.py
deleted file mode 100644
index 940994a1..00000000
--- a/setup.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#! /usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from setuptools import setup
-import os
-
-__name__ = 'wf_psf'
-
-release_info = {}
-infopath = os.path.abspath(os.path.join(os.path.dirname(__file__),
- __name__, 'info.py'))
-with open(infopath) as open_file:
- exec(open_file.read(), release_info)
-
-setup(
- name=__name__,
- version=release_info['__version__'],
- description='Differentiable wavefront-based PSF modelling',
- url=release_info['__url__'],
- author=release_info['__author__'],
- author_email=release_info['__email__'],
- license=release_info['__license__'],
- packages=['wf_psf'],
- zip_safe=False,
-)
-
diff --git a/src/wf_psf/__init__.py b/src/wf_psf/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/wf_psf/data/__init__.py b/src/wf_psf/data/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/wf_psf/data/training_preprocessing.py b/src/wf_psf/data/training_preprocessing.py
new file mode 100644
index 00000000..58e23d1e
--- /dev/null
+++ b/src/wf_psf/data/training_preprocessing.py
@@ -0,0 +1,98 @@
+"""Training Data Processing.
+
+A module to load and preprocess training and validation test data.
+
+:Authors: Jennifer Pollack and Tobias Liaudat
+
+"""
+import numpy as np
+import wf_psf.utils.utils as utils
+import tensorflow as tf
+import tensorflow_addons as tfa
+import wf_psf.sims.SimPSFToolkit as SimPSFToolkit
+import os
+
+
+class TrainingDataHandler:
+ """Training Data Handler.
+
+ A class to manage training data.
+
+ Parameters
+ ----------
+    training_data_params : Recursive Namespace object
+        Recursive Namespace object containing training data parameters
+    simPSF : object
+        SimPSFToolkit instance
+    n_bins_lambda : int
+        Number of bins in wavelength
+
+ """
+
+ def __init__(self, training_data_params, simPSF, n_bins_lambda):
+ self.training_data_params = training_data_params
+ self.train_dataset = np.load(
+ os.path.join(
+ self.training_data_params.data_dir, self.training_data_params.file
+ ),
+ allow_pickle=True,
+ )[()]
+ self.train_dataset["positions"] = tf.convert_to_tensor(
+ self.train_dataset["positions"], dtype=tf.float32
+ )
+ self.train_dataset["noisy_stars"] = tf.convert_to_tensor(
+ self.train_dataset["noisy_stars"], dtype=tf.float32
+ )
+ self.simPSF = simPSF
+ self.n_bins_lambda = n_bins_lambda
+ self.sed_data = [
+ utils.generate_SED_elems_in_tensorflow(
+ _sed, self.simPSF, n_bins=self.n_bins_lambda, tf_dtype=tf.float64
+ )
+ for _sed in self.train_dataset["SEDs"]
+ ]
+ self.sed_data = tf.convert_to_tensor(self.sed_data, dtype=tf.float32)
+ self.sed_data = tf.transpose(self.sed_data, perm=[0, 2, 1])
+
+
+class TestDataHandler:
+ """Test Data Handler.
+
+ A class to handle test data for model validation.
+
+ Parameters
+ ----------
+    test_data_params : Recursive Namespace object
+        Recursive Namespace object containing test data parameters
+    simPSF : object
+        SimPSFToolkit instance
+    n_bins_lambda : int
+        Number of bins in wavelength
+
+ """
+
+ def __init__(self, test_data_params, simPSF, n_bins_lambda):
+ self.test_data_params = test_data_params
+ self.test_dataset = np.load(
+ os.path.join(self.test_data_params.data_dir, self.test_data_params.file),
+ allow_pickle=True,
+ )[()]
+ self.test_dataset["stars"] = tf.convert_to_tensor(
+ self.test_dataset["stars"], dtype=tf.float32
+ )
+ self.test_dataset["positions"] = tf.convert_to_tensor(
+ self.test_dataset["positions"], dtype=tf.float32
+ )
+
+ # Prepare validation data inputs
+ self.simPSF = simPSF
+ self.n_bins_lambda = n_bins_lambda
+
+ self.sed_data = [
+ utils.generate_SED_elems_in_tensorflow(
+ _sed, self.simPSF, n_bins=self.n_bins_lambda, tf_dtype=tf.float64
+ )
+ for _sed in self.test_dataset["SEDs"]
+ ]
+ self.sed_data = tf.convert_to_tensor(self.sed_data, dtype=tf.float32)
+ self.sed_data = tf.transpose(self.sed_data, perm=[0, 2, 1])
diff --git a/src/wf_psf/metrics/__init__.py b/src/wf_psf/metrics/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/wf_psf/metrics.py b/src/wf_psf/metrics/metrics.py
similarity index 70%
rename from wf_psf/metrics.py
rename to src/wf_psf/metrics/metrics.py
index d3e4705b..ed7fa58c 100644
--- a/wf_psf/metrics.py
+++ b/src/wf_psf/metrics/metrics.py
@@ -2,10 +2,13 @@
import tensorflow as tf
import matplotlib.pyplot as plt
import galsim as gs
-import wf_psf.utils as utils
-from wf_psf.tf_psf_field import build_PSF_model
-from wf_psf import tf_psf_field as psf_field
-from wf_psf import SimPSFToolkit as SimPSFToolkit
+import wf_psf.utils.utils as utils
+from wf_psf.psf_models.tf_psf_field import build_PSF_model
+from wf_psf.psf_models import tf_psf_field as psf_field
+from wf_psf.sims import SimPSFToolkit as SimPSFToolkit
+import logging
+
+logger = logging.getLogger(__name__)
def compute_poly_metric(
@@ -19,9 +22,9 @@ def compute_poly_metric(
batch_size=16,
dataset_dict=None,
):
- """ Calculate metrics for polychromatic reconstructions.
+ """Calculate metrics for polychromatic reconstructions.
- The ``tf_semiparam_field`` should be the model to evaluate, and the
+ The ``tf_semiparam_field`` should be the model to evaluate, and the
``GT_tf_semiparam_field`` should be loaded with the ground truth PSF field.
Relative values returned in [%] (so multiplied by 100).
@@ -46,7 +49,7 @@ def compute_poly_metric(
batch_size: int
Batch size for the PSF calculations.
dataset_dict: dict
- Dictionary containing the dataset information. If provided, and if the `'stars'` key
+ Dictionary containing the dataset information. If provided, and if the `'stars'` key
is present, the noiseless stars from the dataset are used to compute the metrics.
Otherwise, the stars are generated from the GT model.
Default is `None`.
@@ -65,7 +68,8 @@ def compute_poly_metric(
"""
# Generate SED data list for the model
packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in tf_SEDs
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
+ for _sed in tf_SEDs
]
tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
@@ -75,16 +79,17 @@ def compute_poly_metric(
preds = tf_semiparam_field.predict(x=pred_inputs, batch_size=batch_size)
# GT data preparation
- if dataset_dict is None or 'stars' not in dataset_dict:
- print('Regenerating GT stars from model.')
+ if dataset_dict is None or "stars" not in dataset_dict:
+ logger.info("Regenerating GT stars from model.")
# Change interpolation parameters for the GT simPSF
- interp_pts_per_bin = simPSF_np.interp_pts_per_bin
- simPSF_np.interp_pts_per_bin = 0
+ interp_pts_per_bin = simPSF_np.SED_interp_pts_per_bin
+ simPSF_np.SED_interp_pts_per_bin = 0
SED_sigma = simPSF_np.SED_sigma
simPSF_np.SED_sigma = 0
# Generate SED data list for GT model
packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_gt) for _sed in tf_SEDs
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_gt)
+ for _sed in tf_SEDs
]
tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
@@ -92,27 +97,26 @@ def compute_poly_metric(
# GT model prediction
GT_preds = GT_tf_semiparam_field.predict(x=pred_inputs, batch_size=batch_size)
-
- else:
- print('Using GT stars from dataset.')
- GT_preds = dataset_dict['stars']
+ else:
+ logger.info("Using GT stars from dataset.")
+ GT_preds = dataset_dict["stars"]
# Calculate residuals
- residuals = np.sqrt(np.mean((GT_preds - preds)**2, axis=(1, 2)))
- GT_star_mean = np.sqrt(np.mean((GT_preds)**2, axis=(1, 2)))
+ residuals = np.sqrt(np.mean((GT_preds - preds) ** 2, axis=(1, 2)))
+ GT_star_mean = np.sqrt(np.mean((GT_preds) ** 2, axis=(1, 2)))
# RMSE calculations
rmse = np.mean(residuals)
- rel_rmse = 100. * np.mean(residuals / GT_star_mean)
+ rel_rmse = 100.0 * np.mean(residuals / GT_star_mean)
# STD calculations
std_rmse = np.std(residuals)
- std_rel_rmse = 100. * np.std(residuals / GT_star_mean)
+ std_rel_rmse = 100.0 * np.std(residuals / GT_star_mean)
# Print RMSE values
- print('Absolute RMSE:\t %.4e \t +/- %.4e' % (rmse, std_rmse))
- print('Relative RMSE:\t %.4e %% \t +/- %.4e %%' % (rel_rmse, std_rel_rmse))
+ logger.info("Absolute RMSE:\t %.4e \t +/- %.4e" % (rmse, std_rmse))
+ logger.info("Relative RMSE:\t %.4e %% \t +/- %.4e %%" % (rel_rmse, std_rel_rmse))
return rmse, rel_rmse, std_rmse, std_rel_rmse
@@ -123,11 +127,11 @@ def compute_mono_metric(
simPSF_np,
tf_pos,
lambda_list,
- batch_size=32
+ batch_size=32,
):
- """ Calculate metrics for monochromatic reconstructions.
+ """Calculate metrics for monochromatic reconstructions.
- The ``tf_semiparam_field`` should be the model to evaluate, and the
+ The ``tf_semiparam_field`` should be the model to evaluate, and the
``GT_tf_semiparam_field`` should be loaded with the ground truth PSF field.
Relative values returned in [%] (so multiplied by 100).
@@ -140,7 +144,7 @@ def compute_mono_metric(
Ground truth model to produce GT observations at any position
and wavelength.
simPSF_np: PSF simulator object
- Simulation object capable of calculating ``phase_N`` values from
+ Simulation object capable of calculating ``phase_N`` values from
wavelength values.
tf_pos: list of floats [batch x 2]
Positions to evaluate the model.
@@ -183,7 +187,6 @@ def compute_mono_metric(
n_epochs = int(np.ceil(total_samples / batch_size))
ep_low_lim = 0
for ep in range(n_epochs):
-
# Define the upper limit
if ep_low_lim + batch_size >= total_samples:
ep_up_lim = total_samples
@@ -203,9 +206,12 @@ def compute_mono_metric(
num_pixels = GT_mono_psf.shape[1] * GT_mono_psf.shape[2]
- residuals[ep_low_lim:ep_up_lim] = np.sum((GT_mono_psf - model_mono_psf)**2,
- axis=(1, 2)) / num_pixels
- GT_star_mean[ep_low_lim:ep_up_lim] = np.sum((GT_mono_psf)**2, axis=(1, 2)) / num_pixels
+ residuals[ep_low_lim:ep_up_lim] = (
+ np.sum((GT_mono_psf - model_mono_psf) ** 2, axis=(1, 2)) / num_pixels
+ )
+ GT_star_mean[ep_low_lim:ep_up_lim] = (
+ np.sum((GT_mono_psf) ** 2, axis=(1, 2)) / num_pixels
+ )
# Increase lower limit
ep_low_lim += batch_size
@@ -216,24 +222,24 @@ def compute_mono_metric(
# RMSE calculations
rmse_lda.append(np.mean(residuals))
- rel_rmse_lda.append(100. * np.mean(residuals / GT_star_mean))
+ rel_rmse_lda.append(100.0 * np.mean(residuals / GT_star_mean))
# STD calculations
std_rmse_lda.append(np.std(residuals))
- std_rel_rmse_lda.append(100. * np.std(residuals / GT_star_mean))
+ std_rel_rmse_lda.append(100.0 * np.std(residuals / GT_star_mean))
return rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda
def compute_opd_metrics(tf_semiparam_field, GT_tf_semiparam_field, pos, batch_size=16):
- """ Compute the OPD metrics.
-
+ """Compute the OPD metrics.
+
Need to handle a batch size to avoid Out-Of-Memory errors with
the GPUs. This is mainly because the OPD maps
have a higher dimensionality than the observed PSFs.
-
+
The OPD RMSE is computed after having removed the mean from the
- different reconstructions. It is computed only on the
+ different reconstructions. It is computed only on the
non-obscured elements from the OPD.
Parameters
@@ -296,17 +302,21 @@ def compute_opd_metrics(tf_semiparam_field, GT_tf_semiparam_field, pos, batch_si
nb_mask_elems = np.sum(obsc_mask)
# Compute the OPD RMSE with the masked obscurations
res_opd = np.sqrt(
- np.array([
- np.sum((im1[obsc_mask] - im2[obsc_mask])**2) / nb_mask_elems
- for im1, im2 in zip(opd_batch, GT_opd_batch)
- ])
+ np.array(
+ [
+ np.sum((im1[obsc_mask] - im2[obsc_mask]) ** 2) / nb_mask_elems
+ for im1, im2 in zip(opd_batch, GT_opd_batch)
+ ]
+ )
)
GT_opd_mean = np.sqrt(
- np.array([np.sum(im2[obsc_mask]**2) / nb_mask_elems for im2 in GT_opd_batch])
+ np.array(
+ [np.sum(im2[obsc_mask] ** 2) / nb_mask_elems for im2 in GT_opd_batch]
+ )
)
# RMSE calculations
rmse_vals[counter:end_sample] = res_opd
- rel_rmse_vals[counter:end_sample] = 100. * (res_opd / GT_opd_mean)
+ rel_rmse_vals[counter:end_sample] = 100.0 * (res_opd / GT_opd_mean)
# Add the results to the lists
counter += batch_size
@@ -318,8 +328,8 @@ def compute_opd_metrics(tf_semiparam_field, GT_tf_semiparam_field, pos, batch_si
rel_rmse_std = np.std(rel_rmse_vals)
# Print RMSE values
- print('Absolute RMSE:\t %.4e \t +/- %.4e' % (rmse, rmse_std))
- print('Relative RMSE:\t %.4e %% \t +/- %.4e %%' % (rel_rmse, rel_rmse_std))
+ logger.info("Absolute RMSE:\t %.4e \t +/- %.4e" % (rmse, rmse_std))
+ logger.info("Relative RMSE:\t %.4e %% \t +/- %.4e %%" % (rel_rmse, rel_rmse_std))
return rmse, rel_rmse, rmse_std, rel_rmse_std
@@ -336,9 +346,9 @@ def compute_shape_metrics(
output_dim=64,
batch_size=16,
opt_stars_rel_pix_rmse=False,
- dataset_dict=None
+ dataset_dict=None,
):
- """ Compute the pixel, shape and size RMSE of a PSF model.
+ """Compute the pixel, shape and size RMSE of a PSF model.
This is done at a specific sampling and output image dimension.
It is done for polychromatic PSFs so SEDs are needed.
@@ -416,16 +426,21 @@ def compute_shape_metrics(
predictions = tf_semiparam_field.predict(x=pred_inputs, batch_size=batch_size)
# GT data preparation
- if dataset_dict is None or 'super_res_stars' not in dataset_dict or 'SR_stars' not in dataset_dict:
- print('Generating GT super resolved stars from the GT model.')
+ if (
+ dataset_dict is None
+ or "super_res_stars" not in dataset_dict
+ or "SR_stars" not in dataset_dict
+ ):
+ logger.info("Generating GT super resolved stars from the GT model.")
# Change interpolation parameters for the GT simPSF
- interp_pts_per_bin = simPSF_np.interp_pts_per_bin
- simPSF_np.interp_pts_per_bin = 0
+ interp_pts_per_bin = simPSF_np.SED_interp_pts_per_bin
+ simPSF_np.SED_interp_pts_per_bin = 0
SED_sigma = simPSF_np.SED_sigma
simPSF_np.SED_sigma = 0
# Generate SED data list for GT model
packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_gt) for _sed in SEDs
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_gt)
+ for _sed in SEDs
]
# Prepare inputs
@@ -434,56 +449,68 @@ def compute_shape_metrics(
pred_inputs = [tf_pos, tf_packed_SED_data]
# Ground Truth model
- GT_predictions = GT_tf_semiparam_field.predict(x=pred_inputs, batch_size=batch_size)
-
+ GT_predictions = GT_tf_semiparam_field.predict(
+ x=pred_inputs, batch_size=batch_size
+ )
+
else:
- print('Using super resolved stars from dataset.')
- if 'super_res_stars' in dataset_dict:
- GT_predictions = dataset_dict['super_res_stars']
- elif 'SR_stars' in dataset_dict:
- GT_predictions = dataset_dict['SR_stars']
+ logger.info("Using super resolved stars from dataset.")
+ if "super_res_stars" in dataset_dict:
+ GT_predictions = dataset_dict["super_res_stars"]
+ elif "SR_stars" in dataset_dict:
+ GT_predictions = dataset_dict["SR_stars"]
# Calculate residuals
- residuals = np.sqrt(np.mean((GT_predictions - predictions)**2, axis=(1, 2)))
- GT_star_mean = np.sqrt(np.mean((GT_predictions)**2, axis=(1, 2)))
+ residuals = np.sqrt(np.mean((GT_predictions - predictions) ** 2, axis=(1, 2)))
+ GT_star_mean = np.sqrt(np.mean((GT_predictions) ** 2, axis=(1, 2)))
# Pixel RMSE for each star
if opt_stars_rel_pix_rmse:
- stars_rel_pix_rmse = 100. * residuals / GT_star_mean
+ stars_rel_pix_rmse = 100.0 * residuals / GT_star_mean
# RMSE calculations
pix_rmse = np.mean(residuals)
- rel_pix_rmse = 100. * np.mean(residuals / GT_star_mean)
+ rel_pix_rmse = 100.0 * np.mean(residuals / GT_star_mean)
# STD calculations
pix_rmse_std = np.std(residuals)
- rel_pix_rmse_std = 100. * np.std(residuals / GT_star_mean)
+ rel_pix_rmse_std = 100.0 * np.std(residuals / GT_star_mean)
# Print pixel RMSE values
- print('\nPixel star absolute RMSE:\t %.4e \t +/- %.4e ' % (pix_rmse, pix_rmse_std))
- print('Pixel star relative RMSE:\t %.4e %% \t +/- %.4e %%' % (rel_pix_rmse, rel_pix_rmse_std))
+ logger.info(
+ "\nPixel star absolute RMSE:\t %.4e \t +/- %.4e " % (pix_rmse, pix_rmse_std)
+ )
+ logger.info(
+ "Pixel star relative RMSE:\t %.4e %% \t +/- %.4e %%"
+ % (rel_pix_rmse, rel_pix_rmse_std)
+ )
# Measure shapes of the reconstructions
- pred_moments = [gs.hsm.FindAdaptiveMom(gs.Image(_pred), strict=False) for _pred in predictions]
+ pred_moments = [
+ gs.hsm.FindAdaptiveMom(gs.Image(_pred), strict=False) for _pred in predictions
+ ]
# Measure shapes of the reconstructions
GT_pred_moments = [
- gs.hsm.FindAdaptiveMom(gs.Image(_pred), strict=False) for _pred in GT_predictions
+ gs.hsm.FindAdaptiveMom(gs.Image(_pred), strict=False)
+ for _pred in GT_predictions
]
pred_e1_HSM, pred_e2_HSM, pred_R2_HSM = [], [], []
GT_pred_e1_HSM, GT_pred_e2_HSM, GT_pred_R2_HSM = [], [], []
for it in range(len(GT_pred_moments)):
- if pred_moments[it].moments_status == 0 and GT_pred_moments[it].moments_status == 0:
-
+ if (
+ pred_moments[it].moments_status == 0
+ and GT_pred_moments[it].moments_status == 0
+ ):
pred_e1_HSM.append(pred_moments[it].observed_shape.g1)
pred_e2_HSM.append(pred_moments[it].observed_shape.g2)
- pred_R2_HSM.append(2 * (pred_moments[it].moments_sigma**2))
+ pred_R2_HSM.append(2 * (pred_moments[it].moments_sigma ** 2))
GT_pred_e1_HSM.append(GT_pred_moments[it].observed_shape.g1)
GT_pred_e2_HSM.append(GT_pred_moments[it].observed_shape.g2)
- GT_pred_R2_HSM.append(2 * (GT_pred_moments[it].moments_sigma**2))
+ GT_pred_R2_HSM.append(2 * (GT_pred_moments[it].moments_sigma ** 2))
pred_e1_HSM = np.array(pred_e1_HSM)
pred_e2_HSM = np.array(pred_e2_HSM)
@@ -500,18 +527,18 @@ def compute_shape_metrics(
e1_res_rel = (GT_pred_e1_HSM - pred_e1_HSM) / GT_pred_e1_HSM
rmse_e1 = np.sqrt(np.mean(e1_res**2))
- rel_rmse_e1 = 100. * np.sqrt(np.mean(e1_res_rel**2))
+ rel_rmse_e1 = 100.0 * np.sqrt(np.mean(e1_res_rel**2))
std_rmse_e1 = np.std(e1_res)
- std_rel_rmse_e1 = 100. * np.std(e1_res_rel)
+ std_rel_rmse_e1 = 100.0 * np.std(e1_res_rel)
# e2
e2_res = GT_pred_e2_HSM - pred_e2_HSM
e2_res_rel = (GT_pred_e2_HSM - pred_e2_HSM) / GT_pred_e2_HSM
rmse_e2 = np.sqrt(np.mean(e2_res**2))
- rel_rmse_e2 = 100. * np.sqrt(np.mean(e2_res_rel**2))
+ rel_rmse_e2 = 100.0 * np.sqrt(np.mean(e2_res_rel**2))
std_rmse_e2 = np.std(e2_res)
- std_rel_rmse_e2 = 100. * np.std(e2_res_rel)
+ std_rel_rmse_e2 = 100.0 * np.std(e2_res_rel)
# R2
R2_res = GT_pred_R2_HSM - pred_R2_HSM
@@ -520,21 +547,36 @@ def compute_shape_metrics(
std_rmse_R2_meanR2 = np.std(R2_res / GT_pred_R2_HSM)
# Print shape/size errors
- print('\nsigma(e1) RMSE =\t\t %.4e \t +/- %.4e ' % (rmse_e1, std_rmse_e1))
- print('sigma(e2) RMSE =\t\t %.4e \t +/- %.4e ' % (rmse_e2, std_rmse_e2))
- print('sigma(R2)/<R2> =\t\t %.4e \t +/- %.4e ' % (rmse_R2_meanR2, std_rmse_R2_meanR2))
+ logger.info("\nsigma(e1) RMSE =\t\t %.4e \t +/- %.4e " % (rmse_e1, std_rmse_e1))
+ logger.info("sigma(e2) RMSE =\t\t %.4e \t +/- %.4e " % (rmse_e2, std_rmse_e2))
+ logger.info(
+ "sigma(R2)/ =\t\t %.4e \t +/- %.4e " % (rmse_R2_meanR2, std_rmse_R2_meanR2)
+ )
# Print relative shape/size errors
- print('\nRelative sigma(e1) RMSE =\t %.4e %% \t +/- %.4e %%' % (rel_rmse_e1, std_rel_rmse_e1))
- print('Relative sigma(e2) RMSE =\t %.4e %% \t +/- %.4e %%' % (rel_rmse_e2, std_rel_rmse_e2))
+ logger.info(
+ "\nRelative sigma(e1) RMSE =\t %.4e %% \t +/- %.4e %%"
+ % (rel_rmse_e1, std_rel_rmse_e1)
+ )
+ logger.info(
+ "Relative sigma(e2) RMSE =\t %.4e %% \t +/- %.4e %%"
+ % (rel_rmse_e2, std_rel_rmse_e2)
+ )
# Print number of stars
- print('\nTotal number of stars: \t\t%d' % (len(GT_pred_moments)))
- print('Problematic number of stars: \t%d' % (len(GT_pred_moments) - GT_pred_e1_HSM.shape[0]))
+ logger.info("\nTotal number of stars: \t\t%d" % (len(GT_pred_moments)))
+ logger.info(
+ "Problematic number of stars: \t%d"
+ % (len(GT_pred_moments) - GT_pred_e1_HSM.shape[0])
+ )
# Re-set the original output_Q and output_dim parameters in the models
- tf_semiparam_field.set_output_Q(output_Q=original_out_Q, output_dim=original_out_dim)
- GT_tf_semiparam_field.set_output_Q(output_Q=GT_original_out_Q, output_dim=GT_original_out_dim)
+ tf_semiparam_field.set_output_Q(
+ output_Q=original_out_Q, output_dim=original_out_dim
+ )
+ GT_tf_semiparam_field.set_output_Q(
+ output_Q=GT_original_out_Q, output_dim=GT_original_out_dim
+ )
# Need to compile the models again
tf_semiparam_field = build_PSF_model(tf_semiparam_field)
@@ -542,39 +584,39 @@ def compute_shape_metrics(
# Moment results
result_dict = {
- 'pred_e1_HSM': pred_e1_HSM,
- 'pred_e2_HSM': pred_e2_HSM,
- 'pred_R2_HSM': pred_R2_HSM,
- 'GT_pred_e1_HSM': GT_pred_e1_HSM,
- 'GT_ped_e2_HSM': GT_pred_e2_HSM,
- 'GT_pred_R2_HSM': GT_pred_R2_HSM,
- 'rmse_e1': rmse_e1,
- 'std_rmse_e1': std_rmse_e1,
- 'rel_rmse_e1': rel_rmse_e1,
- 'std_rel_rmse_e1': std_rel_rmse_e1,
- 'rmse_e2': rmse_e2,
- 'std_rmse_e2': std_rmse_e2,
- 'rel_rmse_e2': rel_rmse_e2,
- 'std_rel_rmse_e2': std_rel_rmse_e2,
- 'rmse_R2_meanR2': rmse_R2_meanR2,
- 'std_rmse_R2_meanR2': std_rmse_R2_meanR2,
- 'pix_rmse': pix_rmse,
- 'pix_rmse_std': pix_rmse_std,
- 'rel_pix_rmse': rel_pix_rmse,
- 'rel_pix_rmse_std': rel_pix_rmse_std,
- 'output_Q': output_Q,
- 'output_dim': output_dim,
- 'n_bins_lda': n_bins_lda,
+ "pred_e1_HSM": pred_e1_HSM,
+ "pred_e2_HSM": pred_e2_HSM,
+ "pred_R2_HSM": pred_R2_HSM,
+ "GT_pred_e1_HSM": GT_pred_e1_HSM,
+ "GT_ped_e2_HSM": GT_pred_e2_HSM,
+ "GT_pred_R2_HSM": GT_pred_R2_HSM,
+ "rmse_e1": rmse_e1,
+ "std_rmse_e1": std_rmse_e1,
+ "rel_rmse_e1": rel_rmse_e1,
+ "std_rel_rmse_e1": std_rel_rmse_e1,
+ "rmse_e2": rmse_e2,
+ "std_rmse_e2": std_rmse_e2,
+ "rel_rmse_e2": rel_rmse_e2,
+ "std_rel_rmse_e2": std_rel_rmse_e2,
+ "rmse_R2_meanR2": rmse_R2_meanR2,
+ "std_rmse_R2_meanR2": std_rmse_R2_meanR2,
+ "pix_rmse": pix_rmse,
+ "pix_rmse_std": pix_rmse_std,
+ "rel_pix_rmse": rel_pix_rmse,
+ "rel_pix_rmse_std": rel_pix_rmse_std,
+ "output_Q": output_Q,
+ "output_dim": output_dim,
+ "n_bins_lda": n_bins_lda,
}
if opt_stars_rel_pix_rmse:
- result_dict['stars_rel_pix_rmse'] = stars_rel_pix_rmse
+ result_dict["stars_rel_pix_rmse"] = stars_rel_pix_rmse
return result_dict
def gen_GT_wf_model(test_wf_file_path, pred_output_Q=1, pred_output_dim=64):
- r""" Generate the ground truth model and output test PSF ar required resolution.
+ r"""Generate the ground truth model and output test PSF ar required resolution.
If `pred_output_Q=1` the resolution will be three times that of Euclid.
"""
@@ -582,43 +624,48 @@ def gen_GT_wf_model(test_wf_file_path, pred_output_Q=1, pred_output_dim=64):
wf_test_dataset = np.load(test_wf_file_path, allow_pickle=True)[()]
# Extract parameters from the wf test dataset
- wf_test_params = wf_test_dataset['parameters']
- wf_test_C_poly = wf_test_dataset['C_poly']
- wf_test_pos = wf_test_dataset['positions']
+ wf_test_params = wf_test_dataset["parameters"]
+ wf_test_C_poly = wf_test_dataset["C_poly"]
+ wf_test_pos = wf_test_dataset["positions"]
tf_test_pos = tf.convert_to_tensor(wf_test_pos, dtype=tf.float32)
- wf_test_SEDs = wf_test_dataset['SEDs']
+ wf_test_SEDs = wf_test_dataset["SEDs"]
# Generate GT model
batch_size = 16
# Generate Zernike maps
zernikes = utils.zernike_generator(
- n_zernikes=wf_test_params['max_order'], wfe_dim=wf_test_params['pupil_diameter']
+ n_zernikes=wf_test_params["max_order"], wfe_dim=wf_test_params["pupil_diameter"]
)
## Generate initializations
# Prepare np input
simPSF_np = SimPSFToolkit(
zernikes,
- max_order=wf_test_params['max_order'],
- pupil_diameter=wf_test_params['pupil_diameter'],
- output_dim=wf_test_params['output_dim'],
- oversampling_rate=wf_test_params['oversampling_rate'],
- output_Q=wf_test_params['output_Q']
+ max_order=wf_test_params["max_order"],
+ pupil_diameter=wf_test_params["pupil_diameter"],
+ output_dim=wf_test_params["output_dim"],
+ oversampling_rate=wf_test_params["oversampling_rate"],
+ output_Q=wf_test_params["output_Q"],
+ )
+ simPSF_np.gen_random_Z_coeffs(max_order=wf_test_params["max_order"])
+ z_coeffs = simPSF_np.normalize_zernikes(
+ simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms
)
- simPSF_np.gen_random_Z_coeffs(max_order=wf_test_params['max_order'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
simPSF_np.set_z_coeffs(z_coeffs)
simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
# Obscurations
obscurations = simPSF_np.generate_pupil_obscurations(
- N_pix=wf_test_params['pupil_diameter'], N_filter=wf_test_params['LP_filter_length']
+ N_pix=wf_test_params["pupil_diameter"],
+ N_filter=wf_test_params["LP_filter_length"],
)
tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
## Prepare ground truth model
# Now Zernike's as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
+ np_zernike_cube = np.zeros(
+ (len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1])
+ )
for it in range(len(zernikes)):
np_zernike_cube[it, :, :] = zernikes[it]
@@ -630,13 +677,13 @@ def gen_GT_wf_model(test_wf_file_path, pred_output_Q=1, pred_output_dim=64):
zernike_maps=tf_zernike_cube,
obscurations=tf_obscurations,
batch_size=batch_size,
- output_Q=wf_test_params['output_Q'],
+ output_Q=wf_test_params["output_Q"],
d_max_nonparam=2,
- output_dim=wf_test_params['output_dim'],
- n_zernikes=wf_test_params['max_order'],
- d_max=wf_test_params['d_max'],
- x_lims=wf_test_params['x_lims'],
- y_lims=wf_test_params['y_lims']
+ output_dim=wf_test_params["output_dim"],
+ n_zernikes=wf_test_params["max_order"],
+ d_max=wf_test_params["d_max"],
+ x_lims=wf_test_params["x_lims"],
+ y_lims=wf_test_params["y_lims"],
)
# For the Ground truth model
@@ -647,12 +694,14 @@ def gen_GT_wf_model(test_wf_file_path, pred_output_Q=1, pred_output_dim=64):
# Set required output_Q
- GT_tf_semiparam_field.set_output_Q(output_Q=pred_output_Q, output_dim=pred_output_dim)
+ GT_tf_semiparam_field.set_output_Q(
+ output_Q=pred_output_Q, output_dim=pred_output_dim
+ )
GT_tf_semiparam_field = psf_field.build_PSF_model(GT_tf_semiparam_field)
packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=wf_test_params['n_bins'])
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=wf_test_params["n_bins"])
for _sed in wf_test_SEDs
]
@@ -680,21 +729,27 @@ def compute_metrics(
tf_test_stars,
tf_train_stars,
n_bins_lda,
- batch_size=16
+ batch_size=16,
):
# Generate SED data list
test_packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in test_SEDs
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
+ for _sed in test_SEDs
]
- tf_test_packed_SED_data = tf.convert_to_tensor(test_packed_SED_data, dtype=tf.float32)
+ tf_test_packed_SED_data = tf.convert_to_tensor(
+ test_packed_SED_data, dtype=tf.float32
+ )
tf_test_packed_SED_data = tf.transpose(tf_test_packed_SED_data, perm=[0, 2, 1])
test_pred_inputs = [tf_test_pos, tf_test_packed_SED_data]
- test_predictions = tf_semiparam_field.predict(x=test_pred_inputs, batch_size=batch_size)
+ test_predictions = tf_semiparam_field.predict(
+ x=test_pred_inputs, batch_size=batch_size
+ )
# Initialize the SED data list
packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in train_SEDs
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
+ for _sed in train_SEDs
]
# First estimate the stars for the observations
tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
@@ -703,26 +758,30 @@ def compute_metrics(
train_predictions = tf_semiparam_field.predict(x=inputs, batch_size=batch_size)
# Calculate RMSE values
- test_res = np.sqrt(np.mean((tf_test_stars - test_predictions)**2))
- train_res = np.sqrt(np.mean((tf_train_stars - train_predictions)**2))
+ test_res = np.sqrt(np.mean((tf_test_stars - test_predictions) ** 2))
+ train_res = np.sqrt(np.mean((tf_train_stars - train_predictions) ** 2))
# Calculate relative RMSE values
- relative_test_res = test_res / np.sqrt(np.mean((tf_test_stars)**2))
- relative_train_res = train_res / np.sqrt(np.mean((tf_train_stars)**2))
+ relative_test_res = test_res / np.sqrt(np.mean((tf_test_stars) ** 2))
+ relative_train_res = train_res / np.sqrt(np.mean((tf_train_stars) ** 2))
# Print RMSE values
- print('Test stars absolute RMSE:\t %.4e' % test_res)
- print('Training stars absolute RMSE:\t %.4e' % train_res)
+ logger.info("Test stars absolute RMSE:\t %.4e" % test_res)
+ logger.info("Training stars absolute RMSE:\t %.4e" % train_res)
# Print RMSE values
- print('Test stars relative RMSE:\t %.4e %%' % (relative_test_res * 100.))
- print('Training stars relative RMSE:\t %.4e %%' % (relative_train_res * 100.))
+ logger.info("Test stars relative RMSE:\t %.4e %%" % (relative_test_res * 100.0))
+ logger.info(
+ "Training stars relative RMSE:\t %.4e %%" % (relative_train_res * 100.0)
+ )
return test_res, train_res
-def compute_opd_metrics_mccd(tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos):
- """ Compute the OPD metrics. """
+def compute_opd_metrics_mccd(
+ tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos
+):
+ """Compute the OPD metrics."""
np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
@@ -747,12 +806,14 @@ def compute_opd_metrics_mccd(tf_semiparam_field, GT_tf_semiparam_field, test_pos
# Calculate relative RMSE values
relative_test_opd_rmse = test_opd_rmse / np.sqrt(
- np.mean((GT_opd_maps.numpy() * np_obscurations)**2)
+ np.mean((GT_opd_maps.numpy() * np_obscurations) ** 2)
)
# Print RMSE values
- print('Test stars absolute OPD RMSE:\t %.4e' % test_opd_rmse)
- print('Test stars relative OPD RMSE:\t %.4e %%\n' % (relative_test_opd_rmse * 100.))
+ logger.info("Test stars absolute OPD RMSE:\t %.4e" % test_opd_rmse)
+ logger.info(
+ "Test stars relative OPD RMSE:\t %.4e %%\n" % (relative_test_opd_rmse * 100.0)
+ )
## For train part
# Param part
@@ -775,18 +836,22 @@ def compute_opd_metrics_mccd(tf_semiparam_field, GT_tf_semiparam_field, test_pos
# Calculate relative RMSE values
relative_train_opd_rmse = train_opd_rmse / np.sqrt(
- np.mean((GT_opd_maps.numpy() * np_obscurations)**2)
+ np.mean((GT_opd_maps.numpy() * np_obscurations) ** 2)
)
# Print RMSE values
- print('Train stars absolute OPD RMSE:\t %.4e' % train_opd_rmse)
- print('Train stars relative OPD RMSE:\t %.4e %%\n' % (relative_train_opd_rmse * 100.))
+ logger.info("Train stars absolute OPD RMSE:\t %.4e" % train_opd_rmse)
+ logger.info(
+ "Train stars relative OPD RMSE:\t %.4e %%\n" % (relative_train_opd_rmse * 100.0)
+ )
return test_opd_rmse, train_opd_rmse
-def compute_opd_metrics_polymodel(tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos):
- """ Compute the OPD metrics. """
+def compute_opd_metrics_polymodel(
+ tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos
+):
+ """Compute the OPD metrics."""
np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
@@ -811,12 +876,14 @@ def compute_opd_metrics_polymodel(tf_semiparam_field, GT_tf_semiparam_field, tes
# Calculate relative RMSE values
relative_test_opd_rmse = test_opd_rmse / np.sqrt(
- np.mean((GT_opd_maps.numpy() * np_obscurations)**2)
+ np.mean((GT_opd_maps.numpy() * np_obscurations) ** 2)
)
# Print RMSE values
- print('Test stars OPD RMSE:\t %.4e' % test_opd_rmse)
- print('Test stars relative OPD RMSE:\t %.4e %%\n' % (relative_test_opd_rmse * 100.))
+ logger.info("Test stars OPD RMSE:\t %.4e" % test_opd_rmse)
+ logger.info(
+ "Test stars relative OPD RMSE:\t %.4e %%\n" % (relative_test_opd_rmse * 100.0)
+ )
## For train part
# Param part
@@ -839,18 +906,22 @@ def compute_opd_metrics_polymodel(tf_semiparam_field, GT_tf_semiparam_field, tes
# Calculate relative RMSE values
relative_train_opd_rmse = train_opd_rmse / np.sqrt(
- np.mean((GT_opd_maps.numpy() * np_obscurations)**2)
+ np.mean((GT_opd_maps.numpy() * np_obscurations) ** 2)
)
# Print RMSE values
- print('Train stars OPD RMSE:\t %.4e' % train_opd_rmse)
- print('Train stars relative OPD RMSE:\t %.4e %%\n' % (relative_train_opd_rmse * 100.))
+ logger.info("Train stars OPD RMSE:\t %.4e" % train_opd_rmse)
+ logger.info(
+ "Train stars relative OPD RMSE:\t %.4e %%\n" % (relative_train_opd_rmse * 100.0)
+ )
return test_opd_rmse, train_opd_rmse
-def compute_opd_metrics_param_model(tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos):
- """ Compute the OPD metrics. """
+def compute_opd_metrics_param_model(
+ tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos
+):
+ """Compute the OPD metrics."""
np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
@@ -871,12 +942,14 @@ def compute_opd_metrics_param_model(tf_semiparam_field, GT_tf_semiparam_field, t
# Calculate relative RMSE values
relative_test_opd_rmse = test_opd_rmse / np.sqrt(
- np.mean((GT_opd_maps.numpy() * np_obscurations)**2)
+ np.mean((GT_opd_maps.numpy() * np_obscurations) ** 2)
)
# Print RMSE values
- print('Test stars absolute OPD RMSE:\t %.4e' % test_opd_rmse)
- print('Test stars relative OPD RMSE:\t %.4e %%\n' % (relative_test_opd_rmse * 100.))
+ logger.info("Test stars absolute OPD RMSE:\t %.4e" % test_opd_rmse)
+ logger.info(
+ "Test stars relative OPD RMSE:\t %.4e %%\n" % (relative_test_opd_rmse * 100.0)
+ )
## For train part
# Param part
@@ -895,18 +968,20 @@ def compute_opd_metrics_param_model(tf_semiparam_field, GT_tf_semiparam_field, t
# Calculate relative RMSE values
relative_train_opd_rmse = train_opd_rmse / np.sqrt(
- np.mean((GT_opd_maps.numpy() * np_obscurations)**2)
+ np.mean((GT_opd_maps.numpy() * np_obscurations) ** 2)
)
# Print RMSE values
- print('Train stars absolute OPD RMSE:\t %.4e' % train_opd_rmse)
- print('Train stars relative OPD RMSE:\t %.4e %%\n' % (relative_train_opd_rmse * 100.))
+ logger.info("Train stars absolute OPD RMSE:\t %.4e" % train_opd_rmse)
+ logger.info(
+ "Train stars relative OPD RMSE:\t %.4e %%\n" % (relative_train_opd_rmse * 100.0)
+ )
return test_opd_rmse, train_opd_rmse
def compute_one_opd_rmse(GT_tf_semiparam_field, tf_semiparam_field, pos, is_poly=False):
- """ Compute the OPD map for one position!. """
+ """Compute the OPD map for one position."""
np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
@@ -937,7 +1012,7 @@ def compute_one_opd_rmse(GT_tf_semiparam_field, tf_semiparam_field, pos, is_poly
return opd_rmse
-def plot_function(mesh_pos, residual, tf_train_pos, tf_test_pos, title='Error'):
+def plot_function(mesh_pos, residual, tf_train_pos, tf_test_pos, title="Error"):
vmax = np.max(residual)
vmin = np.min(residual)
@@ -947,19 +1022,31 @@ def plot_function(mesh_pos, residual, tf_train_pos, tf_test_pos, title='Error'):
mesh_pos[:, 1],
s=100,
c=residual.reshape(-1, 1),
- cmap='viridis',
- marker='s',
+ cmap="viridis",
+ marker="s",
vmax=vmax,
- vmin=vmin
+ vmin=vmin,
)
plt.colorbar()
plt.scatter(
- tf_train_pos[:, 0], tf_train_pos[:, 1], c='k', marker='*', s=10, label='Train stars'
+ tf_train_pos[:, 0],
+ tf_train_pos[:, 1],
+ c="k",
+ marker="*",
+ s=10,
+ label="Train stars",
+ )
+ plt.scatter(
+ tf_test_pos[:, 0],
+ tf_test_pos[:, 1],
+ c="r",
+ marker="*",
+ s=10,
+ label="Test stars",
)
- plt.scatter(tf_test_pos[:, 0], tf_test_pos[:, 1], c='r', marker='*', s=10, label='Test stars')
plt.title(title)
- plt.xlabel('x-axis')
- plt.ylabel('y-axis')
+ plt.xlabel("x-axis")
+ plt.ylabel("y-axis")
plt.show()
@@ -972,9 +1059,8 @@ def plot_residual_maps(
tf_test_pos,
n_bins_lda=20,
n_points_per_dim=30,
- is_poly=False
+ is_poly=False,
):
-
# Recover the grid limits
x_lims = tf_semiparam_field.x_lims
y_lims = tf_semiparam_field.y_lims
@@ -984,8 +1070,9 @@ def plot_residual_maps(
y = np.linspace(y_lims[0], y_lims[1], n_points_per_dim)
x_pos, y_pos = np.meshgrid(x, y)
- mesh_pos = np.concatenate((x_pos.flatten().reshape(-1, 1), y_pos.flatten().reshape(-1, 1)),
- axis=1)
+ mesh_pos = np.concatenate(
+ (x_pos.flatten().reshape(-1, 1), y_pos.flatten().reshape(-1, 1)), axis=1
+ )
tf_mesh_pos = tf.convert_to_tensor(mesh_pos, dtype=tf.float32)
# Testing the positions
@@ -1001,11 +1088,14 @@ def plot_residual_maps(
# Generate SED data list
mesh_packed_SED_data = [
- utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in mesh_SEDs
+ utils.generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda)
+ for _sed in mesh_SEDs
]
# Generate inputs
- tf_mesh_packed_SED_data = tf.convert_to_tensor(mesh_packed_SED_data, dtype=tf.float32)
+ tf_mesh_packed_SED_data = tf.convert_to_tensor(
+ mesh_packed_SED_data, dtype=tf.float32
+ )
tf_mesh_packed_SED_data = tf.transpose(tf_mesh_packed_SED_data, perm=[0, 2, 1])
mesh_pred_inputs = [tf_mesh_pos, tf_mesh_packed_SED_data]
@@ -1014,36 +1104,52 @@ def plot_residual_maps(
GT_mesh_preds = GT_tf_semiparam_field.predict(x=mesh_pred_inputs, batch_size=16)
# Calculate pixel RMSE for each star
- pix_rmse = np.array([
- np.sqrt(np.mean((_GT_pred - _model_pred)**2))
- for _GT_pred, _model_pred in zip(GT_mesh_preds, model_mesh_preds)
- ])
+ pix_rmse = np.array(
+ [
+ np.sqrt(np.mean((_GT_pred - _model_pred) ** 2))
+ for _GT_pred, _model_pred in zip(GT_mesh_preds, model_mesh_preds)
+ ]
+ )
- relative_pix_rmse = np.array([
- np.sqrt(np.mean((_GT_pred - _model_pred)**2)) / np.sqrt(np.mean((_GT_pred)**2))
- for _GT_pred, _model_pred in zip(GT_mesh_preds, model_mesh_preds)
- ])
+ relative_pix_rmse = np.array(
+ [
+ np.sqrt(np.mean((_GT_pred - _model_pred) ** 2))
+ / np.sqrt(np.mean((_GT_pred) ** 2))
+ for _GT_pred, _model_pred in zip(GT_mesh_preds, model_mesh_preds)
+ ]
+ )
# Plot absolute pixel error
- plot_function(mesh_pos, pix_rmse, tf_train_pos, tf_test_pos, title='Absolute pixel error')
+ plot_function(
+ mesh_pos, pix_rmse, tf_train_pos, tf_test_pos, title="Absolute pixel error"
+ )
# Plot relative pixel error
plot_function(
- mesh_pos, relative_pix_rmse, tf_train_pos, tf_test_pos, title='Relative pixel error'
+ mesh_pos,
+ relative_pix_rmse,
+ tf_train_pos,
+ tf_test_pos,
+ title="Relative pixel error",
)
# Compute OPD errors
- opd_rmse = np.array([
- compute_one_opd_rmse(
- GT_tf_semiparam_field, tf_semiparam_field, _pos.reshape(1, -1), is_poly
- ) for _pos in mesh_pos
- ])
+ opd_rmse = np.array(
+ [
+ compute_one_opd_rmse(
+ GT_tf_semiparam_field, tf_semiparam_field, _pos.reshape(1, -1), is_poly
+ )
+ for _pos in mesh_pos
+ ]
+ )
# Plot absolute pixel error
- plot_function(mesh_pos, opd_rmse, tf_train_pos, tf_test_pos, title='Absolute OPD error')
+ plot_function(
+ mesh_pos, opd_rmse, tf_train_pos, tf_test_pos, title="Absolute OPD error"
+ )
-def plot_imgs(mat, cmap='gist_stern', figsize=(20, 20)):
- """ Function to plot 2D images of a tensor.
+def plot_imgs(mat, cmap="gist_stern", figsize=(20, 20)):
+ """Function to plot 2D images of a tensor.
The Tensor is (batch,xdim,ydim) and the matrix of subplots is
chosen "intelligently".
"""
@@ -1080,11 +1186,10 @@ def dp(n, left): # returns tuple (cost, [factors])
for _i in range(row_n):
for _j in range(col_n):
-
plt.subplot(row_n, col_n, idx + 1)
plt.imshow(mat[idx, :, :], cmap=cmap)
plt.colorbar()
- plt.title('matrix id %d' % idx)
+ plt.title("matrix id %d" % idx)
idx += 1
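For reference, the pixel metrics computed by `compute_poly_metric` above reduce to the following NumPy operations (a self-contained sketch using random stand-in stamps, not project data):

    import numpy as np

    rng = np.random.default_rng(0)
    GT_preds = rng.normal(size=(8, 32, 32))                    # stand-in GT stamps
    preds = GT_preds + 0.01 * rng.normal(size=GT_preds.shape)  # stand-in model stamps

    residuals = np.sqrt(np.mean((GT_preds - preds) ** 2, axis=(1, 2)))  # per-star RMSE
    GT_star_mean = np.sqrt(np.mean(GT_preds**2, axis=(1, 2)))
    rmse = np.mean(residuals)
    rel_rmse = 100.0 * np.mean(residuals / GT_star_mean)  # relative RMSE in [%]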
diff --git a/src/wf_psf/metrics/metrics_interface.py b/src/wf_psf/metrics/metrics_interface.py
new file mode 100644
index 00000000..2249a38b
--- /dev/null
+++ b/src/wf_psf/metrics/metrics_interface.py
@@ -0,0 +1,389 @@
+"""Metrics Interface.
+
+A module which defines the classes and methods
+to manage the metrics evaluation of a trained PSF model.
+
+:Author: Jennifer Pollack
+
+"""
+
+import sys
+import numpy as np
+import time
+import tensorflow as tf
+import tensorflow_addons as tfa
+import wf_psf.data.training_preprocessing as training_preprocessing
+from wf_psf.data.training_preprocessing import TrainingDataHandler, TestDataHandler
+from wf_psf.psf_models import psf_models
+from wf_psf.metrics import metrics as wf_metrics
+import os
+import logging
+import wf_psf.utils.io as io
+
+logger = logging.getLogger(__name__)
+
+
+def ground_truth_psf_model(metrics_params, coeff_matrix):
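+ """Build the ground truth PSF model, assigning the dataset's Zernike coefficient matrix."""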
+ psf_model = psf_models.get_psf_model(
+ metrics_params.ground_truth_model.model_params,
+ metrics_params.metrics_hparams,
+ )
+ psf_model.tf_poly_Z_field.assign_coeff_matrix(coeff_matrix)
+
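+ # Zero the non-parametric OPD component so the ground truth field is purely parametric.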
+ psf_model.tf_np_poly_opd.alpha_mat.assign(
+ np.zeros_like(psf_model.tf_np_poly_opd.alpha_mat) # type: ignore
+ )
+
+ return psf_model
+
+
+class MetricsParamsHandler:
+ """Metrics Parameters Handler.
+
+ A class to handle the parameters used for metrics evaluation.
+
+ Parameters
+ ----------
+ metrics_params: Recursive Namespace object
+ Recursive Namespace object containing metrics input parameters
+ trained_model: Recursive Namespace object
+ Recursive Namespace object containing trained model input parameters
+
+
+ """
+
+ def __init__(self, metrics_params, trained_model):
+ self.metrics_params = metrics_params
+ self.trained_model = trained_model
+
+ def evaluate_metrics_polychromatic_lowres(self, psf_model, simPSF, dataset):
+ """Evaluate Polychromatic PSF Low-Res Metrics.
+
+ A function to evaluate metrics for Low-Res Polychromatic PSF.
+
+ Inputs
+ ------
+ psf_model: object
+ PSF model class instance of the psf model selected for metrics evaluation.
+ simPSF: object
+ SimPSFToolkit instance
+ dataset: dict
+ Dataset dictionary (test or training data) to evaluate
+
+ Returns
+ -------
+ poly_metric: dict
+ Dictionary containing RMSE, Relative RMSE values, and
+ corresponding Standard Deviation values for Low-Res Polychromatic PSF metrics.
+
+ """
+ logger.info("Computing polychromatic metrics at low resolution.")
+
+ rmse, rel_rmse, std_rmse, std_rel_rmse = wf_metrics.compute_poly_metric(
+ tf_semiparam_field=psf_model,
+ GT_tf_semiparam_field=ground_truth_psf_model(
+ self.metrics_params, dataset["C_poly"]
+ ),
+ simPSF_np=simPSF,
+ tf_pos=dataset["positions"],
+ tf_SEDs=dataset["SEDs"],
+ n_bins_lda=self.trained_model.model_params.n_bins_lda,
+ n_bins_gt=self.metrics_params.ground_truth_model.model_params.n_bins_lda,
+ batch_size=self.metrics_params.metrics_hparams.batch_size,
+ dataset_dict=dataset,
+ )
+
+ poly_metric = {
+ "rmse": rmse,
+ "rel_rmse": rel_rmse,
+ "std_rmse": std_rmse,
+ "std_rel_rmse": std_rel_rmse,
+ }
+ return poly_metric
+
+ def evaluate_metrics_mono_rmse(self, psf_model, simPSF, dataset):
+ """Evaluate Monochromatic PSF RMSE Metrics.
+
+ A function to evaluate metrics for Monochromatic PSF.
+
+ Inputs
+ ------
+ psf_model: object
+ PSF model class instance of the psf model selected for metrics evaluation.
+ simPSF: object
+ SimPSFToolkit instance
+ dataset: dict
+ Dataset dictionary (test or training data) to evaluate
+
+ Returns
+ -------
+ mono_metric: dict
+ Dictionary containing RMSE, Relative RMSE values, and
+ corresponding Standard Deviation values for Monochromatic PSF metrics.
+
+ """
+ logger.info("Computing monochromatic metrics.")
+ lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
+ (
+ rmse_lda,
+ rel_rmse_lda,
+ std_rmse_lda,
+ std_rel_rmse_lda,
+ ) = wf_metrics.compute_mono_metric(
+ tf_semiparam_field=psf_model,
+ GT_tf_semiparam_field=ground_truth_psf_model(
+ self.metrics_params, dataset["C_poly"]
+ ),
+ simPSF_np=simPSF,
+ tf_pos=dataset["positions"],
+ lambda_list=lambda_list,
+ )
+
+ mono_metric = {
+ "rmse_lda": rmse_lda,
+ "rel_rmse_lda": rel_rmse_lda,
+ "std_rmse_lda": std_rmse_lda,
+ "std_rel_rmse_lda": std_rel_rmse_lda,
+ }
+ return mono_metric
+
+ def evaluate_metrics_opd(self, psf_model, simPSF, dataset):
+ """Evaluate OPD Metrics.
+
+ A function to evaluate metrics for Optical Path Differences.
+
+ Inputs
+ ------
+ psf_model: object
+ PSF model class instance of the psf model selected for metrics evaluation.
+ simPSF: object
+ SimPSFToolkit instance
+ dataset: dict
+ Dataset dictionary (test or training data) to evaluate
+
+ Returns
+ -------
+ opd_metric: dict
+ Dictionary containing RMSE, Relative RMSE values, and
+ corresponding Standard Deviation values for OPD metrics.
+
+ """
+ logger.info("Computing OPD metrics.")
+ (
+ rmse_opd,
+ rel_rmse_opd,
+ rmse_std_opd,
+ rel_rmse_std_opd,
+ ) = wf_metrics.compute_opd_metrics(
+ tf_semiparam_field=psf_model,
+ GT_tf_semiparam_field=ground_truth_psf_model(
+ self.metrics_params, dataset["C_poly"]
+ ),
+ pos=dataset["positions"],
+ batch_size=self.metrics_params.metrics_hparams.batch_size,
+ )
+
+ opd_metric = {
+ "rmse_opd": rmse_opd,
+ "rel_rmse_opd": rel_rmse_opd,
+ "rmse_std_opd": rmse_std_opd,
+ "rel_rmse_std_opd": rel_rmse_std_opd,
+ }
+ return opd_metric
+
+ def evaluate_metrics_shape(self, psf_model, simPSF, dataset):
+ """Evaluate PSF Shape Metrics.
+
+ A function to evaluate metrics for PSF shape.
+
+ Inputs
+ ------
+ psf_model: object
+ PSF model class instance of the psf model selected for metrics evaluation.
+ simPSF: object
+ SimPSFToolkit instance
+ dataset: dict
+ Test dataset dictionary
+
+ Returns
+ -------
+ shape_results: dict
+ Dictionary containing RMSE, Relative RMSE values, and
+ corresponding Standard Deviation values for PSF Shape metrics.
+
+ """
+ logger.info("Computing Shape metrics.")
+
+ shape_results = wf_metrics.compute_shape_metrics(
+ tf_semiparam_field=psf_model,
+ GT_tf_semiparam_field=ground_truth_psf_model(
+ self.metrics_params, dataset["C_poly"]
+ ),
+ simPSF_np=simPSF,
+ SEDs=dataset["SEDs"],
+ tf_pos=dataset["positions"],
+ n_bins_lda=self.trained_model.model_params.n_bins_lda,
+ n_bins_gt=self.metrics_params.ground_truth_model.model_params.n_bins_lda,
+ batch_size=self.metrics_params.metrics_hparams.batch_size,
+ output_Q=self.metrics_params.metrics_hparams.output_Q,
+ output_dim=self.metrics_params.metrics_hparams.output_dim,
+ opt_stars_rel_pix_rmse=self.metrics_params.metrics_hparams.opt_stars_rel_pix_rmse,
+ dataset_dict=dataset,
+ )
+ return shape_results
+
+
+def evaluate_model(
+ metrics_params,
+ trained_model_params,
+ training_data,
+ test_data,
+ psf_model,
+ weights_path,
+ metrics_output,
+):
+ r"""Evaluate the trained model.
+
+ For a description of the parameters, see the training script's help.
+
+ Inputs
+ ------
+ metrics_params: Recursive Namespace object
+ Recursive Namespace object containing metrics input parameters
+ trained_model_params: Recursive Namespace object
+ Recursive Namespace object containing trained model input parameters
+ training_data: object
+ TrainingDataHandler object
+ test_data: object
+ TestDataHandler object
+ psf_model: object
+ PSF model object
+ weights_path: str
+ Directory location of model weights
+ metrics_output: str
+ Directory location of metrics output
+
+ """
+ # Start measuring elapsed time
+ starting_time = time.time()
+
+ try:
+ ## Load datasets
+ # -----------------------------------------------------
+ # Get training data
+ logger.info(f"Fetching and preprocessing training and test data...")
+
+ # Initialize metrics_handler
+ metrics_handler = MetricsParamsHandler(metrics_params, trained_model_params)
+
+ ## Prepare models
+ # Prepare np input
+ simPSF_np = training_data.simPSF
+
+ ## Load the model's weights
+ try:
+ logger.info("Loading PSF model weights from {}".format(weights_path))
+ psf_model.load_weights(weights_path)
+ except Exception:
+ logger.exception("An error occurred with the weights_path file.")
+ sys.exit(1)
+
+ ## Metric evaluation on the test dataset
+ logger.info("\n***\nMetric evaluation on the test dataset\n***\n")
+
+ # Polychromatic star reconstructions
+ poly_metric = metrics_handler.evaluate_metrics_polychromatic_lowres(
+ psf_model, simPSF_np, test_data.test_dataset
+ )
+
+ # Monochromatic star reconstructions
+ if metrics_params.eval_mono_metric_rmse:
+ mono_metric = metrics_handler.evaluate_metrics_mono_rmse(
+ psf_model, simPSF_np, test_data.test_dataset
+ )
+ else:
+ mono_metric = None
+
+ # OPD metrics
+ if metrics_params.eval_opd_metric_rmse:
+ opd_metric = metrics_handler.evaluate_metrics_opd(
+ psf_model, simPSF_np, test_data.test_dataset
+ )
+ else:
+ opd_metric = None
+
+ # Shape metrics
+ logger.info(
+ "Computing polychromatic high-resolution metrics and shape metrics."
+ )
+ shape_results_dict = metrics_handler.evaluate_metrics_shape(
+ psf_model, simPSF_np, test_data.test_dataset
+ )
+ # Save metrics
+ test_metrics = {
+ "poly_metric": poly_metric,
+ "mono_metric": mono_metric,
+ "opd_metric": opd_metric,
+ "shape_results_dict": shape_results_dict,
+ }
+
+ ## Metric evaluation on the train dataset
+ logger.info("\n***\nMetric evaluation on the train dataset\n***\n")
+
+ # Polychromatic star reconstructions
+ logger.info("Computing polychromatic metrics at low resolution.")
+
+ train_poly_metric = metrics_handler.evaluate_metrics_polychromatic_lowres(
+ psf_model, simPSF_np, training_data.train_dataset
+ )
+
+ # Monochromatic star reconstructions (TODO: refactor into a class)
+ if metrics_params.eval_mono_metric_rmse:
+ train_mono_metric = metrics_handler.evaluate_metrics_mono_rmse(
+ psf_model, simPSF_np, training_data.train_dataset
+ )
+ else:
+ train_mono_metric = None
+
+ # OPD metrics (TODO: refactor into a class)
+ if metrics_params.eval_opd_metric_rmse:
+ train_opd_metric = metrics_handler.evaluate_metrics_opd(
+ psf_model, simPSF_np, training_data.train_dataset
+ )
+ else:
+ train_opd_metric = None
+
+ # Shape metrics (TODO: refactor into a class)
+ if metrics_params.eval_train_shape_sr_metric_rmse:
+ train_shape_results_dict = metrics_handler.evaluate_metrics_shape(
+ psf_model, simPSF_np, training_data.train_dataset
+ )
+ else:
+ train_shape_results_dict = None
+
+ # Save metrics into dictionary
+ train_metrics = {
+ "poly_metric": train_poly_metric,
+ "mono_metric": train_mono_metric,
+ "opd_metric": train_opd_metric,
+ "shape_results_dict": train_shape_results_dict,
+ }
+
+ ## Save results
+ metrics = {"test_metrics": test_metrics, "train_metrics": train_metrics}
+ run_id_name = (
+ trained_model_params.model_params.model_name + trained_model_params.id_name
+ )
+ output_path = os.path.join(metrics_output, "metrics-" + run_id_name)
+ np.save(output_path, metrics, allow_pickle=True)
+
+ ## Print final time
+ final_time = time.time()
+ logger.info("\nTotal elapsed time: %f" % (final_time - starting_time))
+
+ ## Sign off
+ logger.info("\n Good bye..")
+
+ return metrics
+ except Exception as e:
+ logger.info("Error: %s" % e)
+ raise
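The metrics dictionary saved by `evaluate_model` can be read back with the same pickled-array convention used throughout the package; the file name below is hypothetical, following the `"metrics-" + model_name + id_name` pattern above:

    import numpy as np

    metrics = np.load("metrics-poly-test_run.npy", allow_pickle=True)[()]
    test_poly = metrics["test_metrics"]["poly_metric"]
    print(test_poly["rmse"], test_poly["std_rmse"])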
diff --git a/src/wf_psf/plotting/__init__.py b/src/wf_psf/plotting/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/wf_psf/plotting/plots_interface.py b/src/wf_psf/plotting/plots_interface.py
new file mode 100644
index 00000000..30e22674
--- /dev/null
+++ b/src/wf_psf/plotting/plots_interface.py
@@ -0,0 +1,549 @@
+"""Plot Interface.
+
+An interface module with classes to handle different
+plot configurations.
+
+:Author: Jennifer Pollack
+
+"""
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib as mpl
+import matplotlib.ticker as mtick
+import seaborn as sns
+import os
+import re
+import logging
+
+logger = logging.getLogger(__name__)
+logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+
+def define_plot_style(): # type: ignore
+ """Define Plot Style.
+
+ A function to set plot_style
+ parameters.
+
+ """
+ plot_style = {
+ "figure.figsize": (12, 8),
+ "figure.dpi": 200,
+ "figure.autolayout": True,
+ "lines.linewidth": 2,
+ "lines.linestyle": "-",
+ "lines.marker": "o",
+ "lines.markersize": 10,
+ "legend.fontsize": 20,
+ "legend.loc": "best",
+ "axes.titlesize": 24,
+ "font.size": 16,
+ }
+ mpl.rcParams.update(plot_style)
+ # Use seaborn style
+ sns.set()
+
+
+def make_plot(
+ x_axis,
+ y_axis,
+ y_axis_err,
+ label,
+ plot_title,
+ x_axis_label,
+ y_right_axis_label,
+ y_left_axis_label,
+ filename,
+ plot_show=False,
+):
+ """Make Plot.
+
+ A function to generate metrics plots.
+
+ Parameters
+ ----------
+ x_axis: list
+ x-axis values
+ y_axis: list
+ y-axis values
+ y_axis_err: list
+ Error values for y-axis points
+ label: str
+ Label for the points
+ plot_title: str
+ Name of plot
+ x_axis_label: str
+ Label for x-axis
+ y_left_axis_label: str
+ Label for left vertical axis of plot
+ y_right_axis_label: str
+ Label for right vertical axis of plot
+ filename: str
+ Name of file to save plot
+ plot_show: bool
+ Boolean flag to set plot display
+
+
+ """
+ define_plot_style()
+
+ fig = plt.figure(figsize=(12, 8))
+ ax1 = fig.add_subplot(111)
+
+ ax1.set_title(plot_title)
+ ax1.set_xlabel(x_axis_label)
+ ax1.set_ylabel(y_left_axis_label)
+ ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter("%.1e"))
+ ax2 = ax1.twinx()
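+ # The twin y-axis carries the relative-error [%] scale shown on the right.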
+ plt.minorticks_on()
+
+ ax2.set_ylabel(y_right_axis_label)
+ ax2.grid(False)
+
+ for it in range(len(y_axis)): # type: ignore
+ for k, _ in y_axis[it].items():
+ ax1.errorbar(
+ x=x_axis[it],
+ y=y_axis[it][k],
+ yerr=y_axis_err[it][k],
+ label=label[it],
+ alpha=0.75,
+ )
+ ax1.legend()
+ kwargs = dict(
+ linewidth=2, linestyle="dashed", markersize=4, marker="^", alpha=0.5
+ )
+ ax2.plot(x_axis[it], y_axis[it][k], **kwargs)
+
+ plt.savefig(filename)
+
+ if plot_show:
+ plt.show()
+
+
+class MetricsPlotHandler:
+ """MetricsPlotHandler class.
+
+ A class to handle plot parameters for various
+ metrics results.
+
+ Parameters
+ ----------
+ id: str
+ Class ID name
+ plotting_params: Recursive Namespace object
+ metrics: dict
+ Dictionary containing a list of metrics per run
+ list_of_stars: list
+ List containing the number of training stars per run
+ metric_name: str
+ Name of metric
+ rmse: str
+ Root-mean-square error label
+ std_rmse: str
+ Label for the standard deviation of the root-mean-square error
+ plots_dir: str
+ Output directory for metrics plots
+
+ """
+
+ ids = ("poly_metrics", "opd_metrics", "poly_pixel")
+
+ def __init__(
+ self,
+ plotting_params,
+ metrics,
+ list_of_stars,
+ metric_name,
+ rmse,
+ std_rmse,
+ plot_title,
+ plots_dir,
+ ):
+ self.plotting_params = plotting_params
+ self.metrics = metrics
+ self.metric_name = metric_name
+ self.rmse = rmse
+ self.std_rmse = std_rmse
+ self.plot_title = plot_title
+ self.plots_dir = plots_dir
+ self.list_of_stars = list_of_stars
+
+ def get_metrics(self, dataset):
+ """Get Metrics.
+
+ A function to get the rmse and std_rmse metrics
+ for each run input, e.g. wf-outputs-xxxxxxxxxxxx.
+
+ Parameters
+ ----------
+ dataset: str
+ A str representing dataset type, i.e. test_metrics or train_metrics.
+
+ Returns
+ -------
+ tuple:
+ A tuple of the metric ids, the root-mean-square (RMS) errors, and their standard deviations for each input run.
+
+ """
+ rmse = []
+ std_rmse = []
+ metrics_id = []
+ for k, v in self.metrics.items():
+ for metrics_data in v:
+ run_id = list(metrics_data.keys())[0]
+ metrics_id.append(run_id + "-" + k)
+
+ rmse.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][dataset][
+ self.metric_name
+ ][self.rmse]
+ }
+ )
+ std_rmse.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][dataset][
+ self.metric_name
+ ][self.std_rmse]
+ }
+ )
+
+ return metrics_id, rmse, std_rmse
+
+ def plot(self):
+ """Plot.
+
+ A function to generate metric plots as function of number of stars
+ for the train and test metrics.
+
+ """
+ for plot_dataset in ["test_metrics", "train_metrics"]:
+ metrics_id, rmse, std_rmse = self.get_metrics(plot_dataset)
+ make_plot(
+ x_axis=self.list_of_stars,
+ y_axis=rmse,
+ y_axis_err=std_rmse,
+ label=metrics_id,
+ plot_title="Stars " + plot_dataset + self.plot_title,
+ x_axis_label="Number of stars",
+ y_left_axis_label="Absolute error",
+ y_right_axis_label="Relative error [%]",
+ filename=os.path.join(
+ self.plots_dir,
+ plot_dataset
+ + "_"
+ + self.metric_name
+ + "_nstars_"
+ + "_".join(str(nstar) for nstar in self.list_of_stars)
+ + "_RMSE.png",
+ ),
+ plot_show=self.plotting_params.plot_show,
+ )
+
+
+class MonochromaticMetricsPlotHandler:
+ """MonochromaticMetricsPlotHandler class.
+
+ A class to handle plot parameters for monochromatic
+ metrics results.
+
+ Parameters
+ ----------
+ id: str
+ Class ID name
+ plotting_params: Recursive Namespace object
+ RecursiveNamespace object containing plotting parameters
+ metrics_confs: dict
+ Dictionary containing the metric configurations as RecursiveNamespace objects for each run
+ metrics: dict
+ Dictionary containing a list of metrics per run
+ list_of_stars: list
+ List containing the number of stars used for each training data set
+ plots_dir: str
+ Output directory for metrics plots
+
+ """
+
+ ids = ("mono_metrics",)
+
+ def __init__(
+ self,
+ plotting_params,
+ metrics_confs,
+ metrics,
+ list_of_stars,
+ plots_dir,
+ ):
+ self.plotting_params = plotting_params
+ self.metrics_confs = metrics_confs
+ self.metrics = metrics
+ self.list_of_stars = list_of_stars
+ self.plots_dir = plots_dir
+
+ def plot(self):
+ """Plot.
+
+ A function to generate plots for the train and test
+ metrics.
+
+ """
+ # Define common data
+ # Common data
+ lambda_list = np.arange(0.55, 0.9, 0.01)
+ for plot_dataset in ["test_metrics", "train_metrics"]:
+ y_axis = []
+ y_axis_err = []
+ metrics_id = []
+
+ for k, v in self.metrics.items():
+ if self.metrics_confs[k].metrics.eval_mono_metric_rmse:
+ for metrics_data in v:
+ run_id = list(metrics_data.keys())[0]
+ metrics_id.append(run_id + "-" + k)
+ y_axis.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][
+ plot_dataset
+ ]["mono_metric"]["rmse_lda"]
+ }
+ )
+ y_axis_err.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][
+ plot_dataset
+ ]["mono_metric"]["std_rmse_lda"]
+ }
+ )
+
+ make_plot(
+ x_axis=[lambda_list for _ in range(len(y_axis))],
+ y_axis=y_axis,
+ y_axis_err=y_axis_err,
+ label=metrics_id,
+ plot_title="Stars "
+ + plot_dataset # type: ignore
+ + "\nMonochromatic pixel RMSE @ Euclid resolution",
+ x_axis_label="Wavelength [um]",
+ y_left_axis_label="Absolute error",
+ y_right_axis_label="Relative error [%]",
+ filename=os.path.join(
+ self.plots_dir,
+ (
+ plot_dataset
+ + "_nstars_"
+ + "_".join(str(nstar) for nstar in self.list_of_stars)
+ + "_monochrom_pixel_RMSE.png"
+ ),
+ ),
+ plot_show=self.plotting_params.plot_show,
+ )
+
+
+class ShapeMetricsPlotHandler:
+ """ShapeMetricsPlotHandler class.
+
+ A class to handle plot parameters for shape
+ metrics results.
+
+ Parameters
+ ----------
+ id: str
+ Class ID name
+ plotting_params: Recursive Namespace object
+ Recursive Namespace Object containing plotting parameters
+ metrics: dict
+ Dictionary containing a list of metrics per run
+ list_of_stars: list
+ List containing the number of stars used for each training data set
+ plots_dir: str
+ Output directory for metrics plots
+
+ """
+
+ id = "shape_metrics"
+
+ def __init__(self, plotting_params, metrics, list_of_stars, plots_dir):
+ self.plotting_params = plotting_params
+ self.metrics = metrics
+ self.list_of_stars = list_of_stars
+ self.plots_dir = plots_dir
+
+ def plot(self):
+ """Plot.
+
+ A function to generate plots for the train and test
+ metrics.
+
+ """
+ # Define common data
+ # Common data
+ e1_req_euclid = 2e-04
+ e2_req_euclid = 2e-04
+ R2_req_euclid = 1e-03
+ for plot_dataset in ["test_metrics", "train_metrics"]:
+ e1_rmse = []
+ e1_std_rmse = []
+ e2_rmse = []
+ e2_std_rmse = []
+ rmse_R2_meanR2 = []
+ std_rmse_R2_meanR2 = []
+ metrics_id = []
+
+ for k, v in self.metrics.items():
+ for metrics_data in v:
+ run_id = list(metrics_data.keys())[0]
+ metrics_id.append(run_id + "-" + k)
+
+ e1_rmse.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][plot_dataset][
+ "shape_results_dict"
+ ]["rmse_e1"]
+ / e1_req_euclid
+ }
+ )
+ e1_std_rmse.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][plot_dataset][
+ "shape_results_dict"
+ ]["std_rmse_e1"]
+ }
+ )
+
+ e2_rmse.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][plot_dataset][
+ "shape_results_dict"
+ ]["rmse_e2"]
+ / e2_req_euclid
+ }
+ )
+ e2_std_rmse.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][plot_dataset][
+ "shape_results_dict"
+ ]["std_rmse_e2"]
+ }
+ )
+
+ rmse_R2_meanR2.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][plot_dataset][
+ "shape_results_dict"
+ ]["rmse_R2_meanR2"]
+ / R2_req_euclid
+ }
+ )
+
+ std_rmse_R2_meanR2.append(
+ {
+ (k + "-" + run_id): metrics_data[run_id][0][plot_dataset][
+ "shape_results_dict"
+ ]["std_rmse_R2_meanR2"]
+ }
+ )
+
+ make_plot(
+ x_axis=self.list_of_stars,
+ y_axis=e1_rmse,
+ y_axis_err=e1_std_rmse,
+ label=metrics_id,
+ plot_title="Stars " + plot_dataset + ".\nShape RMSE",
+ x_axis_label="Number of stars",
+ y_left_axis_label="Absolute error",
+ y_right_axis_label="Relative error [%]",
+ filename=os.path.join(
+ self.plots_dir,
+ plot_dataset
+ + "_nstars_"
+ + "_".join(str(nstar) for nstar in self.list_of_stars)
+ + "_Shape_RMSE.png",
+ ),
+ plot_show=self.plotting_params.plot_show,
+ )
+
+
+def get_number_of_stars(metrics):
+ """Get Number of Stars.
+
+ A function to get the number of stars used
+ in training the model.
+
+ Parameters
+ ----------
+ metrics: dict
+ A dictionary containing the metrics results per run
+
+ Returns
+ -------
+ list_of_stars: list
+ A list containing the number of training stars per run.
+ """
+ list_of_stars = []
+
+ for k, v in metrics.items():
+ for run in v:
+ run_id = list(run.keys())[0]
+ list_of_stars.append(int(re.search(r"\d+", run_id).group()))
+
+ return list_of_stars
+
+
+def plot_metrics(plotting_params, list_of_metrics, metrics_confs, plot_saving_path):
+ r"""Plot model results.
+
+ Parameters
+ ----------
+ plotting_params: RecursiveNamespace Object
+ Recursive Namespace object containing plot configuration parameters
+ list_of_metrics: dict
+ Dictionary containing all model metrics per run
+ metrics_confs: dict
+ Dictionary containing the metrics configuration parameters per run
+ plot_saving_path: str
+ Directory path for saving output plots
+
+ """
+ metrics = {
+ "poly_metric": {
+ "rmse": "rmse",
+ "std_rmse": "std_rmse",
+ "plot_title": ".\nPolychromatic pixel RMSE @ Euclid resolution",
+ },
+ "opd_metric": {
+ "rmse": "rmse_opd",
+ "std_rmse": "rmse_std_opd",
+ "plot_title": ".\nOPD RMSE",
+ },
+ "shape_results_dict": {
+ "rmse": "pix_rmse",
+ "std_rmse": "pix_rmse_std",
+ "plot_title": "\nPixel RMSE @ 3x Euclid resolution",
+ },
+ }
+
+ list_of_stars = get_number_of_stars(list_of_metrics)
+
+ for k, v in metrics.items():
+ metrics_plot = MetricsPlotHandler(
+ plotting_params,
+ list_of_metrics,
+ list_of_stars,
+ k,
+ v["rmse"],
+ v["std_rmse"],
+ v["plot_title"],
+ plot_saving_path,
+ )
+ metrics_plot.plot()
+
+ monochrom_metrics_plot = MonochromaticMetricsPlotHandler(
+ plotting_params, metrics_confs, list_of_metrics, list_of_stars, plot_saving_path
+ )
+ monochrom_metrics_plot.plot()
+
+ shape_metrics_plot = ShapeMetricsPlotHandler(
+ plotting_params, list_of_metrics, list_of_stars, plot_saving_path
+ )
+ shape_metrics_plot.plot()
diff --git a/src/wf_psf/psf_models/__init__.py b/src/wf_psf/psf_models/__init__.py
new file mode 100644
index 00000000..a84fd807
--- /dev/null
+++ b/src/wf_psf/psf_models/__init__.py
@@ -0,0 +1 @@
+__all__ = ["psf_models", "psf_model_parametric", "psf_model_semiparametric"]
diff --git a/src/wf_psf/psf_models/psf_model_parametric.py b/src/wf_psf/psf_models/psf_model_parametric.py
new file mode 100644
index 00000000..44fc75a7
--- /dev/null
+++ b/src/wf_psf/psf_models/psf_model_parametric.py
@@ -0,0 +1,235 @@
+"""PSF Model Parametric.
+
+A module which defines the classes and methods
+to manage the parameters of the psf parametric model.
+
+:Authors: Tobias Liaudat and Jennifer Pollack
+
+"""
+
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.keras.engine import data_adapter
+from wf_psf.psf_models.psf_models import register_psfclass
+from wf_psf.psf_models.tf_layers import (
+ TF_poly_Z_field,
+ TF_zernike_OPD,
+ TF_batch_poly_PSF,
+)
+from wf_psf.psf_models.tf_layers import (
+ TF_NP_poly_OPD,
+ TF_batch_mono_PSF,
+ TF_physical_layer,
+)
+from wf_psf.utils.utils import PI_zernikes
+
+
+@register_psfclass
+class TF_PSF_field_model(tf.keras.Model):
+ """Parametric PSF field model!
+
+ Fully parametric model based on the Zernike polynomial basis.
+
+ Parameters
+ ----------
+ ids: tuple
+ A tuple storing the string attribute of the PSF model class
+ zernike_maps: Tensor(n_batch, opd_dim, opd_dim)
+ Zernike polynomial maps.
+ obscurations: Tensor(opd_dim, opd_dim)
+ Predefined obscurations of the phase.
+ batch_size: int
+ Batch size.
+ output_Q: float
+ Oversampling used. This should match the oversampling Q used to
+ generate the diffraction zero padding found in the input
+ `packed_SEDs`; we call that value the `input_Q`. When the two match,
+ the model reproduces the original sampling used to compute the input
+ `packed_SEDs`. The final oversampling of the generated PSFs with
+ respect to the original instrument sampling depends on the ratio
+ `input_Q/output_Q`. Using `output_Q < 1` is not recommended, and
+ integer values are preferred even though floats work.
+ l2_param: float
+ Parameter going with the l2 loss on the opd. If it is `0.` the loss
+ is not added. Default is `0.`.
+ output_dim: int
+ Output dimension of the PSF stamps.
+ n_zernikes: int
+ Order of the Zernike polynomial for the parametric model.
+ d_max: int
+ Maximum degree of the polynomial for the Zernike coefficient variations.
+ x_lims: [float, float]
+ Limits for the x coordinate of the PSF field.
+ y_lims: [float, float]
+ Limits for the y coordinate of the PSF field.
+ coeff_mat: Tensor or None
+ Initialization of the coefficient matrix defining the parametric psf
+ field model.
+
+ """
+
+ ids = ("parametric",)
+
+ def __init__(
+ self,
+ zernike_maps,
+ obscurations,
+ batch_size,
+ output_Q,
+ l2_param=0.0,
+ output_dim=64,
+ n_zernikes=45,
+ d_max=2,
+ x_lims=[0, 1e3],
+ y_lims=[0, 1e3],
+ coeff_mat=None,
+ name="TF_PSF_field_model",
+ ):
+ super(TF_PSF_field_model, self).__init__()
+
+ self.output_Q = output_Q
+
+ # Inputs: TF_poly_Z_field
+ self.n_zernikes = n_zernikes
+ self.d_max = d_max
+ self.x_lims = x_lims
+ self.y_lims = y_lims
+
+ # Inputs: TF_zernike_OPD
+ # They are not stored as they are memory-heavy
+ # zernike_maps =[]
+
+ # Inputs: TF_batch_poly_PSF
+ self.batch_size = batch_size
+ self.obscurations = obscurations
+ self.output_dim = output_dim
+
+ # Inputs: Loss
+ self.l2_param = l2_param
+
+ # Initialize the first layer
+ self.tf_poly_Z_field = TF_poly_Z_field(
+ x_lims=self.x_lims,
+ y_lims=self.y_lims,
+ n_zernikes=self.n_zernikes,
+ d_max=self.d_max,
+ )
+
+ # Initialize the zernike to OPD layer
+ self.tf_zernike_OPD = TF_zernike_OPD(zernike_maps=zernike_maps)
+
+ # Initialize the batch opd to batch polychromatic PSF layer
+ self.tf_batch_poly_PSF = TF_batch_poly_PSF(
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
+ )
+
+ # Initialize the model parameters with non-default value
+ if coeff_mat is not None:
+ self.assign_coeff_matrix(coeff_mat)
+
+ # # Depending on the parameter we define the forward model
+ # # This is, we add or not the L2 loss to the OPD.
+ # if self.l2_param == 0.:
+ # self.call = self.call_basic
+ # else:
+ # self.call = self.call_l2_opd_loss
+
+ def get_coeff_matrix(self):
+ """Get coefficient matrix."""
+ return self.tf_poly_Z_field.get_coeff_matrix()
+
+ def assign_coeff_matrix(self, coeff_mat):
+ """Assign coefficient matrix."""
+ self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
+
+ def set_output_Q(self, output_Q, output_dim=None):
+ """Set the value of the output_Q parameter.
+ Useful for generating/predicting PSFs at a different sampling wrt the
+ observation sampling.
+ """
+ self.output_Q = output_Q
+ if output_dim is not None:
+ self.output_dim = output_dim
+ # Reinitialize the PSF batch poly generator
+ self.tf_batch_poly_PSF = TF_batch_poly_PSF(
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
+ )
+
+ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
+ """Predict a set of monochromatic PSF at desired positions.
+
+ input_positions: Tensor(batch_dim x 2)
+
+ lambda_obs: float
+ Observed wavelength in um.
+
+ phase_N: int
+ Required wavefront dimension. Should be calculated with as:
+ ``simPSF_np = wf.SimPSFToolkit(...)``
+ ``phase_N = simPSF_np.feasible_N(lambda_obs)``
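+
+ Examples
+ --------
+ A usage sketch (``simPSF_np``, ``positions`` and the ``model`` instance
+ are assumed to be set up elsewhere)::
+
+ >>> phase_N = simPSF_np.feasible_N(lambda_obs=0.7)
+ >>> mono_psfs = model.predict_mono_psfs(positions, 0.7, phase_N)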
+ """
+
+ # Initialise the monochromatic PSF batch calculator
+ tf_batch_mono_psf = TF_batch_mono_PSF(
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
+ )
+ # Set the lambda_obs and the phase_N parameters
+ tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
+
+ # Compute the OPD maps
+ zernike_coeffs = self.tf_poly_Z_field(input_positions)
+ opd_maps = self.tf_zernike_OPD(zernike_coeffs)
+
+ # Compute the monochromatic PSFs
+ mono_psf_batch = tf_batch_mono_psf(opd_maps)
+
+ return mono_psf_batch
+
+ def predict_opd(self, input_positions):
+ """Predict the OPD at some positions.
+
+ Parameters
+ ----------
+ input_positions: Tensor(batch_dim x 2)
+ Positions to predict the OPD.
+
+ Returns
+ -------
+ opd_maps : Tensor [batch x opd_dim x opd_dim]
+ OPD at requested positions.
+
+ """
+ # Compute the OPD maps
+ zernike_coeffs = self.tf_poly_Z_field(input_positions)
+ opd_maps = self.tf_zernike_OPD(zernike_coeffs)
+
+ return opd_maps
+
+ def call(self, inputs):
+ """Define the PSF field forward model.
+
+ [1] From positions to Zernike coefficients
+ [2] From Zernike coefficients to OPD maps
+ [3] From OPD maps and SED info to polychromatic PSFs
+
+ OPD: Optical Path Differences
+ """
+ # Unpack inputs
+ input_positions = inputs[0]
+ packed_SEDs = inputs[1]
+
+ # Continue the forward model
+ zernike_coeffs = self.tf_poly_Z_field(input_positions)
+ opd_maps = self.tf_zernike_OPD(zernike_coeffs)
+ # Add l2 loss on the OPD
+ self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(opd_maps)))
+ poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
+
+ return poly_psfs
diff --git a/src/wf_psf/psf_models/psf_model_semiparametric.py b/src/wf_psf/psf_models/psf_model_semiparametric.py
new file mode 100644
index 00000000..3a250490
--- /dev/null
+++ b/src/wf_psf/psf_models/psf_model_semiparametric.py
@@ -0,0 +1,387 @@
+"""PSF Model Semi-Parametric.
+
+A module which defines the classes and methods
+to manage the parameters of the psf semi-parametric model.
+
+:Authors: Tobias Liaudat and Jennifer Pollack
+
+"""
+
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.keras.engine import data_adapter
+from wf_psf.psf_models import psf_models as psfm
+from wf_psf.psf_models import tf_layers as tfl
+from wf_psf.utils.utils import PI_zernikes, zernike_generator
+from wf_psf.psf_models.tf_layers import (
+ TF_batch_poly_PSF,
+ TF_batch_mono_PSF,
+)
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+@psfm.register_psfclass
+class TF_SemiParam_field(tf.keras.Model):
+ """PSF field forward model.
+
+ Semi-parametric model based on the Zernike polynomial basis.
+
+ Parameters
+ ----------
+ ids: tuple
+ A tuple storing the string attribute of the PSF model class
+ model_params: Recursive Namespace
+ Recursive Namespace object containing parameters for this PSF model class
+ training_params: Recursive Namespace
+ Recursive Namespace object containing training hyperparameters for this PSF model class
+ coeff_mat: Tensor or None
+ Initialization of the coefficient matrix defining the parametric psf field model
+
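+ Examples
+ --------
+ A construction sketch (``model_params`` and ``training_params`` are
+ RecursiveNamespace objects built from the configuration files, while
+ ``positions`` and ``packed_SEDs`` are assumed prepared elsewhere)::
+
+ >>> psf_field = TF_SemiParam_field(model_params, training_params)
+ >>> poly_psfs = psf_field([positions, packed_SEDs])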
+ """
+
+ ids = ("poly",)
+
+ def __init__(self, model_params, training_params, coeff_mat=None):
+ super().__init__()
+
+ # Inputs: random seed for TensorFlow initialization
+ self.random_seed = model_params.param_hparams.random_seed
+
+ # Inputs: pupil diameter
+ self.pupil_diam = model_params.pupil_diameter
+
+ # Inputs: oversampling used
+ self.output_Q = model_params.output_Q
+
+ # Inputs: TF_poly_Z_field
+ self.n_zernikes = model_params.param_hparams.n_zernikes
+ self.d_max = model_params.param_hparams.d_max
+ self.x_lims = model_params.x_lims
+ self.y_lims = model_params.y_lims
+
+ # Inputs: TF_NP_poly_OPD
+ self.d_max_nonparam = model_params.nonparam_hparams.d_max_nonparam
+ self.zernike_maps = psfm.tf_zernike_cube(self.n_zernikes, self.pupil_diam)
+ self.opd_dim = tf.shape(self.zernike_maps)[1].numpy()
+
+ # Inputs: TF_batch_poly_PSF
+ self.batch_size = training_params.batch_size
+ self.obscurations = psfm.tf_obscurations(self.pupil_diam)
+ self.output_dim = model_params.output_dim
+
+ # Inputs: Loss
+ self.l2_param = model_params.param_hparams.l2_param
+
+ # Inputs: Project DD model features
+ self.project_dd_features = model_params.nonparam_hparams.project_dd_features
+
+ # Inputs: Reset DD model features
+ self.reset_dd_features = model_params.nonparam_hparams.reset_dd_features
+
+ # Inputs: Save optimiser history Parametric model features
+ self.save_optim_history_param = (
+ model_params.param_hparams.save_optim_history_param
+ )
+
+ # Inputs: Save optimiser history non-parametric model features
+ self.save_optim_history_nonparam = (
+ model_params.nonparam_hparams.save_optim_history_nonparam
+ )
+
+ # Initialize the first layer
+ self.tf_poly_Z_field = tfl.TF_poly_Z_field(
+ x_lims=self.x_lims,
+ y_lims=self.y_lims,
+ random_seed=self.random_seed,
+ n_zernikes=self.n_zernikes,
+ d_max=self.d_max,
+ )
+
+ # Initialize the zernike to OPD layer
+ self.tf_zernike_OPD = tfl.TF_zernike_OPD(zernike_maps=self.zernike_maps)
+
+ # Initialize the non-parametric (np) layer
+ self.tf_np_poly_opd = tfl.TF_NP_poly_OPD(
+ x_lims=self.x_lims,
+ y_lims=self.y_lims,
+ random_seed=self.random_seed,
+ d_max=self.d_max_nonparam,
+ opd_dim=self.opd_dim,
+ )
+
+ # Initialize the batch opd to batch polychromatic PSF layer
+ self.tf_batch_poly_PSF = tfl.TF_batch_poly_PSF(
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
+ )
+
+ # Initialize the model parameters with non-default value
+ # self._coeff_mat = coeff_mat
+ if coeff_mat is not None:
+ self.assign_coeff_matrix(coeff_mat)
+
+ def get_coeff_matrix(self):
+ """Get coefficient matrix.
+
+ A function to get the coefficient matrix
+ for parametric model.
+
+ Returns
+ -------
+ coeff_mat: Tensor
+ TensorFlow coefficient matrix for the parametric PSF field model
+
+ """
+ return self.tf_poly_Z_field.get_coeff_matrix()
+
+ def assign_coeff_matrix(self, coeff_mat):
+ """Assign coefficient matrix.
+
+ A function to set the coefficient matrix.
+
+ Parameters
+ ----------
+ coeff_mat: Tensor
+ TensorFlow coefficient matrix for the parametric PSF field model
+
+ """
+ self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
+
+ def set_zero_nonparam(self):
+ """Set to zero the non-parametric part.
+
+ A function to set non-parametric alpha parameters
+ equal to zero.
+
+ """
+ self.tf_np_poly_opd.set_alpha_zero()
+
+ def set_nonzero_nonparam(self):
+ """Set to non-zero the non-parametric part.
+
+ A function to set non-parametric alpha parameters
+ equal to non-zero values.
+
+ """
+ self.tf_np_poly_opd.set_alpha_identity()
+
+ def set_trainable_layers(self, param_bool=True, nonparam_bool=True):
+ """Set Trainable Layers.
+
+ A function to set the layers to be trainable or not.
+
+ Parameters
+ ----------
+ param_bool: bool
+ Boolean flag for the parametric layers
+ nonparam_bool: bool
+ Boolean flag for the non-parametric layers
+
+ """
+ self.tf_np_poly_opd.trainable = nonparam_bool
+ self.tf_poly_Z_field.trainable = param_bool
+
+ def set_output_Q(self, output_Q, output_dim=None):
+ """Set the value of the output_Q parameter.
+
+ Useful for generating/predicting PSFs at a different sampling wrt the
+ observation sampling.
+
+ Parameters
+ ----------
+ output_Q: float
+ Oversampling factor
+ output_dim: int
+ Output dimension
+
+ """
+ self.output_Q = output_Q
+ if output_dim is not None:
+ self.output_dim = output_dim
+
+ # Reinitialize the PSF batch poly generator
+ self.tf_batch_poly_PSF = TF_batch_poly_PSF(
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
+ )
+
+ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
+ """Predict a set of monochromatic PSFs at desired positions.
+
+ Parameters
+ ----------
+ input_positions: Tensor(batch_dim x 2)
+ Positions at which to compute the PSFs.
+ lambda_obs: float
+ Observed wavelength in um.
+ phase_N: int
+ Required wavefront dimension. Should be calculated as:
+ ``simPSF_np = wf.SimPSFToolkit(...)``
+ ``phase_N = simPSF_np.feasible_N(lambda_obs)``
+
+ """
+ # Initialise the monochromatic PSF batch calculator
+ tf_batch_mono_psf = TF_batch_mono_PSF(
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
+ )
+ # Set the lambda_obs and the phase_N parameters
+ tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
+
+ # Calculate parametric part
+ zernike_coeffs = self.tf_poly_Z_field(input_positions)
+ param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
+
+ # Calculate the non parametric part
+ nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
+
+ # Add the estimations
+ opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
+
+ # Compute the monochromatic PSFs
+ mono_psf_batch = tf_batch_mono_psf(opd_maps)
+
+ return mono_psf_batch
+
+ def predict_opd(self, input_positions):
+ """Predict the OPD at some positions.
+
+ Parameters
+ ----------
+ input_positions: Tensor(batch_dim x 2)
+ Positions to predict the OPD.
+
+ Returns
+ -------
+ opd_maps : Tensor [batch x opd_dim x opd_dim]
+ OPD at requested positions.
+
+ """
+ # Calculate parametric part
+ zernike_coeffs = self.tf_poly_Z_field(input_positions)
+ param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
+ # Calculate the non parametric part
+ nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
+ # Add the estimations
+ opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
+
+ return opd_maps
+
+ def assign_S_mat(self, S_mat):
+ """Assign DD features matrix."""
+ self.tf_np_poly_opd.assign_S_mat(S_mat)
+
+ def project_DD_features(self, tf_zernike_cube):
+ """
+ Project non-parametric wavefront onto first n_z Zernikes and transfer
+ their parameters to the parametric model.
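+
+ Notes
+ -----
+ A schematic of the algebra implemented below, in this module's
+ notation: with the non-parametric OPD written as ``alpha @ S``, the
+ parametric coefficient matrix is corrected by the Zernike projections
+ of the mixed features, ``C <- C + delta_C``, and the projected
+ component is then removed from the data-driven features,
+ ``S <- S - (alpha^{-1} @ beta_tilde) @ Z``.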
+
+ """
+ # Compute Zernike norm for projections
+ n_pix_zernike = PI_zernikes(tf_zernike_cube[0, :, :], tf_zernike_cube[0, :, :])
+ # Multiply Alpha matrix with DD features matrix S
+ inter_res_v2 = tf.tensordot(
+ self.tf_np_poly_opd.alpha_mat[: self.tf_poly_Z_field.coeff_mat.shape[1], :],
+ self.tf_np_poly_opd.S_mat,
+ axes=1,
+ )
+ # Project over first n_z Zernikes
+ # TO DO: Clean up
+ delta_C_poly = tf.constant(
+ np.array(
+ [
+ [
+ PI_zernikes(
+ tf_zernike_cube[i, :, :],
+ inter_res_v2[j, :, :],
+ n_pix_zernike,
+ )
+ for j in range(self.tf_poly_Z_field.coeff_mat.shape[1])
+ ]
+ for i in range(self.n_zernikes)
+ ]
+ ),
+ dtype=tf.float32,
+ )
+ old_C_poly = self.tf_poly_Z_field.coeff_mat
+ # Corrected parametric coeff matrix
+ new_C_poly = old_C_poly + delta_C_poly
+ self.assign_coeff_matrix(new_C_poly)
+
+ # Remove extracted features from non-parametric model
+ # Mix DD features with matrix alpha
+ S_tilde = tf.tensordot(
+ self.tf_np_poly_opd.alpha_mat, self.tf_np_poly_opd.S_mat, axes=1
+ )
+ # TO DO: Clean Up
+ # Get beta tilde as the projection of the first n_param_poly_terms (6 for d_max=2) onto the first n_zernikes.
+ beta_tilde_inner = np.array(
+ [
+ [
+ PI_zernikes(tf_zernike_cube[j, :, :], S_tilde_slice, n_pix_zernike)
+ for j in range(self.n_zernikes)
+ ]
+ for S_tilde_slice in S_tilde[
+ : self.tf_poly_Z_field.coeff_mat.shape[1], :, :
+ ]
+ ]
+ )
+
+ # Only pad in the first dimension so we get a matrix of size (d_max_nonparam_terms)x(n_zernikes) --> 21x15 or 21x45.
+ beta_tilde = np.pad(
+ beta_tilde_inner,
+ [(0, S_tilde.shape[0] - beta_tilde_inner.shape[0]), (0, 0)],
+ mode="constant",
+ )
+
+ # Unmix beta tilde with the inverse of alpha
+ beta = tf.constant(
+ np.linalg.inv(self.tf_np_poly_opd.alpha_mat) @ beta_tilde, dtype=tf.float32
+ )
+ # Get the projection of the unmixed features onto the Zernike basis
+
+ # Now since beta.shape[1]=n_zernikes we can take the whole beta matrix.
+ S_mat_projected = tf.tensordot(beta, tf_zernike_cube, axes=[1, 0])
+
+ # Subtract the projection from the DD features
+ S_new = self.tf_np_poly_opd.S_mat - S_mat_projected
+ self.assign_S_mat(S_new)
+
+ def call(self, inputs):
+ """Define the PSF field forward model.
+
+ [1] From positions to Zernike coefficients
+ [2] From Zernike coefficients to OPD maps
+ [3] From OPD maps and SED info to polychromatic PSFs
+
+ OPD: Optical Path Differences
+ """
+ # Unpack inputs
+ input_positions = inputs[0]
+ packed_SEDs = inputs[1]
+
+ # Forward model
+ # Calculate parametric part
+ zernike_coeffs = self.tf_poly_Z_field(input_positions)
+ param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
+ # Add l2 loss on the parametric OPD
+ self.add_loss(
+ self.l2_param * tf.math.reduce_sum(tf.math.square(param_opd_maps))
+ )
+ # Calculate the non parametric part
+ nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
+ # Add the estimations
+ opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
+ # Compute the polychromatic PSFs
+ poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
+
+ return poly_psfs
diff --git a/src/wf_psf/psf_models/psf_models.py b/src/wf_psf/psf_models/psf_models.py
new file mode 100644
index 00000000..bcfec0b4
--- /dev/null
+++ b/src/wf_psf/psf_models/psf_models.py
@@ -0,0 +1,230 @@
+"""PSF_Models.
+
+A module which provides general utility methods
+to manage the parameters of the psf model.
+
+:Authors: Tobias Liaudat and Jennifer Pollack
+
+"""
+
+import numpy as np
+import tensorflow as tf
+from tensorflow.python.keras.engine import data_adapter
+from wf_psf.utils.utils import PI_zernikes, zernike_generator
+from wf_psf.sims.SimPSFToolkit import SimPSFToolkit
+import glob
+from sys import exit
+import logging
+
+logger = logging.getLogger(__name__)
+
+PSF_CLASS = {}
+
+
+class PsfModelError(Exception):
+ """PSF Model Parameter Error exception class for specific error scenarios."""
+
+ def __init__(
+ self, message="An error with your PSF model parameter settings occurred."
+ ):
+ self.message = message
+ super().__init__(self.message)
+
+
+def register_psfclass(psf_class):
+ """Register PSF Class.
+
+ A wrapper function to register all PSF model classes
+ in a dictionary.
+
+ Parameters
+ ----------
+ psf_class: type
+ PSF Class
+
+ Returns
+ -------
+ psf_class: type
+ PSF class
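+
+ Examples
+ --------
+ A minimal registration sketch (``MyPSFModel`` and the ``"my_model"``
+ identifier are hypothetical)::
+
+ >>> @register_psfclass
+ ... class MyPSFModel(tf.keras.Model):
+ ... ids = ("my_model",)
+ >>> PSF_CLASS["my_model"] is MyPSFModel
+ True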
+
+ """
+ for psf_id in psf_class.ids:
+ PSF_CLASS[psf_id] = psf_class
+
+ return psf_class
+
+
+def set_psf_model(model_name):
+ """Set PSF Model Class.
+
+ A function to select a class of
+ the PSF model from a dictionary.
+
+ Parameters
+ ----------
+ model_name: str
+ Name of PSF model
+
+ Returns
+ -------
+ psf_class: class
+ PSF model class corresponding to the given name
+
+ """
+
+ try:
+ psf_class = PSF_CLASS[model_name]
+ except KeyError as e:
+ logger.exception(e)
+ raise PsfModelError("PSF model entered is invalid. Check your config settings.")
+ return psf_class
+
+
+def get_psf_model(model_params, training_hparams, *coeff_matrix):
+ """Get PSF Model Class Instance.
+
+ A function to instantiate a
+ PSF model class.
+
+ Parameters
+ ----------
+ model_params: RecursiveNamespace
+ Recursive Namespace object containing the PSF model parameters,
+ including the model name
+ training_hparams: RecursiveNamespace
+ Recursive Namespace object containing the training hyperparameters
+ coeff_matrix: Tensor or None, optional
+ Initialization of the coefficient matrix defining the parametric psf field model
+
+ Returns
+ -------
+ psf_class: class instance
+ PSF model class instance
+
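+ Examples
+ --------
+ A usage sketch, assuming ``model_params.model_name`` matches one of the
+ registered identifiers (e.g. ``"poly"``)::
+
+ >>> psf_model = get_psf_model(model_params, training_hparams)
+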
+ """
+ psf_class = set_psf_model(model_params.model_name)
+
+ return psf_class(model_params, training_hparams, *coeff_matrix)
+
+
+def get_psf_model_weights_filepath(weights_filepath):
+ """Get PSF model weights filepath.
+
+ A function to resolve the user-specified PSF model weights path and
+ strip the file extension.
+
+ Parameters
+ ----------
+ weights_filepath: str
+ Path (or glob pattern) of the PSF model weights to be loaded.
+
+ Returns
+ -------
+ str
+ The resolved path of the PSF model weights file without its extension.
+
+ """
+ try:
+ return glob.glob(weights_filepath)[0].split(".")[0]
+ except IndexError:
+ logger.exception(
+ "PSF weights file not found. Check that you've specified the correct weights file in the metrics config file."
+ )
+ raise PsfModelError("PSF model weights error.")
+
+
+def tf_zernike_cube(n_zernikes, pupil_diam):
+ """Tensor Flow Zernike Cube.
+
+ A function to generate Zernike maps on
+ a three-dimensional tensor.
+
+ Parameters
+ ----------
+ n_zernikes: int
+ Number of Zernike polynomials
+ pupil_diam: float
+ Size of the pupil diameter
+
+ Returns
+ -------
+ zernike_cube: tf.Tensor
+ Float32 tensor stacking the n_zernikes Zernike maps
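+
+ Examples
+ --------
+ A minimal sketch (the map size follows ``pupil_diam``, here assumed
+ to be 256 pixels)::
+
+ >>> zk_cube = tf_zernike_cube(n_zernikes=15, pupil_diam=256)
+ >>> zk_cube.dtype
+ tf.float32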
+
+ """
+ # Prepare the inputs
+ # Generate Zernike maps
+ zernikes = zernike_generator(n_zernikes=n_zernikes, wfe_dim=pupil_diam)
+ # Now as cubes
+ np_zernike_cube = np.zeros(
+ (len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1])
+ )
+
+ for it in range(len(zernikes)):
+ np_zernike_cube[it, :, :] = zernikes[it]
+
+ np_zernike_cube[np.isnan(np_zernike_cube)] = 0
+
+ return tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
+
+
+def tf_obscurations(pupil_diam, N_filter=2):
+ """Tensor Flow Obscurations.
+
+ A function to generate obscurations as a tensor.
+
+ Parameters
+ ----------
+ pupil_diam: float
+ Size of the pupil diameter
+ N_filter: int
+ Number of filters
+
+ Returns
+ -------
+ obscurations: tf.Tensor
+ Pupil obscurations as a complex64 tensor
+
+ """
+ obscurations = SimPSFToolkit.generate_pupil_obscurations(
+ N_pix=pupil_diam, N_filter=N_filter
+ )
+ return tf.convert_to_tensor(obscurations, dtype=tf.complex64)
+
+
+
+def simPSF(model_params):
+ """Simulated PSF model.
+
+ A function to instantiate a
+ simulated PSF model object.
+
+ Parameters
+ ----------
+ model_params: RecursiveNamespace
+ Recursive Namespace object storing the model parameters
+
+ Returns
+ -------
+ simPSF_np: SimPSFToolkit
+ Initialized simulated PSF model object
+
+ """
+
+ simPSF_np = SimPSFToolkit(
+ max_order=model_params.param_hparams.n_zernikes,
+ pupil_diameter=model_params.pupil_diameter,
+ output_dim=model_params.output_dim,
+ oversampling_rate=model_params.oversampling_rate,
+ output_Q=model_params.output_Q,
+ SED_interp_pts_per_bin=model_params.sed_interp_pts_per_bin,
+ SED_extrapolate=model_params.sed_extrapolate,
+ SED_interp_kind=model_params.sed_interp_kind,
+ SED_sigma=model_params.sed_sigma,
+ )
+
+ simPSF_np.gen_random_Z_coeffs(max_order=model_params.param_hparams.n_zernikes)
+ z_coeffs = simPSF_np.normalize_zernikes(
+ simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms
+ )
+ simPSF_np.set_z_coeffs(z_coeffs)
+ simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
+
+ return simPSF_np
diff --git a/wf_psf/tf_layers.py b/src/wf_psf/psf_models/tf_layers.py
similarity index 79%
rename from wf_psf/tf_layers.py
rename to src/wf_psf/psf_models/tf_layers.py
index 4971c587..619917f2 100644
--- a/wf_psf/tf_layers.py
+++ b/src/wf_psf/psf_models/tf_layers.py
@@ -1,13 +1,16 @@
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
-from wf_psf.tf_modules import TF_mono_PSF
-from wf_psf.utils import calc_poly_position_mat
-import wf_psf.utils as utils
+from wf_psf.psf_models.tf_modules import TF_mono_PSF
+from wf_psf.utils.utils import calc_poly_position_mat
+import wf_psf.utils.utils as utils
+import logging
+
+logger = logging.getLogger(__name__)
class TF_poly_Z_field(tf.keras.layers.Layer):
- """ Calculate the zernike coefficients for a given position.
+ """Calculate the zernike coefficients for a given position.
This module implements a polynomial model of Zernike
coefficient variation.
@@ -21,7 +24,15 @@ class TF_poly_Z_field(tf.keras.layers.Layer):
"""
- def __init__(self, x_lims, y_lims, n_zernikes=45, d_max=2, name='TF_poly_Z_field'):
+ def __init__(
+ self,
+ x_lims,
+ y_lims,
+ random_seed=None,
+ n_zernikes=45,
+ d_max=2,
+ name="TF_poly_Z_field",
+ ):
super().__init__(name=name)
self.n_zernikes = n_zernikes
@@ -30,32 +41,34 @@ def __init__(self, x_lims, y_lims, n_zernikes=45, d_max=2, name='TF_poly_Z_field
self.coeff_mat = None
self.x_lims = x_lims
self.y_lims = y_lims
+ self.random_seed = random_seed
self.init_coeff_matrix()
def get_poly_coefficients_shape(self):
- """ Return the shape of the coefficient matrix."""
+ """Return the shape of the coefficient matrix."""
return (self.n_zernikes, int((self.d_max + 1) * (self.d_max + 2) / 2))
def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
+ """Assign coefficient matrix."""
self.coeff_mat.assign(coeff_mat)
def get_coeff_matrix(self):
- """ Get coefficient matrix."""
+ """Get coefficient matrix."""
return self.coeff_mat
def init_coeff_matrix(self):
- """ Initialize coefficient matrix."""
+ """Initialize coefficient matrix."""
+ tf.random.set_seed(self.random_seed)
coef_init = tf.random_uniform_initializer(minval=-0.01, maxval=0.01)
self.coeff_mat = tf.Variable(
initial_value=coef_init(self.get_poly_coefficients_shape()),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
def call(self, positions):
- """ Calculate the zernike coefficients for a given position.
+ """Calculate the zernike coefficients for a given position.
The position polynomial matrix and the coefficients should be
set before calling this function.
@@ -69,14 +82,16 @@ def call(self, positions):
-------
zernikes_coeffs: Tensor(batch, n_zernikes, 1, 1)
"""
- poly_mat = calc_poly_position_mat(positions, self.x_lims, self.y_lims, self.d_max)
+ poly_mat = calc_poly_position_mat(
+ positions, self.x_lims, self.y_lims, self.d_max
+ )
zernikes_coeffs = tf.transpose(tf.linalg.matmul(self.coeff_mat, poly_mat))
return zernikes_coeffs[:, :, tf.newaxis, tf.newaxis]
class TF_zernike_OPD(tf.keras.layers.Layer):
- """ Turn zernike coefficients into an OPD.
+ """Turn zernike coefficients into an OPD.
Will use all of the Zernike maps provided.
Both the Zernike maps and the Zernike coefficients must be provided.
@@ -92,13 +107,13 @@ class TF_zernike_OPD(tf.keras.layers.Layer):
"""
- def __init__(self, zernike_maps, name='TF_zernike_OPD'):
+ def __init__(self, zernike_maps, name="TF_zernike_OPD"):
super().__init__(name=name)
self.zernike_maps = zernike_maps
def call(self, z_coeffs):
- """ Perform the weighted sum of Zernikes coeffs and maps.
+ """Perform the weighted sum of Zernikes coeffs and maps.
Returns
-------
@@ -135,7 +150,7 @@ class TF_batch_poly_PSF(tf.keras.layers.Layer):
"""
- def __init__(self, obscurations, output_Q, output_dim=64, name='TF_batch_poly_PSF'):
+ def __init__(self, obscurations, output_Q, output_dim=64, name="TF_batch_poly_PSF"):
super().__init__(name=name)
self.output_Q = output_Q
@@ -162,7 +177,7 @@ def calculate_mono_PSF(self, packed_elems):
lambda_obs,
self.obscurations,
output_Q=self.output_Q,
- output_dim=self.output_dim
+ output_dim=self.output_dim,
)
# Calculate the PSF
@@ -185,7 +200,7 @@ def _calculate_poly_PSF(elems_to_unpack):
elems_to_unpack,
parallel_iterations=10,
fn_output_signature=tf.float32,
- swap_memory=True
+ swap_memory=True,
)
# Readability
@@ -211,7 +226,7 @@ def _calculate_PSF_batch(elems_to_unpack):
elems_to_unpack,
parallel_iterations=10,
fn_output_signature=tf.float32,
- swap_memory=True
+ swap_memory=True,
)
psf_poly_batch = _calculate_PSF_batch((opd_batch, packed_SED_data))
@@ -237,10 +252,10 @@ class TF_batch_mono_PSF(tf.keras.layers.Layer):
Output oversampling value.
output_dim: int
Output PSF stamp dimension.
-
+
"""
- def __init__(self, obscurations, output_Q, output_dim=64, name='TF_batch_mono_PSF'):
+ def __init__(self, obscurations, output_Q, output_dim=64, name="TF_batch_mono_PSF"):
super().__init__(name=name)
self.output_Q = output_Q
@@ -254,8 +269,7 @@ def __init__(self, obscurations, output_Q, output_dim=64, name='TF_batch_mono_PS
self.current_opd = None
def calculate_mono_PSF(self, current_opd):
- """Calculate monochromatic PSF from OPD info.
- """
+ """Calculate monochromatic PSF from OPD info."""
# Calculate the PSF
mono_psf = self.tf_mono_psf_gen.__call__(current_opd[tf.newaxis, :, :])
mono_psf = tf.squeeze(mono_psf, axis=0)
@@ -263,23 +277,23 @@ def calculate_mono_PSF(self, current_opd):
return mono_psf
def init_mono_PSF(self):
- """ Initialise or restart the PSF generator. """
+ """Initialise or restart the PSF generator."""
self.tf_mono_psf_gen = TF_mono_PSF(
self.phase_N,
self.lambda_obs,
self.obscurations,
output_Q=self.output_Q,
- output_dim=self.output_dim
+ output_dim=self.output_dim,
)
def set_lambda_phaseN(self, phase_N=914, lambda_obs=0.7):
- """ Set the lambda value for monochromatic PSFs and the phaseN. """
+ """Set the lambda value for monochromatic PSFs and the phaseN."""
self.phase_N = phase_N
self.lambda_obs = lambda_obs
self.init_mono_PSF()
def set_output_params(self, output_Q, output_dim):
- """ Set output patams, Q and dimension. """
+ """Set output patams, Q and dimension."""
self.output_Q = output_Q
self.output_dim = output_dim
self.init_mono_PSF()
@@ -296,7 +310,7 @@ def _calculate_PSF_batch(elems_to_unpack):
elems_to_unpack,
parallel_iterations=10,
fn_output_signature=tf.float32,
- swap_memory=True
+ swap_memory=True,
)
mono_psf_batch = _calculate_PSF_batch((opd_batch))
@@ -305,7 +319,7 @@ def _calculate_PSF_batch(elems_to_unpack):
class TF_NP_poly_OPD(tf.keras.layers.Layer):
- """ Non-parametric OPD generation with polynomial variations.
+ """Non-parametric OPD generation with polynomial variations.
Parameters
@@ -314,6 +328,8 @@ class TF_NP_poly_OPD(tf.keras.layers.Layer):
Limits of the x axis.
y_lims: [int, int]
Limits of the y axis.
+ random_seed: int
+ Random seed initialization for Tensor Flow
d_max: int
Max degree of polynomial determining the FoV variations.
opd_dim: int
@@ -321,11 +337,20 @@ class TF_NP_poly_OPD(tf.keras.layers.Layer):
"""
- def __init__(self, x_lims, y_lims, d_max=3, opd_dim=256, name='TF_NP_poly_OPD'):
+ def __init__(
+ self,
+ x_lims,
+ y_lims,
+ random_seed=None,
+ d_max=3,
+ opd_dim=256,
+ name="TF_NP_poly_OPD",
+ ):
super().__init__(name=name)
# Parameters
self.x_lims = x_lims
self.y_lims = y_lims
+ self.random_seed = random_seed
self.d_max = d_max
self.opd_dim = opd_dim
@@ -337,16 +362,17 @@ def __init__(self, x_lims, y_lims, d_max=3, opd_dim=256, name='TF_NP_poly_OPD'):
self.init_vars()
def init_vars(self):
- """ Initialize trainable variables.
+ """Initialize trainable variables.
Basic initialization. Random uniform for S and identity for alpha.
"""
# S initialization
+ tf.random.set_seed(self.random_seed)
random_init = tf.random_uniform_initializer(minval=-0.001, maxval=0.001)
self.S_mat = tf.Variable(
initial_value=random_init(shape=[self.n_poly, self.opd_dim, self.opd_dim]),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
# Alpha initialization
@@ -355,19 +381,19 @@ def init_vars(self):
)
def set_alpha_zero(self):
- """ Set alpha matrix to zero."""
- _ = self.alpha_mat.assign(tf.zeros_like(self.alpha_mat, dtype=tf.float32))
+ """Set alpha matrix to zero."""
+ self.alpha_mat.assign(tf.zeros_like(self.alpha_mat, dtype=tf.float32))
def set_alpha_identity(self):
- """ Set alpha matrix to the identity."""
- _ = self.alpha_mat.assign(tf.eye(self.n_poly, dtype=tf.float32))
+ """Set alpha matrix to the identity."""
+ self.alpha_mat.assign(tf.eye(self.n_poly, dtype=tf.float32))
def assign_S_mat(self, S_mat):
- """ Assign DD features matrix."""
+ """Assign DD features matrix."""
self.S_mat.assign(S_mat)
def call(self, positions):
- """ Calculate the OPD maps for the given positions.
+ """Calculate the OPD maps for the given positions.
Calculating: Pi(pos) x alpha x S
@@ -381,7 +407,9 @@ def call(self, positions):
opd_maps: Tensor(batch, opd_dim, opd_dim)
"""
# Calculate the Pi matrix
- poly_mat = calc_poly_position_mat(positions, self.x_lims, self.y_lims, self.d_max)
+ poly_mat = calc_poly_position_mat(
+ positions, self.x_lims, self.y_lims, self.d_max
+ )
# We need to transpose it here to have the batch dimension at first
poly_mat = tf.transpose(poly_mat, perm=[1, 0])
@@ -391,7 +419,7 @@ def call(self, positions):
class TF_NP_MCCD_OPD_v2(tf.keras.layers.Layer):
- """ Non-parametric OPD generation with hybrid-MCCD variations.
+ """Non-parametric OPD generation with hybrid-MCCD variations.
Parameters
@@ -426,16 +454,19 @@ def __init__(
spatial_dic,
x_lims,
y_lims,
+ random_seed=None,
d_max=2,
graph_features=6,
l1_rate=1e-5,
opd_dim=256,
- name='TF_NP_MCCD_OPD_v2'
+ name="TF_NP_MCCD_OPD_v2",
):
super().__init__(name=name)
# Parameters
self.x_lims = x_lims
self.y_lims = y_lims
+ self.random_seed = random_seed
self.d_max = d_max
self.opd_dim = opd_dim
@@ -459,54 +490,73 @@ def __init__(
self.init_vars()
def init_vars(self):
- """ Initialize trainable variables.
+ """Initialize trainable variables.
Basic initialization. Random uniform for S and identity for alpha.
"""
# S initialization
+ tf.random.set_seed(self.random_seed)
random_init = tf.random_uniform_initializer(minval=-0.001, maxval=0.001)
self.S_poly = tf.Variable(
- initial_value=random_init(shape=[self.poly_features, self.opd_dim, self.opd_dim]),
+ initial_value=random_init(
+ shape=[self.poly_features, self.opd_dim, self.opd_dim]
+ ),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
self.S_graph = tf.Variable(
- initial_value=random_init(shape=[self.graph_features, self.opd_dim, self.opd_dim]),
+ initial_value=random_init(
+ shape=[self.graph_features, self.opd_dim, self.opd_dim]
+ ),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
# Alpha initialization
self.alpha_poly = tf.Variable(
- initial_value=tf.eye(num_rows=self.poly_features, num_columns=self.poly_features),
+ initial_value=tf.eye(
+ num_rows=self.poly_features, num_columns=self.poly_features
+ ),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
self.alpha_graph = tf.Variable(
- initial_value=tf.eye(num_rows=self.n_graph_elems, num_columns=self.graph_features),
+ initial_value=tf.eye(
+ num_rows=self.n_graph_elems, num_columns=self.graph_features
+ ),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
def set_alpha_zero(self):
- """ Set alpha matrix to zero."""
- _ = self.alpha_poly.assign(tf.zeros_like(self.alpha_poly, dtype=tf.float32))
- _ = self.alpha_graph.assign(tf.zeros_like(self.alpha_graph, dtype=tf.float32))
+ """Set alpha matrix to zero."""
+ self.alpha_poly.assign(tf.zeros_like(self.alpha_poly, dtype=tf.float32))
+ self.alpha_graph.assign(tf.zeros_like(self.alpha_graph, dtype=tf.float32))
def set_alpha_identity(self):
- """ Set alpha matrix to the identity."""
- _ = self.alpha_poly.assign(
- tf.eye(num_rows=self.poly_features, num_columns=self.poly_features, dtype=tf.float32)
+ """Set alpha matrix to the identity."""
+ self.alpha_poly.assign(
+ tf.eye(
+ num_rows=self.poly_features,
+ num_columns=self.poly_features,
+ dtype=tf.float32,
+ )
)
- _ = self.alpha_graph.assign(
- tf.eye(num_rows=self.n_graph_elems, num_columns=self.graph_features, dtype=tf.float32)
+ self.alpha_graph.assign(
+ tf.eye(
+ num_rows=self.n_graph_elems,
+ num_columns=self.graph_features,
+ dtype=tf.float32,
+ )
)
def predict(self, positions):
- """ Prediction step."""
+ """Prediction step."""
## Polynomial part
# Calculate the Pi matrix
- poly_mat = calc_poly_position_mat(positions, self.x_lims, self.y_lims, self.d_max)
+ poly_mat = calc_poly_position_mat(
+ positions, self.x_lims, self.y_lims, self.d_max
+ )
# We need to transpose it here to have the batch dimension at first
A_poly = tf.linalg.matmul(tf.transpose(poly_mat, perm=[1, 0]), self.alpha_poly)
interp_poly_opd = tf.tensordot(A_poly, self.S_poly, axes=1)
@@ -522,7 +572,7 @@ def predict(self, positions):
train_values=tf.expand_dims(A_graph_train, axis=0),
query_points=tf.expand_dims(positions, axis=0),
order=2,
- regularization_weight=0.0
+ regularization_weight=0.0,
)
# Remove extra dimension required by tfa's interpolate_spline
@@ -532,7 +582,7 @@ def predict(self, positions):
return tf.math.add(interp_poly_opd, interp_graph_opd)
def call(self, positions):
- """ Calculate the OPD maps for the given positions.
+ """Calculate the OPD maps for the given positions.
Calculating: batch(spatial_dict) x alpha x S
@@ -550,8 +600,10 @@ def call(self, positions):
# Try Lp norm with p=1.1
p = 1.1
self.add_loss(
- self.l1_rate *
- tf.math.pow(tf.math.reduce_sum(tf.math.pow(tf.math.abs(self.alpha_graph), p)), 1 / p)
+ self.l1_rate
+ * tf.math.pow(
+ tf.math.reduce_sum(tf.math.pow(tf.math.abs(self.alpha_graph), p)), 1 / p
+ )
)
def calc_index(idx_pos):
@@ -565,11 +617,15 @@ def calc_index(idx_pos):
# Tensor product to calculate the contribution
# Polynomial contribution
- batch_poly_dict = tf.gather(self.poly_dic, indices=indices, axis=0, batch_dims=0)
+ batch_poly_dict = tf.gather(
+ self.poly_dic, indices=indices, axis=0, batch_dims=0
+ )
intermediate_poly = tf.linalg.matmul(batch_poly_dict, self.alpha_poly)
contribution_poly = tf.tensordot(intermediate_poly, self.S_poly, axes=1)
# Graph contribution
- batch_graph_dict = tf.gather(self.graph_dic, indices=indices, axis=0, batch_dims=0)
+ batch_graph_dict = tf.gather(
+ self.graph_dic, indices=indices, axis=0, batch_dims=0
+ )
intermediate_graph = tf.linalg.matmul(batch_graph_dict, self.alpha_graph)
contribution_graph = tf.tensordot(intermediate_graph, self.S_graph, axes=1)
@@ -577,7 +633,7 @@ def calc_index(idx_pos):
class TF_NP_GRAPH_OPD(tf.keras.layers.Layer):
- """ Non-parametric OPD generation with only graph-cosntraint variations.
+ """Non-parametric OPD generation with only graph-cosntraint variations.
Parameters
@@ -612,15 +668,17 @@ def __init__(
spatial_dic,
x_lims,
y_lims,
+ random_seed=None,
graph_features=6,
l1_rate=1e-5,
opd_dim=256,
- name='TF_NP_GRAPH_OPD'
+ name="TF_NP_GRAPH_OPD",
):
super().__init__(name=name)
# Parameters
self.x_lims = x_lims
self.y_lims = y_lims
+ self.random_seed = random_seed
self.opd_dim = opd_dim
# L1 regularisation parameter
@@ -640,38 +698,47 @@ def __init__(
self.init_vars()
def init_vars(self):
- """ Initialize trainable variables.
+ """Initialize trainable variables.
Basic initialization. Random uniform for S and identity for alpha.
"""
# S initialization
+ tf.random.set_seed(self.random_seed)
random_init = tf.random_uniform_initializer(minval=-0.001, maxval=0.001)
self.S_graph = tf.Variable(
- initial_value=random_init(shape=[self.graph_features, self.opd_dim, self.opd_dim]),
+ initial_value=random_init(
+ shape=[self.graph_features, self.opd_dim, self.opd_dim]
+ ),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
# Alpha initialization
self.alpha_graph = tf.Variable(
- initial_value=tf.eye(num_rows=self.n_graph_elems, num_columns=self.graph_features),
+ initial_value=tf.eye(
+ num_rows=self.n_graph_elems, num_columns=self.graph_features
+ ),
trainable=True,
- dtype=tf.float32
+ dtype=tf.float32,
)
def set_alpha_zero(self):
- """ Set alpha matrix to zero."""
- _ = self.alpha_graph.assign(tf.zeros_like(self.alpha_graph, dtype=tf.float32))
+ """Set alpha matrix to zero."""
+ self.alpha_graph.assign(tf.zeros_like(self.alpha_graph, dtype=tf.float32))
def set_alpha_identity(self):
- """ Set alpha matrix to the identity."""
- _ = self.alpha_graph.assign(
- tf.eye(num_rows=self.n_graph_elems, num_columns=self.graph_features, dtype=tf.float32)
+ """Set alpha matrix to the identity."""
+ self.alpha_graph.assign(
+ tf.eye(
+ num_rows=self.n_graph_elems,
+ num_columns=self.graph_features,
+ dtype=tf.float32,
+ )
)
def predict(self, positions):
- """ Prediction step."""
+ """Prediction step."""
## Graph part
A_graph_train = tf.linalg.matmul(self.graph_dic, self.alpha_graph)
@@ -684,7 +751,7 @@ def predict(self, positions):
train_values=tf.expand_dims(A_graph_train, axis=0),
query_points=tf.expand_dims(positions, axis=0),
order=2,
- regularization_weight=0.0
+ regularization_weight=0.0,
)
# Remove extra dimension required by tfa's interpolate_spline
@@ -694,7 +761,7 @@ def predict(self, positions):
return interp_graph_opd
def call(self, positions):
- """ Calculate the OPD maps for the given positions.
+ """Calculate the OPD maps for the given positions.
Calculating: batch(spatial_dict) x alpha x S
@@ -714,8 +781,10 @@ def call(self, positions):
# Try Lp norm with p=1.1
p = 1.1
self.add_loss(
- self.l1_rate *
- tf.math.pow(tf.math.reduce_sum(tf.math.pow(tf.math.abs(self.alpha_graph), p)), 1 / p)
+ self.l1_rate
+ * tf.math.pow(
+ tf.math.reduce_sum(tf.math.pow(tf.math.abs(self.alpha_graph), p)), 1 / p
+ )
)
def calc_index(idx_pos):
@@ -729,7 +798,9 @@ def calc_index(idx_pos):
# Tensor product to calculate the contribution
# Graph contribution
- batch_graph_dict = tf.gather(self.graph_dic, indices=indices, axis=0, batch_dims=0)
+ batch_graph_dict = tf.gather(
+ self.graph_dic, indices=indices, axis=0, batch_dims=0
+ )
intermediate_graph = tf.linalg.matmul(batch_graph_dict, self.alpha_graph)
contribution_graph = tf.tensordot(intermediate_graph, self.S_graph, axes=1)
@@ -737,7 +808,7 @@ def calc_index(idx_pos):
class TF_physical_layer(tf.keras.layers.Layer):
- """ Store and calculate the zernike coefficients for a given position.
+ """Store and calculate the zernike coefficients for a given position.
This layer gives the Zernike contribution of the physical layer.
It is fixed and not trainable.
@@ -765,29 +836,29 @@ def __init__(
self,
obs_pos,
zks_prior,
- interpolation_type='none',
+ interpolation_type="none",
interpolation_args=None,
- name='TF_physical_layer',
+ name="TF_physical_layer",
):
super().__init__(name=name)
self.obs_pos = obs_pos
self.zks_prior = zks_prior
if interpolation_args is None:
- interpolation_args = {'order': 2, 'K': 50}
+ interpolation_args = {"order": 2, "K": 50}
# Define the prediction routine
- if interpolation_type == 'none':
+ if interpolation_type == "none":
self.predict = self.call
- elif interpolation_type == 'all':
+ elif interpolation_type == "all":
self.predict = self.interpolate_all
- elif interpolation_type == 'top_K':
+ elif interpolation_type == "top_K":
self.predict = self.interpolate_top_K
- elif interpolation_type == 'independent_Zk':
+ elif interpolation_type == "independent_Zk":
self.predict = self.interpolate_independent_Zk
def interpolate_all(self, positions):
- """ Zernike interpolation
-
+ """Zernike interpolation
+
Right now all the input elements are used to build the RBF interpolant
that is going to be used for the interpolation.
@@ -800,8 +871,8 @@ def interpolate_all(self, positions):
train_points=tf.expand_dims(self.obs_pos, axis=0),
train_values=tf.expand_dims(self.zks_prior, axis=0),
query_points=tf.expand_dims(positions, axis=0),
- order=self.interpolation_args['order'],
- regularization_weight=0.0
+ order=self.interpolation_args["order"],
+ regularization_weight=0.0,
)
# Remove extra dimension required by tfa's interpolate_spline
interp_zks = tf.squeeze(interp_zks, axis=0)
@@ -809,8 +880,8 @@ def interpolate_all(self, positions):
return interp_zks[:, :, tf.newaxis, tf.newaxis]
def interpolate_top_K(self, positions):
- """ Zernike interpolation
-
+ """Zernike interpolation
+
The class wf.utils.ZernikeInterpolation allows using only the K closest
elements for the interpolation. Although the interpolation error is
smaller, the computing time is larger.
@@ -819,30 +890,30 @@ def interpolate_top_K(self, positions):
zk_interpolator = utils.ZernikeInterpolation(
self.obs_pos,
self.zks_prior,
- k=self.interpolation_args['K'],
- order=self.interpolation_args['order']
+ k=self.interpolation_args["K"],
+ order=self.interpolation_args["order"],
)
interp_zks = zk_interpolator.interpolate_zks(positions)
return interp_zks[:, :, tf.newaxis, tf.newaxis]
def interpolate_independent_Zk(self, positions):
- """ Zernike interpolation
-
+ """Zernike interpolation
+
The class wf.utils.IndependentZernikeInterpolation allows interpolating each
order of the Zernike polynomials independently, using all the available
points to build the interpolant.
"""
zk_interpolator = utils.IndependentZernikeInterpolation(
- self.obs_pos, self.zks_prior, order=self.interpolation_args['order']
+ self.obs_pos, self.zks_prior, order=self.interpolation_args["order"]
)
interp_zks = zk_interpolator.interpolate_zks(positions)
return interp_zks[:, :, tf.newaxis, tf.newaxis]
def call(self, positions):
- """ Calculate the prior zernike coefficients for a given position.
+ """Calculate the prior zernike coefficients for a given position.
The position polynomial matrix and the coefficients should be
set before calling this function.
@@ -901,7 +972,9 @@ class OLD_TF_batch_poly_PSF(tf.keras.layers.Layer):
"""
- def __init__(self, obscurations, psf_batch, output_dim=64, name='TF_batch_poly_PSF'):
+ def __init__(
+ self, obscurations, psf_batch, output_dim=64, name="TF_batch_poly_PSF"
+ ):
super().__init__(name=name)
self.obscurations = obscurations
@@ -940,8 +1013,8 @@ def calculate_mono_PSF(self, packed_elems):
def calculate_poly_PSF(self, packed_elems):
"""Calculate a polychromatic PSF."""
- print('TF_batch_poly_PSF: calculate_poly_PSF: packed_elems.type')
- print(packed_elems.dtype)
+ logger.info("TF_batch_poly_PSF: calculate_poly_PSF: packed_elems.type")
+ logger.info(packed_elems.dtype)
def _calculate_poly_PSF(elems_to_unpack):
return tf.map_fn(
@@ -949,7 +1022,7 @@ def _calculate_poly_PSF(elems_to_unpack):
elems_to_unpack,
parallel_iterations=10,
fn_output_signature=tf.float32,
- swap_memory=True
+ swap_memory=True,
)
# Readability
diff --git a/wf_psf/tf_modules.py b/src/wf_psf/psf_models/tf_modules.py
similarity index 86%
rename from wf_psf/tf_modules.py
rename to src/wf_psf/psf_models/tf_modules.py
index f22e3934..cc9a8bd2 100644
--- a/wf_psf/tf_modules.py
+++ b/src/wf_psf/psf_models/tf_modules.py
@@ -3,7 +3,7 @@
class TF_fft_diffract(tf.Module):
- """ Diffract the wavefront into a monochromatic PSF.
+ """Diffract the wavefront into a monochromatic PSF.
Parameters
----------
@@ -21,8 +21,8 @@ def __init__(self, output_dim=64, output_Q=2, name=None):
self.downsample_layer = tf.keras.layers.AveragePooling2D(
pool_size=(self.output_Q, self.output_Q),
strides=None,
- padding='valid',
- data_format='channels_last'
+ padding="valid",
+ data_format="channels_last",
)
def crop_img(self, image):
@@ -48,8 +48,11 @@ def tf_crop_img(self, image, output_crop_dim):
# Crop image
cropped_image = tf.image.crop_to_bounding_box(
- tf.transpose(image, perm=[1, 2, 0]), offset_height, offset_width, target_height,
- target_width
+ tf.transpose(image, perm=[1, 2, 0]),
+ offset_height,
+ offset_width,
+ target_height,
+ target_width,
)
return tf.transpose(cropped_image, perm=[2, 0, 1])
@@ -61,16 +64,19 @@ def normalize_psf(self, psf):
return psf / norm_factor
def __call__(self, input_phase):
- """ Calculate the normalized PSF from the padded phase array.
- """
+ """Calculate the normalized PSF from the padded phase array."""
# Perform the FFT-based diffraction operation
# fft_phase = tf.signal.fftshift(tf.signal.fft2d(input_phase))
- fft_phase = tf.signal.fftshift(tf.signal.fft2d(input_phase[:, ...]), axes=[1, 2])
+ fft_phase = tf.signal.fftshift(
+ tf.signal.fft2d(input_phase[:, ...]), axes=[1, 2]
+ )
psf = tf.math.pow(tf.cast(tf.math.abs(fft_phase), dtype=tf.float64), 2)
# Crop the image
# We crop to output_dim*Q
- cropped_psf = self.tf_crop_img(psf, output_crop_dim=int(self.output_dim * self.output_Q))
+ cropped_psf = self.tf_crop_img(
+ psf, output_crop_dim=int(self.output_dim * self.output_Q)
+ )
# Downsample image
# We downsample by a factor Q to get output_dim
@@ -96,8 +102,7 @@ def __call__(self, input_phase):
class TF_build_phase(tf.Module):
- """ Build complex phase map from OPD map.
- """
+ """Build complex phase map from OPD map."""
def __init__(self, phase_N, lambda_obs, obscurations, name=None):
super().__init__(name=name)
@@ -107,7 +112,7 @@ def __init__(self, phase_N, lambda_obs, obscurations, name=None):
self.obscurations = obscurations
def zero_padding_diffraction(self, no_pad_phase):
- """ Pad with zeros corresponding to the required lambda.
+ """Pad with zeros corresponding to the required lambda.
Important: To check the original size of the ``no_pad_phase`` variable
we have to look in the [1] dimension not the [0] as it is the batch.
@@ -115,8 +120,12 @@ def zero_padding_diffraction(self, no_pad_phase):
# pad_num = int(self.phase_N//2 - no_pad_phase.shape[0]//2)
phase_shape = tf.shape(no_pad_phase)
# pure tensorflow
- start = tf.math.floordiv(tf.cast(self.phase_N, dtype=tf.int32), tf.cast(2, dtype=tf.int32))
- stop = tf.math.floordiv(tf.cast(phase_shape[1], dtype=tf.int32), tf.cast(2, dtype=tf.int32))
+ start = tf.math.floordiv(
+ tf.cast(self.phase_N, dtype=tf.int32), tf.cast(2, dtype=tf.int32)
+ )
+ stop = tf.math.floordiv(
+ tf.cast(phase_shape[1], dtype=tf.int32), tf.cast(2, dtype=tf.int32)
+ )
pad_num = tf.math.subtract(start, stop) # start - stop
padding = [(0, 0), (pad_num, pad_num), (pad_num, pad_num)]
@@ -127,12 +136,14 @@ def zero_padding_diffraction(self, no_pad_phase):
# return tf.pad(no_pad_phase, padding)
def apply_obscurations(self, phase):
- """Multiply element-wise with the obscurations. """
+ """Multiply element-wise with the obscurations."""
return tf.math.multiply(phase, tf.cast(self.obscurations, phase.dtype))
def opd_to_phase(self, opd):
"""Convert from opd to phase."""
- pre_phase = tf.math.multiply(tf.cast((2 * np.pi) / self.lambda_obs, opd.dtype), opd)
+ pre_phase = tf.math.multiply(
+ tf.cast((2 * np.pi) / self.lambda_obs, opd.dtype), opd
+ )
phase = tf.math.exp(tf.dtypes.complex(tf.cast(0, pre_phase.dtype), pre_phase))
# return tf.cast(phase, dtype=tf.complex64)
return phase
@@ -147,7 +158,7 @@ def __call__(self, opd):
class TF_zernike_OPD(tf.Module):
- """ Turn zernike coefficients into an OPD.
+ """Turn zernike coefficients into an OPD.
Will use all of the Zernike maps provided.
Both the Zernike maps and the Zernike coefficients must be provided.
@@ -180,7 +191,9 @@ class TF_Zernike_mono_PSF(tf.Module):
Following a Zernike model.
"""
- def __init__(self, phase_N, lambda_obs, obscurations, zernike_maps, output_dim=64, name=None):
+ def __init__(
+ self, phase_N, lambda_obs, obscurations, zernike_maps, output_dim=64, name=None
+ ):
super().__init__(name=name)
self.tf_build_opd_zernike = TF_zernike_OPD(zernike_maps)
@@ -196,10 +209,11 @@ def __call__(self, z_coeffs):
class TF_mono_PSF(tf.Module):
- """ Calculate a monochromatic PSF from an OPD map.
- """
+ """Calculate a monochromatic PSF from an OPD map."""
- def __init__(self, phase_N, lambda_obs, obscurations, output_Q, output_dim=64, name=None):
+ def __init__(
+ self, phase_N, lambda_obs, obscurations, output_Q, output_dim=64, name=None
+ ):
super().__init__(name=name)
self.output_Q = output_Q
diff --git a/wf_psf/tf_psf_field.py b/src/wf_psf/psf_models/tf_psf_field.py
similarity index 87%
rename from wf_psf/tf_psf_field.py
rename to src/wf_psf/psf_models/tf_psf_field.py
index 16a91d8c..1e95e584 100644
--- a/wf_psf/tf_psf_field.py
+++ b/src/wf_psf/psf_models/tf_psf_field.py
@@ -1,13 +1,21 @@
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.engine import data_adapter
-from wf_psf.tf_layers import TF_poly_Z_field, TF_zernike_OPD, TF_batch_poly_PSF
-from wf_psf.tf_layers import TF_NP_poly_OPD, TF_batch_mono_PSF, TF_physical_layer
-from wf_psf.utils import PI_zernikes
+from wf_psf.psf_models.tf_layers import (
+ TF_poly_Z_field,
+ TF_zernike_OPD,
+ TF_batch_poly_PSF,
+)
+from wf_psf.psf_models.tf_layers import (
+ TF_NP_poly_OPD,
+ TF_batch_mono_PSF,
+ TF_physical_layer,
+)
+from wf_psf.utils.utils import PI_zernikes
class TF_PSF_field_model(tf.keras.Model):
- """ Parametric PSF field model!
+ """Parametric PSF field model!
Fully parametric model based on the Zernike polynomial basis.
@@ -54,14 +62,14 @@ def __init__(
obscurations,
batch_size,
output_Q,
- l2_param=0.,
+ l2_param=0.0,
output_dim=64,
n_zernikes=45,
d_max=2,
x_lims=[0, 1e3],
y_lims=[0, 1e3],
coeff_mat=None,
- name='TF_PSF_field_model'
+ name="TF_PSF_field_model",
):
super(TF_PSF_field_model, self).__init__()
@@ -87,7 +95,10 @@ def __init__(
# Initialize the first layer
self.tf_poly_Z_field = TF_poly_Z_field(
- x_lims=self.x_lims, y_lims=self.y_lims, n_zernikes=self.n_zernikes, d_max=self.d_max
+ x_lims=self.x_lims,
+ y_lims=self.y_lims,
+ n_zernikes=self.n_zernikes,
+ d_max=self.d_max,
)
# Initialize the zernike to OPD layer
@@ -95,7 +106,9 @@ def __init__(
# Initialize the batch opd to batch polychromatic PSF layer
self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
# Initialize the model parameters with non-default value
@@ -110,15 +123,15 @@ def __init__(
# self.call = self.call_l2_opd_loss
def get_coeff_matrix(self):
- """ Get coefficient matrix."""
+ """Get coefficient matrix."""
return self.tf_poly_Z_field.get_coeff_matrix()
def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
+ """Assign coefficient matrix."""
self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
def set_output_Q(self, output_Q, output_dim=None):
- """ Set the value of the output_Q parameter.
+ """Set the value of the output_Q parameter.
Useful for generating/predicting PSFs at a different sampling wrt the
observation sampling.
"""
@@ -127,11 +140,13 @@ def set_output_Q(self, output_Q, output_dim=None):
self.output_dim = output_dim
# Reinitialize the PSF batch poly generator
self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- """ Predict a set of monochromatic PSF at desired positions.
+ """Predict a set of monochromatic PSF at desired positions.
input_positions: Tensor(batch_dim x 2)
@@ -146,7 +161,9 @@ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
# Initialise the monochromatic PSF batch calculator
tf_batch_mono_psf = TF_batch_mono_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
# Set the lambda_obs and the phase_N parameters
tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
@@ -161,7 +178,7 @@ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
return mono_psf_batch
def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
+ """Predict the OPD at some positions.
Parameters
----------
@@ -204,7 +221,7 @@ def call(self, inputs):
class TF_SemiParam_field(tf.keras.Model):
- """ PSF field forward model!
+ """PSF field forward model!
Semi parametric model based on the Zernike polynomial basis. The
@@ -254,14 +271,14 @@ def __init__(
batch_size,
output_Q,
d_max_nonparam=3,
- l2_param=0.,
+ l2_param=0.0,
output_dim=64,
n_zernikes=45,
d_max=2,
x_lims=[0, 1e3],
y_lims=[0, 1e3],
coeff_mat=None,
- name='TF_SemiParam_field'
+ name="TF_SemiParam_field",
):
super(TF_SemiParam_field, self).__init__()
@@ -292,7 +309,10 @@ def __init__(
# Initialize the first layer
self.tf_poly_Z_field = TF_poly_Z_field(
- x_lims=self.x_lims, y_lims=self.y_lims, n_zernikes=self.n_zernikes, d_max=self.d_max
+ x_lims=self.x_lims,
+ y_lims=self.y_lims,
+ n_zernikes=self.n_zernikes,
+ d_max=self.d_max,
)
# Initialize the zernike to OPD layer
@@ -300,12 +320,17 @@ def __init__(
# Initialize the non-parametric layer
self.tf_np_poly_opd = TF_NP_poly_OPD(
- x_lims=self.x_lims, y_lims=self.y_lims, d_max=self.d_max_nonparam, opd_dim=self.opd_dim
+ x_lims=self.x_lims,
+ y_lims=self.y_lims,
+ d_max=self.d_max_nonparam,
+ opd_dim=self.opd_dim,
)
# Initialize the batch opd to batch polychromatic PSF layer
self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
# Initialize the model parameters with non-default value
@@ -320,28 +345,28 @@ def __init__(
# self.call = self.call_l2_opd_loss
def get_coeff_matrix(self):
- """ Get coefficient matrix."""
+ """Get coefficient matrix."""
return self.tf_poly_Z_field.get_coeff_matrix()
def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
+ """Assign coefficient matrix."""
self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
def set_zero_nonparam(self):
- """ Set to zero the non-parametric part."""
+ """Set to zero the non-parametric part."""
self.tf_np_poly_opd.set_alpha_zero()
def set_nonzero_nonparam(self):
- """ Set to non-zero the non-parametric part."""
+ """Set to non-zero the non-parametric part."""
self.tf_np_poly_opd.set_alpha_identity()
def set_trainable_layers(self, param_bool=True, nonparam_bool=True):
- """ Set the layers to be trainable or not."""
+ """Set the layers to be trainable or not."""
self.tf_np_poly_opd.trainable = nonparam_bool
self.tf_poly_Z_field.trainable = param_bool
def set_output_Q(self, output_Q, output_dim=None):
- """ Set the value of the output_Q parameter.
+ """Set the value of the output_Q parameter.
Useful for generating/predicting PSFs at a different sampling wrt the
observation sampling.
"""
@@ -351,11 +376,13 @@ def set_output_Q(self, output_Q, output_dim=None):
# Reinitialize the PSF batch poly generator
self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- """ Predict a set of monochromatic PSF at desired positions.
+ """Predict a set of monochromatic PSF at desired positions.
input_positions: Tensor(batch_dim x 2)
@@ -370,7 +397,9 @@ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
# Initialise the monochromatic PSF batch calculator
tf_batch_mono_psf = TF_batch_mono_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
# Set the lambda_obs and the phase_N parameters
tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
@@ -389,7 +418,7 @@ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
return mono_psf_batch
def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
+ """Predict the OPD at some positions.
Parameters
----------
@@ -413,31 +442,39 @@ def predict_opd(self, input_positions):
return opd_maps
def assign_S_mat(self, S_mat):
- """ Assign DD features matrix."""
+ """Assign DD features matrix."""
self.tf_np_poly_opd.assign_S_mat(S_mat)
def project_DD_features(self, tf_zernike_cube):
- """
- Project non-parametric wavefront onto first n_z Zernikes and transfer
+ """
+ Project non-parametric wavefront onto first n_z Zernikes and transfer
their parameters to the parametric model.
-
+
"""
# Compute Zernike norm for projections
n_pix_zernike = PI_zernikes(tf_zernike_cube[0, :, :], tf_zernike_cube[0, :, :])
# Multiply Alpha matrix with DD features matrix S
inter_res_v2 = tf.tensordot(
- self.tf_np_poly_opd.alpha_mat[:self.tf_poly_Z_field.coeff_mat.shape[1], :],
+ self.tf_np_poly_opd.alpha_mat[: self.tf_poly_Z_field.coeff_mat.shape[1], :],
self.tf_np_poly_opd.S_mat,
- axes=1
+ axes=1,
)
# Project over first n_z Zernikes
delta_C_poly = tf.constant(
- np.array([[
- PI_zernikes(tf_zernike_cube[i, :, :], inter_res_v2[j, :, :], n_pix_zernike)
- for j in range(self.tf_poly_Z_field.coeff_mat.shape[1])
- ]
- for i in range(self.n_zernikes)]),
- dtype=tf.float32
+ np.array(
+ [
+ [
+ PI_zernikes(
+ tf_zernike_cube[i, :, :],
+ inter_res_v2[j, :, :],
+ n_pix_zernike,
+ )
+ for j in range(self.tf_poly_Z_field.coeff_mat.shape[1])
+ ]
+ for i in range(self.n_zernikes)
+ ]
+ ),
+ dtype=tf.float32,
)
old_C_poly = self.tf_poly_Z_field.coeff_mat
# Corrected parametric coeff matrix
@@ -446,21 +483,27 @@ def project_DD_features(self, tf_zernike_cube):
# Remove extracted features from non-parametric model
# Mix DD features with matrix alpha
- S_tilde = tf.tensordot(self.tf_np_poly_opd.alpha_mat, self.tf_np_poly_opd.S_mat, axes=1)
+ S_tilde = tf.tensordot(
+ self.tf_np_poly_opd.alpha_mat, self.tf_np_poly_opd.S_mat, axes=1
+ )
# Get beta tilde as the projection of the first n_param_poly_terms (6 for d_max=2) onto the first n_zernikes.
- beta_tilde_inner = np.array([[
- PI_zernikes(
- tf_zernike_cube[j, :, :],
- S_tilde_slice,
- n_pix_zernike
- ) for j in range(self.n_zernikes)
- ] for S_tilde_slice in S_tilde[:self.tf_poly_Z_field.coeff_mat.shape[1], :, :]])
+ beta_tilde_inner = np.array(
+ [
+ [
+ PI_zernikes(tf_zernike_cube[j, :, :], S_tilde_slice, n_pix_zernike)
+ for j in range(self.n_zernikes)
+ ]
+ for S_tilde_slice in S_tilde[
+ : self.tf_poly_Z_field.coeff_mat.shape[1], :, :
+ ]
+ ]
+ )
# Only pad in the first dimension so we get a matrix of size (d_max_nonparam_terms)x(n_zernikes) --> 21x15 or 21x45.
beta_tilde = np.pad(
beta_tilde_inner,
[(0, S_tilde.shape[0] - beta_tilde_inner.shape[0]), (0, 0)],
- mode='constant'
+ mode="constant",
)
# Unmix beta tilde with the inverse of alpha
@@ -494,7 +537,9 @@ def call(self, inputs):
zernike_coeffs = self.tf_poly_Z_field(input_positions)
param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
# Add l2 loss on the parametric OPD
- self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(param_opd_maps)))
+ self.add_loss(
+ self.l2_param * tf.math.reduce_sum(tf.math.square(param_opd_maps))
+ )
# Calculate the non parametric part
nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
# Add the estimations
@@ -506,7 +551,7 @@ def call(self, inputs):
class TF_physical_poly_field(tf.keras.Model):
- """ PSF field forward model with a physical layer
+ """PSF field forward model with a physical layer
WaveDiff-original with a physical layer
@@ -567,16 +612,16 @@ def __init__(
zks_prior,
output_Q,
d_max_nonparam=3,
- l2_param=0.,
+ l2_param=0.0,
output_dim=64,
n_zks_param=45,
d_max=2,
x_lims=[0, 1e3],
y_lims=[0, 1e3],
coeff_mat=None,
- interpolation_type='none',
+ interpolation_type="none",
interpolation_args=None,
- name='TF_physical_poly_field'
+ name="TF_physical_poly_field",
):
super(TF_physical_poly_field, self).__init__(name=name)
@@ -602,8 +647,10 @@ def __init__(
self.opd_dim = tf.shape(zernike_maps)[1].numpy()
# Check if the Zernike maps are enough
- if (self.n_zks_prior > self.n_zks_total) or (self.n_zks_param > self.n_zks_total):
- raise ValueError('The number of Zernike maps is not enough.')
+ if (self.n_zks_prior > self.n_zks_total) or (
+ self.n_zks_param > self.n_zks_total
+ ):
+ raise ValueError("The number of Zernike maps is not enough.")
# Inputs: TF_zernike_OPD
# They are not stored as they are memory-intensive
@@ -652,28 +699,28 @@ def __init__(
self.assign_coeff_matrix(coeff_mat)
def get_coeff_matrix(self):
- """ Get coefficient matrix."""
+ """Get coefficient matrix."""
return self.tf_poly_Z_field.get_coeff_matrix()
def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
+ """Assign coefficient matrix."""
self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
def set_zero_nonparam(self):
- """ Set to zero the non-parametric part."""
+ """Set to zero the non-parametric part."""
self.tf_np_poly_opd.set_alpha_zero()
def set_nonzero_nonparam(self):
- """ Set to non-zero the non-parametric part."""
+ """Set to non-zero the non-parametric part."""
self.tf_np_poly_opd.set_alpha_identity()
def set_trainable_layers(self, param_bool=True, nonparam_bool=True):
- """ Set the layers to be trainable or not."""
+ """Set the layers to be trainable or not."""
self.tf_np_poly_opd.trainable = nonparam_bool
self.tf_poly_Z_field.trainable = param_bool
def set_output_Q(self, output_Q, output_dim=None):
- """ Set the value of the output_Q parameter.
+ """Set the value of the output_Q parameter.
Useful for generating/predicting PSFs at a different sampling wrt the
observation sampling.
"""
@@ -683,11 +730,13 @@ def set_output_Q(self, output_Q, output_dim=None):
# Reinitialize the PSF batch poly generator
self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
def zks_pad(self, zk_param, zk_prior):
- """ Pad the zernike coefficients with zeros to have the same length.
+ """Pad the zernike coefficients with zeros to have the same length.
Pad them to have `n_zks_total` length.
@@ -731,7 +780,7 @@ def zks_pad(self, zk_param, zk_prior):
return padded_zk_param, padded_zk_prior
def predict_step(self, data, evaluate_step=False):
- r""" Custom predict (inference) step.
+ r"""Custom predict (inference) step.
It is needed as the physical layer requires a special
interpolation (different from training).
@@ -762,7 +811,7 @@ def predict_step(self, data, evaluate_step=False):
return poly_psfs
def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- """ Predict a set of monochromatic PSF at desired positions.
+ """Predict a set of monochromatic PSF at desired positions.
Parameters
----------
@@ -801,7 +850,7 @@ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
return mono_psf_batch
def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
+ """Predict the OPD at some positions.
Parameters
----------
@@ -826,7 +875,7 @@ def predict_opd(self, input_positions):
return opd_maps
def compute_zernikes(self, input_positions):
- """ Compute Zernike coefficients at a batch of positions
+ """Compute Zernike coefficients at a batch of positions
This includes the parametric model and the physical layer
@@ -852,7 +901,7 @@ def compute_zernikes(self, input_positions):
return zks_coeffs
def predict_zernikes(self, input_positions):
- """ Predict Zernike coefficients at a batch of positions
+ """Predict Zernike coefficients at a batch of positions
This includes the parametric model and the physical layer.
The prediction of the physical layer to positions is not used
@@ -899,7 +948,9 @@ def call(self, inputs, training=True):
# Propagate to obtain the OPD
param_opd_maps = self.tf_zernike_OPD(zks_coeffs)
# Add l2 loss on the parametric OPD
- self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(param_opd_maps)))
+ self.add_loss(
+ self.l2_param * tf.math.reduce_sum(tf.math.square(param_opd_maps))
+ )
# Calculate the non parametric part
nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
# Add the estimations
@@ -915,7 +966,7 @@ def call(self, inputs, training=True):
class TF_GT_physical_field(tf.keras.Model):
- """ Ground truth PSF field forward model with a physical layer
+ """Ground truth PSF field forward model with a physical layer
Ground truth PSF field used for evaluation purposes.
@@ -956,7 +1007,7 @@ def __init__(
zks_prior,
output_Q,
output_dim=64,
- name='TF_GT_physical_field'
+ name="TF_GT_physical_field",
):
super(TF_GT_physical_field, self).__init__()
@@ -971,7 +1022,7 @@ def __init__(
# Check if the Zernike maps are enough
if self.n_zks_prior > self.n_zks_total:
- raise ValueError('The number of Zernike maps is not enough.')
+ raise ValueError("The number of Zernike maps is not enough.")
# Inputs: TF_zernike_OPD
# They are not stored as they are memory-intensive
@@ -986,7 +1037,7 @@ def __init__(
self.tf_physical_layer = TF_physical_layer(
self.obs_pos,
self.zks_prior,
- interpolation_type='none',
+ interpolation_type="none",
)
# Initialize the zernike to OPD layer
self.tf_zernike_OPD = TF_zernike_OPD(zernike_maps=zernike_maps)
@@ -999,7 +1050,7 @@ def __init__(
)
def set_output_Q(self, output_Q, output_dim=None):
- """ Set the value of the output_Q parameter.
+ """Set the value of the output_Q parameter.
Useful for generating/predicting PSFs at a different sampling wrt the
observation sampling.
"""
@@ -1009,11 +1060,13 @@ def set_output_Q(self, output_Q, output_dim=None):
# Reinitialize the PSF batch poly generator
self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
+ obscurations=self.obscurations,
+ output_Q=self.output_Q,
+ output_dim=self.output_dim,
)
def predict_step(self, data, evaluate_step=False):
- r""" Custom predict (inference) step.
+ r"""Custom predict (inference) step.
It is needed as the physical layer requires a special
interpolation (different from training).
@@ -1040,7 +1093,7 @@ def predict_step(self, data, evaluate_step=False):
return poly_psfs
def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- """ Predict a set of monochromatic PSF at desired positions.
+ """Predict a set of monochromatic PSF at desired positions.
Parameters
----------
@@ -1074,7 +1127,7 @@ def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
return mono_psf_batch
def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
+ """Predict the OPD at some positions.
Parameters
----------
@@ -1095,7 +1148,7 @@ def predict_opd(self, input_positions):
return opd_maps
def compute_zernikes(self, input_positions):
- """ Compute Zernike coefficients at a batch of positions
+ """Compute Zernike coefficients at a batch of positions
This only includes the physical layer
@@ -1115,7 +1168,7 @@ def compute_zernikes(self, input_positions):
return self.tf_physical_layer.call(input_positions)
def predict_zernikes(self, input_positions):
- """ Predict Zernike coefficients at a batch of positions
+ """Predict Zernike coefficients at a batch of positions
This only includes the physical layer.
For the moment, it is the same as the `compute_zernikes`.
@@ -1159,7 +1212,7 @@ def call(self, inputs, training=True):
def build_PSF_model(model_inst, optimizer=None, loss=None, metrics=None):
- """ Define the model-compilation parameters.
+ """Define the model-compilation parameters.
Especially the loss function, the optimizer and the metrics.
"""
@@ -1184,7 +1237,7 @@ def build_PSF_model(model_inst, optimizer=None, loss=None, metrics=None):
metrics=metrics,
loss_weights=None,
weighted_metrics=None,
- run_eagerly=False
+ run_eagerly=False,
)
return model_inst
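For context, a hedged sketch of compiling one of the models above with `build_PSF_model`; the optimizer and loss choices are illustrative, not repository defaults, and `build_PSF_model` is the function defined just above (its module path is not shown in this diff):

    import tensorflow as tf

    def compile_for_training(psf_field):
        # `psf_field` stands for an already-built instance of one of the
        # tf.keras.Model subclasses above (e.g. TF_SemiParam_field).
        return build_PSF_model(
            psf_field,
            optimizer=tf.keras.optimizers.Adam(learning_rate=1e-2),
            loss=tf.keras.losses.MeanSquaredError(),
            metrics=[tf.keras.metrics.MeanSquaredError()],
        )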
diff --git a/src/wf_psf/psf_models/zernikes.py b/src/wf_psf/psf_models/zernikes.py
new file mode 100644
index 00000000..9c099fb2
--- /dev/null
+++ b/src/wf_psf/psf_models/zernikes.py
@@ -0,0 +1,58 @@
+"""Zernikes.
+
+A module to make Zernike maps.
+
+:Author: Tobias Liaudat and Jennifer Pollack
+
+"""
+import numpy as np
+import zernike as zk
+import tensorflow as tf
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def zernike_generator(n_zernikes, wfe_dim):
+ """
+ Generate Zernike maps.
+
+ Based on the zernike github repository.
+ https://github.com/jacopoantonello/zernike
+
+ Parameters
+ ----------
+ n_zernikes: int
+ Number of Zernike modes desired.
+ wfe_dim: int
+ Dimension of the Zernike map [wfe_dim x wfe_dim].
+
+ Returns
+ -------
+ zernikes: list of np.ndarray
+ List containing the Zernike modes.
+ The values outside the unit circle are filled with NaNs.
+ """
+ # Calculate which n (from the (n,m) Zernike convention) we need
+ # so that we have the desired total number of Zernike coefficients
+ min_n = (-3 + np.sqrt(1 + 8 * n_zernikes)) / 2
+ n = int(np.ceil(min_n))
+
+ # Initialize the zernike generator
+ cart = zk.RZern(n)
+ # Create a [-1,1] mesh
+ ddx = np.linspace(-1.0, 1.0, wfe_dim)
+ ddy = np.linspace(-1.0, 1.0, wfe_dim)
+ xv, yv = np.meshgrid(ddx, ddy)
+ cart.make_cart_grid(xv, yv)
+
+ c = np.zeros(cart.nk)
+ zernikes = []
+
+ # Extract each Zernike map one by one
+ for i in range(n_zernikes):
+ c *= 0.0
+ c[i] = 1.0
+ zernikes.append(cart.eval_grid(c, matrix=True))
+
+ return zernikes
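The `min_n` formula comes from the count of Zernike modes up to radial order n, (n+1)(n+2)/2; solving (n+1)(n+2)/2 >= n_zernikes for n yields the expression above. A quick usage check under that reading:

    import numpy as np
    from wf_psf.psf_models.zernikes import zernike_generator

    # For n_zernikes = 15: min_n = (-3 + sqrt(1 + 8*15)) / 2 = 4, and indeed
    # (4+1)(4+2)/2 = 15 modes exist up to radial order 4.
    zernikes = zernike_generator(n_zernikes=15, wfe_dim=256)
    assert len(zernikes) == 15 and zernikes[0].shape == (256, 256)
    # Values outside the unit circle are NaN, as documented above.
    assert np.isnan(zernikes[0][0, 0])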
diff --git a/src/wf_psf/run.py b/src/wf_psf/run.py
new file mode 100644
index 00000000..304e2fbf
--- /dev/null
+++ b/src/wf_psf/run.py
@@ -0,0 +1,107 @@
+"""WF_PSF Run.
+
+This module sets up the run of the WF_PSF pipeline.
+
+:Author: Jennifer Pollack
+
+"""
+import argparse
+from wf_psf.utils.read_config import read_stream
+from wf_psf.utils.io import FileIOHandler
+import os
+import logging.config
+import logging
+from wf_psf.utils.configs_handler import get_run_config
+
+
+def setProgramOptions():
+ """Define Program Options.
+
+ Set command-line options for
+ this program.
+
+ Returns
+ -------
+ args: argparse.Namespace object
+ Argument Parser Namespace object
+
+ """
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument(
+ "--conffile",
+ "-c",
+ type=str,
+ required=True,
+ help="a configuration file containing program settings.",
+ )
+
+ parser.add_argument(
+ "--repodir",
+ "-r",
+ type=str,
+ required=True,
+ help="the path of the code repository directory.",
+ )
+
+ parser.add_argument(
+ "--outputdir",
+ "-o",
+ type=str,
+ required=True,
+ help="the path of the output directory.",
+ )
+
+ args = parser.parse_args()
+
+ return args
+
+
+def mainMethod():
+ """Main Method.
+
+ The main entry point to the wavediff program.
+
+
+ """
+ args = setProgramOptions()
+
+ configs_path = os.path.dirname(args.conffile)
+ configs = read_stream(args.conffile)
+ configs_file = os.path.basename(args.conffile)
+
+ file_handler = FileIOHandler(args.repodir, args.outputdir, configs_path)
+ file_handler.setup_outputs()
+ file_handler.copy_conffile_to_output_dir(configs_file)
+
+ logger = logging.getLogger("wavediff")
+
+ logger.info("#")
+ logger.info("# Entering wavediff mainMethod()")
+ logger.info("#")
+
+ try:
+ for conf in configs:
+ for k, v in conf.items():
+ config_class = get_run_config(
+ k, os.path.join(configs_path, v), file_handler
+ )
+ logger.info(config_class)
+ file_handler.copy_conffile_to_output_dir(v)
+ config_class.run()
+
+ except Exception as e:
+ logger.error(
+ "Check your config file {} for errors. Error Msg: {}.".format(
+ args.conffile, e
+ ),
+ exc_info=True,
+ )
+
+ logger.info("#")
+ logger.info("# Exiting wavediff mainMethod()")
+ logger.info("#")
+
+
+if __name__ == "__main__":
+ mainMethod()
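A minimal smoke test of this entry point, assuming the package is installed; the paths below are placeholders:

    import sys
    from wf_psf.run import setProgramOptions

    # Simulates:  python src/wf_psf/run.py -c configs.yaml -r <repo> -o <out>
    sys.argv = [
        "run.py",
        "--conffile", "config/configs.yaml",
        "--repodir", "/path/to/wf-psf",
        "--outputdir", "/tmp/wf-psf-output",
    ]
    args = setProgramOptions()
    assert args.outputdir == "/tmp/wf-psf-output"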
diff --git a/wf_psf/SimPSFToolkit.py b/src/wf_psf/sims/SimPSFToolkit.py
similarity index 77%
rename from wf_psf/SimPSFToolkit.py
rename to src/wf_psf/sims/SimPSFToolkit.py
index 743212c5..7b9758f1 100644
--- a/wf_psf/SimPSFToolkit.py
+++ b/src/wf_psf/sims/SimPSFToolkit.py
@@ -6,16 +6,19 @@
import matplotlib as mpl
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
+from wf_psf.utils.utils import PI_zernikes, zernike_generator
+
try:
from cv2 import resize, INTER_AREA
except:
- print('Problem importing opencv..')
+ print("Problem importing opencv..")
try:
from skimage.transform import downscale_local_mean
- print('Falling back to skimage.')
- print('Only integer downsampling allowed with this method.')
+
+ print("Falling back to skimage.")
+ print("Only integer downsampling allowed with this method.")
except:
- print('Problem importing skimage..')
+ print("Problem importing skimage..")
class SimPSFToolkit(object):
@@ -26,8 +29,9 @@ class SimPSFToolkit(object):
Parameters
----------
- zernike_maps: list of np.ndarray
- Each element of the list should contain a Zernike map of the order
+ zernike_maps: list of np.ndarray (removed; the maps are now generated
+ internally from ``max_order`` and ``pupil_diameter``)
+ Each element of the list should contain a Zernike map of the order
(OSA/ANSI index convention) corresponding to the position in the list.
max_order: int
Maximum Zernike polynomial order. Default is `45`.
@@ -75,9 +79,9 @@ class SimPSFToolkit(object):
Self-explanatory variable. Default is `0`, use a value `>0` to activate.
SED_sigma: float
Standard deviation of the multiplicative SED Gaussian noise.
- interp_pts_per_bin: int
+ SED_interp_pts_per_bin: int
Number of points to interpolate in between SED values. It can be 0, 1 or 2.
- extrapolate: bool
+ SED_extrapolate: bool
SED interpolation mode. Default mode uses extrapolation.
SED_interp_kind: str
SED interpolation kind. Options are `'cubic'` or `'linear'`.
@@ -86,13 +90,13 @@ class SimPSFToolkit(object):
def __init__(
self,
- zernike_maps,
+ # zernike_maps,
max_order=45,
max_wfe_rms=0.1,
output_dim=64,
rand_seed=None,
plot_opt=False,
- oversampling_rate=3.,
+ oversampling_rate=3.0,
output_Q=1,
pix_sampling=12,
tel_diameter=1.2,
@@ -102,32 +106,35 @@ def __init__(
LP_filter_length=3,
verbose=0,
SED_sigma=0,
- interp_pts_per_bin=0,
- extrapolate=True,
- SED_interp_kind='linear'
+ SED_interp_pts_per_bin=0,
+ SED_extrapolate=True,
+ SED_interp_kind="linear",
):
+ # Telescope characteristics
+ self.oversampling_rate = oversampling_rate # dimensionless
+ self.output_Q = output_Q # dimensionless
+ self.pix_sampling = pix_sampling # In [um]
+ self.tel_diameter = tel_diameter # In [m]
+ self.tel_focal_length = tel_focal_length # In [m]
+ self.pupil_diameter = pupil_diameter # In [pix]
+
# Input attributes
self.max_order = max_order
self.rand_seed = rand_seed
self.plot_opt = plot_opt
- self.zernike_maps = zernike_maps
+ self.zernike_maps = zernike_generator(self.max_order, self.pupil_diameter)
+ # self.zernike_maps = zernike_maps
self.max_wfe_rms = max_wfe_rms # In [um]
self.output_dim = output_dim # In pixels per dimension
self.verbose = verbose
self.SED_sigma = SED_sigma # std dev for the SED noise distribution
- self.interp_pts_per_bin = interp_pts_per_bin # Number of points to add to each SED bin
- self.extrapolate = extrapolate # SED interpolation mode
+ self.SED_interp_pts_per_bin = (
+ SED_interp_pts_per_bin # Number of points to add to each SED bin
+ )
+ self.SED_extrapolate = SED_extrapolate # SED interpolation mode
self.SED_interp_kind = SED_interp_kind # Type of interpolation for the SED
- # Telescope characteristics
- self.oversampling_rate = oversampling_rate # dimensionless
- self.output_Q = output_Q # dimensionless
- self.pix_sampling = pix_sampling # In [um]
- self.tel_diameter = tel_diameter # In [m]
- self.tel_focal_length = tel_focal_length # In [m]
- self.pupil_diameter = pupil_diameter # In [pix]
-
# Class attributes
self.z_coeffs = None
self.psf = None
@@ -149,7 +156,7 @@ def __init__(
@staticmethod
def _OLD_fft_diffraction_op(wf, pupil_mask, pad_factor=2, match_shapes=True):
- """ Perform a fft-based diffraction.
+ """Perform a fft-based diffraction.
Parameters
----------
@@ -168,22 +175,23 @@ def _OLD_fft_diffraction_op(wf, pupil_mask, pad_factor=2, match_shapes=True):
start = (wf.shape[0] * pad_factor) // 2 - wf.shape[0] // 2
stop = (wf.shape[0] * pad_factor) // 2 + wf.shape[0] // 2
- padded_wf = np.zeros((wf.shape[0] * pad_factor, wf.shape[1] * pad_factor),
- dtype=np.complex128)
+ padded_wf = np.zeros(
+ (wf.shape[0] * pad_factor, wf.shape[1] * pad_factor), dtype=np.complex128
+ )
padded_wf[start:stop, start:stop][pupil_mask] = wf[pupil_mask]
fft_wf = np.fft.fftshift(np.fft.fft2(padded_wf))
# fft_wf = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(padded_wf)))
- psf = np.abs(fft_wf)**2
+ psf = np.abs(fft_wf) ** 2
if match_shapes:
# Return the psf with its original shape without the padding factor
x_dif = int((psf.shape[0] / pad_factor) // 2)
y_dif = int((psf.shape[1] / pad_factor) // 2)
- return psf[x_dif:psf.shape[0] - x_dif, y_dif:psf.shape[1] - y_dif]
+ return psf[x_dif : psf.shape[0] - x_dif, y_dif : psf.shape[1] - y_dif]
else:
return psf
@@ -191,7 +199,7 @@ def _OLD_fft_diffraction_op(wf, pupil_mask, pad_factor=2, match_shapes=True):
def fft_diffract(wf, output_Q, output_dim=64):
# Perform the FFT-based diffraction operation
fft_wf = np.fft.fftshift(np.fft.fft2(wf))
- psf = np.abs(fft_wf)**2
+ psf = np.abs(fft_wf) ** 2
# Calculate crop dimensions
if output_dim * output_Q < psf.shape[0]:
@@ -207,7 +215,9 @@ def fft_diffract(wf, output_Q, output_dim=64):
# Downsample the image depending on `self.output_Q`
try:
psf = resize(
- src=psf, dsize=(int(output_dim), int(output_dim)), interpolation=INTER_AREA
+ src=psf,
+ dsize=(int(output_dim), int(output_dim)),
+ interpolation=INTER_AREA,
)
except:
f_x = int(psf.shape[0] / output_dim)
@@ -261,15 +271,19 @@ def generate_pupil_obscurations(N_pix=1024, N_filter=3):
# coordinates of map in [mm]
W, H = np.meshgrid(
np.linspace(-AS_diam // 2, AS_diam // 2, N_pix),
- np.linspace(-AS_diam // 2, AS_diam // 2, N_pix)
+ np.linspace(-AS_diam // 2, AS_diam // 2, N_pix),
)
### Calculate the Aperture stop and draw it ###
- aperture_stop_mask = np.sqrt((W - AS_centre[0])**2 + (H - AS_centre[1])**2) <= (AS_diam / 2)
+ aperture_stop_mask = np.sqrt(
+ (W - AS_centre[0]) ** 2 + (H - AS_centre[1]) ** 2
+ ) <= (AS_diam / 2)
pupil_plane[~aperture_stop_mask] = 0
### Calculate the M1/M2 obscurations and draw them ###
- M1_mask = np.sqrt((W - M1_centre[0])**2 + (H - M1_centre[1])**2) <= (M1_diam / 2)
+ M1_mask = np.sqrt((W - M1_centre[0]) ** 2 + (H - M1_centre[1]) ** 2) <= (
+ M1_diam / 2
+ )
pupil_plane[M1_mask] = 0
### Calculate the spiders and draw them ###
@@ -282,7 +296,9 @@ def generate_pupil_obscurations(N_pix=1024, N_filter=3):
sp1_mask_2 = sp1_a * W + sp1_b + sp_width / 2 * np.sqrt(1 + sp1_a**2) > H
sp1_mask = np.logical_and(sp1_mask_1, sp1_mask_2)
- sp1_length_mask = np.sqrt((W - sp1_x_pos)**2 + (H - sp1_y_pos)**2) <= (sp_lenght / 2)
+ sp1_length_mask = np.sqrt((W - sp1_x_pos) ** 2 + (H - sp1_y_pos) ** 2) <= (
+ sp_lenght / 2
+ )
sp1_mask = np.logical_and(sp1_mask, sp1_length_mask)
# Spider 2
@@ -293,7 +309,9 @@ def generate_pupil_obscurations(N_pix=1024, N_filter=3):
sp2_mask_2 = sp2_a * W + sp2_b + sp_width / 2 * np.sqrt(1 + sp2_a**2) > H
sp2_mask = np.logical_and(sp2_mask_1, sp2_mask_2)
- sp2_length_mask = np.sqrt((W - sp2_x_pos)**2 + (H - sp2_y_pos)**2) <= (sp_lenght / 2)
+ sp2_length_mask = np.sqrt((W - sp2_x_pos) ** 2 + (H - sp2_y_pos) ** 2) <= (
+ sp_lenght / 2
+ )
sp2_mask = np.logical_and(sp2_mask, sp2_length_mask)
# Spider 3
@@ -304,7 +322,9 @@ def generate_pupil_obscurations(N_pix=1024, N_filter=3):
sp3_mask_2 = sp3_a * W + sp3_b + sp_width / 2 * np.sqrt(1 + sp3_a**2) > H
sp3_mask = np.logical_and(sp3_mask_1, sp3_mask_2)
- sp3_length_mask = np.sqrt((W - sp3_x_pos)**2 + (H - sp3_y_pos)**2) <= (sp_lenght / 2)
+ sp3_length_mask = np.sqrt((W - sp3_x_pos) ** 2 + (H - sp3_y_pos) ** 2) <= (
+ sp_lenght / 2
+ )
sp3_mask = np.logical_and(sp3_mask, sp3_length_mask)
# Draw the three spider arms
@@ -316,7 +336,7 @@ def generate_pupil_obscurations(N_pix=1024, N_filter=3):
top_hat_filter = np.ones((N_filter, N_filter))
pupil_plane = spsig.convolve2d(
- pupil_plane, top_hat_filter, boundary='fill', mode='same', fillvalue=0
+ pupil_plane, top_hat_filter, boundary="fill", mode="same", fillvalue=0
)
pupil_plane /= np.sum(top_hat_filter)
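The smoothing step above is a plain box-filter convolution; the same call on a toy pupil, self-contained:

    import numpy as np
    import scipy.signal as spsig

    # Toy binary pupil with a hard edge; the 3x3 top-hat softens it,
    # as in generate_pupil_obscurations with the default N_filter=3.
    pupil_plane = np.ones((16, 16))
    pupil_plane[:, :4] = 0
    top_hat_filter = np.ones((3, 3))
    pupil_plane = spsig.convolve2d(
        pupil_plane, top_hat_filter, boundary="fill", mode="same", fillvalue=0
    )
    pupil_plane /= np.sum(top_hat_filter)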
@@ -331,7 +351,9 @@ def crop_img(to_crop_img, ref_im):
delta_x = int(ref_im.shape[0] // 2)
delta_y = int(ref_im.shape[1] // 2)
- return to_crop_img[cent_x - delta_x:cent_x + delta_x, cent_y - delta_y:cent_y + delta_y]
+ return to_crop_img[
+ cent_x - delta_x : cent_x + delta_x, cent_y - delta_y : cent_y + delta_y
+ ]
@staticmethod
def decimate_im(input_im, decim_f):
@@ -353,7 +375,7 @@ def get_radial_idx(max_order=45):
it = 1
radial_idxs = []
- while (len(radial_idxs) <= max_order):
+ while len(radial_idxs) <= max_order:
for _it in range(it):
radial_idxs.append(it - 1)
@@ -362,76 +384,80 @@ def get_radial_idx(max_order=45):
return np.array(radial_idxs)
@staticmethod
- def psf_plotter(psf, lambda_obs=0.000, cmap='gist_stern', save_img=False):
+ def psf_plotter(psf, lambda_obs=0.000, cmap="gist_stern", save_img=False):
fig = plt.figure(figsize=(18, 10))
ax1 = fig.add_subplot(131)
- im1 = ax1.imshow(psf, cmap=cmap, interpolation='None')
+ im1 = ax1.imshow(psf, cmap=cmap, interpolation="None")
divider = make_axes_locatable(ax1)
- cax = divider.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im1, cax=cax, orientation='vertical')
+ cax = divider.append_axes("right", size="5%", pad=0.05)
+ fig.colorbar(im1, cax=cax, orientation="vertical")
ax1.set_xticks([])
ax1.set_yticks([])
- ax1.set_title('PSF (lambda=%.3f [um])' % (lambda_obs))
+ ax1.set_title("PSF (lambda=%.3f [um])" % (lambda_obs))
ax2 = fig.add_subplot(132)
- im2 = ax2.imshow(np.sqrt(abs(psf)), cmap=cmap, interpolation='None')
+ im2 = ax2.imshow(np.sqrt(abs(psf)), cmap=cmap, interpolation="None")
divider2 = make_axes_locatable(ax2)
- cax2 = divider2.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im2, cax=cax2, orientation='vertical')
- ax2.set_title('sqrt PSF (lambda=%.3f [um])' % (lambda_obs))
+ cax2 = divider2.append_axes("right", size="5%", pad=0.05)
+ fig.colorbar(im2, cax=cax2, orientation="vertical")
+ ax2.set_title("sqrt PSF (lambda=%.3f [um])" % (lambda_obs))
ax2.set_xticks([])
ax2.set_yticks([])
ax3 = fig.add_subplot(133)
- im3 = ax3.imshow(np.log(abs(psf)), cmap=cmap, interpolation='None')
+ im3 = ax3.imshow(np.log(abs(psf)), cmap=cmap, interpolation="None")
divider3 = make_axes_locatable(ax3)
- cax3 = divider3.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im3, cax=cax3, orientation='vertical')
- ax3.set_title('log PSF (lambda=%.3f [um])' % (lambda_obs))
+ cax3 = divider3.append_axes("right", size="5%", pad=0.05)
+ fig.colorbar(im3, cax=cax3, orientation="vertical")
+ ax3.set_title("log PSF (lambda=%.3f [um])" % (lambda_obs))
ax3.set_xticks([])
ax3.set_yticks([])
if save_img:
- plt.savefig('./PSF_lambda_%.3f.pdf' % lambda_obs, bbox_inches='tight')
+ plt.savefig("./PSF_lambda_%.3f.pdf" % lambda_obs, bbox_inches="tight")
plt.show()
@staticmethod
- def opd_phase_plotter(pupil_mask, opd, phase, lambda_obs, cmap='viridis', save_img=False):
+ def opd_phase_plotter(
+ pupil_mask, opd, phase, lambda_obs, cmap="viridis", save_img=False
+ ):
fig = plt.figure(figsize=(18, 10))
ax1 = fig.add_subplot(131)
- im1 = ax1.imshow(pupil_mask, interpolation='None')
+ im1 = ax1.imshow(pupil_mask, interpolation="None")
divider = make_axes_locatable(ax1)
- cax = divider.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im1, cax=cax, orientation='vertical')
- ax1.set_title('Pupil mask')
+ cax = divider.append_axes("right", size="5%", pad=0.05)
+ fig.colorbar(im1, cax=cax, orientation="vertical")
+ ax1.set_title("Pupil mask")
ax1.set_xticks([])
ax1.set_yticks([])
vmax = np.max(abs(opd))
ax2 = fig.add_subplot(132)
- im2 = ax2.imshow(opd, cmap=cmap, interpolation='None', vmin=-vmax, vmax=vmax)
+ im2 = ax2.imshow(opd, cmap=cmap, interpolation="None", vmin=-vmax, vmax=vmax)
divider2 = make_axes_locatable(ax2)
- cax2 = divider2.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im2, cax=cax2, orientation='vertical')
- ax2.set_title('OPD [um]')
+ cax2 = divider2.append_axes("right", size="5%", pad=0.05)
+ fig.colorbar(im2, cax=cax2, orientation="vertical")
+ ax2.set_title("OPD [um]")
ax2.set_xticks([])
ax2.set_yticks([])
vmax = np.max(abs(np.angle(phase)))
ax3 = fig.add_subplot(133)
- im3 = ax3.imshow(np.angle(phase), cmap=cmap, interpolation='None', vmin=-vmax, vmax=vmax)
+ im3 = ax3.imshow(
+ np.angle(phase), cmap=cmap, interpolation="None", vmin=-vmax, vmax=vmax
+ )
divider3 = make_axes_locatable(ax3)
- cax3 = divider3.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im3, cax=cax3, orientation='vertical')
- ax3.set_title('W phase [rad](wv=%.2f[um])' % (lambda_obs))
+ cax3 = divider3.append_axes("right", size="5%", pad=0.05)
+ fig.colorbar(im3, cax=cax3, orientation="vertical")
+ ax3.set_title("W phase [rad](wv=%.2f[um])" % (lambda_obs))
ax3.set_xticks([])
ax3.set_yticks([])
if save_img:
- plt.savefig('./OPD_lambda_%.3f.pdf' % lambda_obs, bbox_inches='tight')
+ plt.savefig("./OPD_lambda_%.3f.pdf" % lambda_obs, bbox_inches="tight")
plt.show()
@@ -439,25 +465,30 @@ def get_psf(self):
if self.psf is not None:
return self.psf
else:
- print('No PSF has been computed yet.')
+ print("No PSF has been computed yet.")
- def plot_psf(self, cmap='gist_stern', save_img=False):
+ def plot_psf(self, cmap="gist_stern", save_img=False):
if self.psf is not None:
self.psf_plotter(self.psf, self.lambda_obs, cmap, save_img)
else:
- print('No PSF has been computed yet.')
+ print("No PSF has been computed yet.")
- def plot_opd_phase(self, cmap='viridis', save_img=False):
+ def plot_opd_phase(self, cmap="viridis", save_img=False):
if self.opd is not None:
self.opd_phase_plotter(
- self.pupil_mask * self.obscurations, self.opd * self.obscurations, self.phase,
- self.lambda_obs, cmap, save_img
+ self.pupil_mask * self.obscurations,
+ self.opd * self.obscurations,
+ self.phase,
+ self.lambda_obs,
+ cmap,
+ save_img,
)
else:
- print('No WF has been computed yet.')
+ print("No WF has been computed yet.")
+ # This method is a setter
def gen_random_Z_coeffs(self, max_order=45, rand_seed=None):
- """ Generate a random set of Zernike coefficients.
+ """Generate a random set of Zernike coefficients.
The coefficients are generated following a uniform law U~[-1,1]
divided by their radial zernike index.
@@ -485,7 +516,7 @@ def gen_random_Z_coeffs(self, max_order=45, rand_seed=None):
z_coeffs = []
for it in range(max_order):
- z_coeffs.append((np.random.rand() - 0.5) * 2. / rad_idx[it])
+ z_coeffs.append((np.random.rand() - 0.5) * 2.0 / rad_idx[it])
self.z_coeffs = z_coeffs
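The law above is uniform noise damped by the radial index, so higher-order aberrations get smaller coefficients. A vectorised sketch; the radial indices here are illustrative and start at 1 to sidestep the zero radial index that `get_radial_idx` assigns to the first mode:

    import numpy as np

    rad_idx = np.array([1, 1, 2, 2, 2, 3, 3, 3, 3])  # illustrative radial indices
    z_coeffs = (np.random.rand(rad_idx.size) - 0.5) * 2.0 / rad_idx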
@@ -495,29 +526,29 @@ def plot_z_coeffs(self, save_img=False):
fig = plt.figure(figsize=(12, 6))
ax1 = fig.add_subplot(111)
im1 = ax1.bar(np.arange(len(self.z_coeffs)), np.array(self.z_coeffs))
- ax1.set_xlabel('Zernike coefficients')
- ax1.set_ylabel('Magnitude')
+ ax1.set_xlabel("Zernike coefficients")
+ ax1.set_ylabel("Magnitude")
if save_img:
- plt.savefig('./Z_coeffs.pdf', bbox_inches='tight')
+ plt.savefig("./Z_coeffs.pdf", bbox_inches="tight")
plt.show()
else:
- print('Random coeffs not generated.')
+ print("Random coeffs not generated.")
def get_z_coeffs(self):
"""Get random coefficients"""
if self.z_coeffs is not None:
return self.z_coeffs
else:
- print('Random coeffs not generated.')
+ print("Random coeffs not generated.")
def set_z_coeffs(self, z_coeffs):
"""Set zernike coefficients."""
if len(z_coeffs) == self.max_order:
self.z_coeffs = z_coeffs
else:
- print('Zernike coefficients should be of length %d' % (self.max_order))
+ print("Zernike coefficients should be of length %d" % (self.max_order))
def normalize_zernikes(self, z_coeffs=None, max_wfe_rms=None):
"""Normalize zernike coefficients."""
@@ -551,7 +582,9 @@ def calculate_wfe_rms(self, z_coeffs=None):
opd *= self.obscurations
# Calculate normalization factor
- wfe_rms = np.sqrt(np.mean((opd[self.pupil_mask] - np.mean(opd[self.pupil_mask]))**2))
+ wfe_rms = np.sqrt(
+ np.mean((opd[self.pupil_mask] - np.mean(opd[self.pupil_mask])) ** 2)
+ )
return wfe_rms
@@ -570,8 +603,8 @@ def generate_mono_PSF(self, lambda_obs=0.725, regen_sample=False, get_psf=False)
"""Generate monochromatic PSF."""
if lambda_obs < 0.55 * 0.9 or lambda_obs > 0.9 * 1.1:
print(
- 'WARNING: requested wavelength %.4f um is not in VIS passband [0.55,0.9]um' %
- (lambda_obs)
+ "WARNING: requested wavelength %.4f um is not in VIS passband [0.55,0.9]um"
+ % (lambda_obs)
)
self.lambda_obs = lambda_obs
@@ -613,12 +646,15 @@ def diffract_phase(self, lambda_obs=None):
"""Diffract the phase map."""
if lambda_obs is None:
if self.lambda_obs is None:
- print('WARNING: No wavelength is defined. Using default value 0.8um.')
+ print("WARNING: No wavelength is defined. Using default value 0.8um.")
lambda_obs = 0.8
else:
lambda_obs = self.lambda_obs
elif lambda_obs < 0.55 * 0.99 or lambda_obs > 0.9 * 1.01:
- print('WARNING: wavelength %.4f is not in VIS passband [0.55,0.9]um' % (lambda_obs))
+ print(
+ "WARNING: wavelength %.4f is not in VIS passband [0.55,0.9]um"
+ % (lambda_obs)
+ )
# Calculate the feasible lambda closest to lambda_obs
possible_lambda = self.feasible_wavelength(lambda_obs)
@@ -635,9 +671,9 @@ def diffract_phase(self, lambda_obs=None):
stop = possible_N // 2 + self.opd.shape[0] // 2
self.phase = np.zeros((possible_N, possible_N), dtype=np.complex128)
- self.phase[start:stop,
- start:stop][self.pupil_mask
- ] = np.exp(2j * np.pi * self.opd[self.pupil_mask] / self.lambda_obs)
+ self.phase[start:stop, start:stop][self.pupil_mask] = np.exp(
+ 2j * np.pi * self.opd[self.pupil_mask] / self.lambda_obs
+ )
# Project obscurations to the phase
self.phase[start:stop, start:stop] *= self.obscurations
@@ -656,19 +692,26 @@ def feasible_N(self, lambda_obs):
Input wavelength must be in [um].
"""
# Calculate the required N for the input lambda_obs
- req_N = (self.oversampling_rate * self.pupil_diameter * lambda_obs *
- self.tel_focal_length) / (
- self.tel_diameter * self.pix_sampling
- )
+ req_N = (
+ self.oversampling_rate
+ * self.pupil_diameter
+ * lambda_obs
+ * self.tel_focal_length
+ ) / (self.tel_diameter * self.pix_sampling)
# Recalculate the req_N into a possible value (an even integer)
possible_N = int((req_N // 2) * 2)
return possible_N
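A worked example of the rounding, using the defaults visible in this diff (oversampling_rate=3, pix_sampling=12 um, tel_diameter=1.2 m) plus an assumed focal length, since that default is not shown here:

    oversampling_rate = 3.0   # dimensionless
    pupil_diameter = 256      # [pix]
    lambda_obs = 0.8          # [um]
    tel_focal_length = 24.5   # [m], assumed illustrative value
    tel_diameter = 1.2        # [m]
    pix_sampling = 12         # [um]

    req_N = (
        oversampling_rate * pupil_diameter * lambda_obs * tel_focal_length
    ) / (tel_diameter * pix_sampling)
    possible_N = int((req_N // 2) * 2)  # round down to an even integer
    # req_N ~ 1045.33, so possible_N == 1044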
def feasible_wavelength(self, lambda_obs):
- """Calculate closest fesible wavelength to target wavelength.
+ """Calculate closest feasible wavelength to target wavelength.
Input wavelength must be in [um].
+
+ Parameters
+ ----------
+ lambda_obs: float
+
"""
# Calculate a feasible N for the input lambda_obs
possible_N = self.feasible_N(lambda_obs)
@@ -680,12 +723,15 @@ def feasible_wavelength(self, lambda_obs):
if self.verbose > 0:
# print("Requested wavelength: %.5f \nRequired N: %.2f"%(lambda_obs, req_N))
- print("Possible wavelength: %.5f \nPossible N: %.2f" % (possible_lambda, possible_N))
+ print(
+ "Possible wavelength: %.5f \nPossible N: %.2f"
+ % (possible_lambda, possible_N)
+ )
return possible_lambda
@staticmethod
- def gen_SED_interp(SED, n_bins=35, interp_kind='cubic'):
+ def gen_SED_interp(SED, n_bins=35, interp_kind="cubic"):
"""Generate SED interpolator.
Returns the interpolator and the wavelengths in [nm].
@@ -696,7 +742,11 @@ def gen_SED_interp(SED, n_bins=35, interp_kind='cubic'):
wvlength = np.linspace(wv_min, wv_max, num=n_bins, endpoint=True)
SED_interp = sinterp.interp1d(
- SED[:, 0], SED[:, 1], kind=interp_kind, bounds_error=False, fill_value="extrapolate"
+ SED[:, 0],
+ SED[:, 1],
+ kind=interp_kind,
+ bounds_error=False,
+ fill_value="extrapolate",
)
return wvlength, SED_interp
@@ -704,7 +754,7 @@ def gen_SED_interp(SED, n_bins=35, interp_kind='cubic'):
@staticmethod
def filter_SED(SED, n_bins, filter_lims=None):
"""Generate filtered SED.
-
+
Returns a 'n_bins' point SED and wvlength vector.
Each bin 'i' is obtained by integrating the SED from filter_lims[i][0] to filter_lims[i][1]
@@ -728,7 +778,7 @@ def filter_SED(SED, n_bins, filter_lims=None):
# If not given, define equispaced, equally sized filters
if filter_lims is None:
wvlength = np.linspace(wv_min, wv_max, num=n_bins + 1, endpoint=True)
- filter_lims = [wvlength[it:it + 2] for it in range(n_bins)]
+ filter_lims = [wvlength[it : it + 2] for it in range(n_bins)]
# Smaller filtered SED
SED_filt = np.zeros((n_bins, 2))
@@ -738,14 +788,20 @@ def filter_SED(SED, n_bins, filter_lims=None):
lim_low = np.abs(SED[:, 0] - lims[0]).argmin()
lim_hi = np.abs(SED[:, 0] - lims[1]).argmin()
# Sum internal points (points lying entirely inside the bin)
- SED_filt[idx, 1] = np.sum(SED[(lim_low + 1):lim_hi, 1])
+ SED_filt[idx, 1] = np.sum(SED[(lim_low + 1) : lim_hi, 1])
# Sum lower lim portion
- SED_filt[idx, 1] = SED_filt[
- idx, 1] + SED[lim_low, 1] * (SED[lim_low, 0] - lims[0] + wv_step / 2) / wv_step
+ SED_filt[idx, 1] = (
+ SED_filt[idx, 1]
+ + SED[lim_low, 1] * (SED[lim_low, 0] - lims[0] + wv_step / 2) / wv_step
+ )
# Sum upper lim portion
if lim_hi != lim_low:
- SED_filt[idx, 1] = SED_filt[
- idx, 1] + SED[lim_hi, 1] * (lims[1] - SED[lim_hi, 0] + wv_step / 2) / wv_step
+ SED_filt[idx, 1] = (
+ SED_filt[idx, 1]
+ + SED[lim_hi, 1]
+ * (lims[1] - SED[lim_hi, 0] + wv_step / 2)
+ / wv_step
+ )
# Weight by the size of the bin
SED_filt[idx, 1] = SED_filt[idx, 1] * (lims[1] - lims[0])
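A simplified numpy version of the binning just described, trading the fractional-edge bookkeeping for plain masking; it assumes the two-column [wavelength, value] SED layout used throughout:

    import numpy as np

    SED = np.stack([np.linspace(550, 900, 350), np.ones(350)], axis=1)
    n_bins = 35
    edges = np.linspace(SED[0, 0], SED[-1, 0], n_bins + 1)

    SED_filt = np.zeros((n_bins, 2))
    SED_filt[:, 0] = 0.5 * (edges[:-1] + edges[1:])  # bin centres
    for idx, (lo, hi) in enumerate(zip(edges[:-1], edges[1:])):
        mask = (SED[:, 0] >= lo) & (SED[:, 0] < hi)
        SED_filt[idx, 1] = np.sum(SED[mask, 1]) * (hi - lo)
    SED_filt[:, 1] /= np.sum(SED_filt[:, 1])  # normalise as gen_SED_sampler does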
@@ -765,14 +821,14 @@ def SED_gen_noise(n_bins, SED_sigma):
Parameters
----------
n_bins: int
- Number of bins of the SED. It will be the length of the output noise vector.
+ Number of bins of the SED. It will be the length of the output noise vector.
SED_sigma: positive float
Standard deviation value of the Gaussian noise vector.
"""
return np.random.normal(0, SED_sigma, n_bins)
- def interp_SED(self, SED_filt, n_points=0, n_bins=35, interp_kind='cubic'):
+ def interp_SED(self, SED_filt, n_points=0, n_bins=35, interp_kind="cubic"):
"""Interpolate the binned SED.
Returns a ('n_bins')x('n_points'+1) point SED and wvlength vector.
@@ -781,18 +837,21 @@ def interp_SED(self, SED_filt, n_points=0, n_bins=35, interp_kind='cubic'):
----------
SED_filt: np.ndarray
The filtered SED. In the first column it contains the wavelength positions. In the
- second column the SED value for each bin.
+ second column the SED value for each bin.
n_points: int
- Number of points to add in each of the filtered SED bins. It can only be 1 or 2.
+ Number of points to add in each of the filtered SED bins. It can be 0, 1, 2 or 3.
"""
# Generate interpolation function from the binned SED
- _, SED_interpolator = self.gen_SED_interp(SED_filt, n_bins, interp_kind=interp_kind)
+ _, SED_interpolator = self.gen_SED_interp(
+ SED_filt, n_bins, interp_kind=interp_kind
+ )
wv_step = SED_filt[1, 0] - SED_filt[0, 0]
- # Regenerate the wavelenght points
+ # Regenerate the wavelength points
+ # JP: This can be turned into a function
if n_points == 1:
- if self.extrapolate:
+ if self.SED_extrapolate:
# Add points at the border of each bin : *--o--*--o--*--o--*--o--*
SED = np.zeros((n_bins * 2 + 1, 3))
# Set wavelength points then interpolate
@@ -818,14 +877,14 @@ def interp_SED(self, SED_filt, n_points=0, n_bins=35, interp_kind='cubic'):
# Apply weights to bins
SED[:, 1] *= SED[:, 2]
elif n_points == 2:
- if self.extrapolate:
+ if self.SED_extrapolate:
# Add 2 points per bin: -*-o-*-*-o-*-*-o-*-*-o-*-
SED = np.zeros((n_bins * 3, 3))
SED[1::3, 0] = SED_filt[:, 0]
SED[::3, 0] = SED_filt[:, 0] - wv_step / 3
SED[2::3, 0] = SED_filt[:, 0] + wv_step / 3
SED[:, 1] = SED_interpolator(SED[:, 0])
- # Set weigths for new bins (borders have half the bin size)
+ # Set weights for new bins (borders have half the bin size)
SED[:, 2] = np.ones(n_bins * 3)
# Apply weights to bins
SED[:, 1] *= SED[:, 2]
@@ -842,7 +901,7 @@ def interp_SED(self, SED_filt, n_points=0, n_bins=35, interp_kind='cubic'):
# Apply weights to bins
SED[:, 1] *= SED[:, 2]
elif n_points == 3:
- if self.extrapolate:
+ if self.SED_extrapolate:
# Add 3 points inside each bin : *-*-o-*-*-*-o-*-*-*-o-*-*-*-o-*-*
SED = np.zeros((n_bins * 4 + 1, 3))
# Set wavelength points then interpolate
@@ -866,23 +925,25 @@ def interp_SED(self, SED_filt, n_points=0, n_bins=35, interp_kind='cubic'):
return SED
- def gen_SED_sampler(self, SED, n_bins, interp_kind='cubic'):
+ def gen_SED_sampler(self, SED, n_bins, interp_kind="cubic"):
"""Generate SED sampler.
-
+
Returns the sampler and the wavelengths in [nm]
"""
# Integrate SED into n_bins
SED_filt = self.filter_SED(SED, n_bins)
# Add noise. Scale sigma for each bin. Normalise the SED.
- #SED_filt[:,1] = SED_filt[:,1] + self.SED_gen_noise(len(SED_filt), self.SED_sigma)/len(SED_filt) # Here we assume 1/N as the mean bin value
+ # SED_filt[:,1] = SED_filt[:,1] + self.SED_gen_noise(len(SED_filt), self.SED_sigma)/len(SED_filt) # Here we assume 1/N as the mean bin value
SED_filt[:, 1] += np.multiply(
SED_filt[:, 1], self.SED_gen_noise(len(SED_filt), self.SED_sigma)
)
SED_filt[:, 1] = SED_filt[:, 1] / np.sum(SED_filt[:, 1])
# Add inside-bin points - Interpolate
- SED_filt = self.interp_SED(SED_filt, self.interp_pts_per_bin, n_bins, self.SED_interp_kind)
+ SED_filt = self.interp_SED(
+ SED_filt, self.SED_interp_pts_per_bin, n_bins, self.SED_interp_kind
+ )
# Add weights if not present
if SED_filt.shape[1] == 2:
@@ -895,7 +956,7 @@ def gen_SED_sampler(self, SED, n_bins, interp_kind='cubic'):
SED_filt[:, 1] / SED_filt[:, 2],
kind=interp_kind,
bounds_error=False,
- fill_value="extrapolate"
+ fill_value="extrapolate",
)
return SED_filt[:, 0], SED_sampler, SED_filt[:, 2]
@@ -903,7 +964,17 @@ def gen_SED_sampler(self, SED, n_bins, interp_kind='cubic'):
def calc_SED_wave_values(self, SED, n_bins=35):
"""Calculate feasible wavelength and SED values.
- Feasable so that the padding number N is integer.
+ Feasible so that the padding number N is an integer.
+ This means the choice of wavelengths matters for speeding
+ up the diffraction computation.
+
+ Parameters
+ ----------
+ SED: np.ndarray
+ Spectral energy distribution for a star.
+ n_bins: int
+ Number of bins
+
"""
# Generate SED interpolator and wavelength array (use new sampler method)
wvlength, SED_interp, weights = self.gen_SED_sampler(SED, n_bins)
@@ -928,7 +999,7 @@ def generate_poly_PSF(self, SED, n_bins=35):
"""Generate polychromatic PSF with a specific SED.
The wavelength space will be the Euclid VIS instrument band:
- [550,900]nm and will be sample in ``n_bins``.
+ [550,900]nm and will be sampled in ``n_bins``.
"""
# Calculate the feasible values of wavelength and the corresponding
@@ -941,13 +1012,16 @@ def generate_poly_PSF(self, SED, n_bins=35):
fig = plt.figure(figsize=(14, 8))
ax1 = fig.add_subplot(111)
- ax1.plot(SED[:, 0], SED[:, 1], label='Input SED')
+ ax1.plot(SED[:, 0], SED[:, 1], label="Input SED")
ax1.scatter(
- feasible_wv * 1e3, SED_interp(feasible_wv * 1e3), label='Interpolated', c='red'
+ feasible_wv * 1e3,
+ SED_interp(feasible_wv * 1e3),
+ label="Interpolated",
+ c="red",
)
- ax1.set_xlabel('wavelength [nm]')
- ax1.set_ylabel('SED(wavelength)')
- ax1.set_title('SED')
+ ax1.set_xlabel("wavelength [nm]")
+ ax1.set_ylabel("SED(wavelength)")
+ ax1.set_title("SED")
ax1.legend()
# plt.savefig(output_path+'SED_interp_nbin_%d.pdf'%n_bins, bbox_inches='tight')
plt.show()
@@ -966,7 +1040,7 @@ def generate_poly_PSF(self, SED, n_bins=35):
# This pythonic version of the polychromatic calculation is not working
# The parallelisation with the class with shared variables might not be working
-# It may work if we define a @staticmethod for the diffracvtion
+# It may work if we define a @staticmethod for the diffraction
# psf_cube = np.array([_sed*self.generate_mono_PSF(_wv, get_psf=True)
# for _wv, _sed in zip(feasible_wv, SED_norm)])
# # Sum to obtain the polychromatic PSFs
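Putting the class together, a hedged end-to-end sketch; constructor values mirror the defaults and configs visible in this diff, and the flat SED is a toy input:

    import numpy as np
    from wf_psf.sims.SimPSFToolkit import SimPSFToolkit

    sim = SimPSFToolkit(max_order=45, output_dim=64, pupil_diameter=256)
    sim.gen_random_Z_coeffs(max_order=45, rand_seed=42)
    mono_psf = sim.generate_mono_PSF(lambda_obs=0.725, get_psf=True)

    # Toy flat SED over the VIS band [550, 900] nm.
    SED = np.stack([np.linspace(550, 900, 100), np.ones(100)], axis=1)
    poly_psf = sim.generate_poly_PSF(SED, n_bins=35)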
diff --git a/src/wf_psf/sims/__init__.py b/src/wf_psf/sims/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/wf_psf/tests/conftest.py b/src/wf_psf/tests/conftest.py
new file mode 100644
index 00000000..eb5b350f
--- /dev/null
+++ b/src/wf_psf/tests/conftest.py
@@ -0,0 +1,148 @@
+"""FIXTURES FOR GENERATING TESTS FOR WF-PSF MODULES: CONFTEST.
+
+This module contains fixtures to use in unit tests for
+various wf_psf packages.
+
+:Author: Jennifer Pollack
+
+
+"""
+import pytest
+from wf_psf.utils.read_config import RecursiveNamespace
+from wf_psf.training.train import TrainingParamsHandler
+from wf_psf.psf_models import psf_models
+from wf_psf.data.training_preprocessing import TrainingDataHandler, TestDataHandler
+
+training_config = RecursiveNamespace(
+ id_name="_sample_w_bis1_2k",
+ data_config="data_config.yaml",
+ metrics_config="metrics_config.yaml",
+ model_params=RecursiveNamespace(
+ model_name="poly",
+ n_bins_lda=8,
+ output_Q=3,
+ oversampling_rate=3,
+ output_dim=32,
+ pupil_diameter=256,
+ use_sample_weights=True,
+ interpolation_type="None",
+ sed_interp_pts_per_bin=0,
+ sed_extrapolate=True,
+ sed_interp_kind="linear",
+ sed_sigma=0,
+ x_lims=[0.0, 1000.0],
+ y_lims=[0.0, 1000.0],
+ param_hparams=RecursiveNamespace(
+ random_seed=3877572,
+ l2_param=0.0,
+ n_zernikes=15,
+ d_max=2,
+ save_optim_history_param=True,
+ ),
+ nonparam_hparams=RecursiveNamespace(
+ d_max_nonparam=5,
+ num_graph_features=10,
+ l1_rate=1e-08,
+ project_dd_features=False,
+ reset_dd_features=False,
+ save_optim_history_nonparam=True,
+ ),
+ ),
+ training_hparams=RecursiveNamespace(
+ n_epochs_params=[2, 2],
+ n_epochs_non_params=[2, 2],
+ batch_size=32,
+ multi_cycle_params=RecursiveNamespace(
+ total_cycles=2,
+ cycle_def="complete",
+ save_all_cycles=True,
+ saved_cycle="cycle2",
+ learning_rate_params=[1.0e-2, 1.0e-2],
+ learning_rate_non_params=[1.0e-1, 1.0e-1],
+ n_epochs_params=[2, 2],
+ n_epochs_non_params=[2, 2],
+ ),
+ ),
+)
+
+data = RecursiveNamespace(
+ training=RecursiveNamespace(
+ data_dir="data",
+ file="coherent_euclid_dataset/train_Euclid_res_200_TrainStars_id_001.npy",
+ stars=None,
+ positions=None,
+ SEDS=None,
+ zernike_coef=None,
+ C_poly=None,
+ params=RecursiveNamespace(
+ d_max=2,
+ max_order=45,
+ x_lims=[0, 1000.0],
+ y_lims=[0, 1000.0],
+ grid_points=[4, 4],
+ n_bins=20,
+ max_wfe_rms=0.1,
+ oversampling_rate=3.0,
+ output_Q=3.0,
+ output_dim=32,
+ LP_filter_length=2,
+ pupil_diameter=256,
+ euclid_obsc=True,
+ n_stars=200,
+ ),
+ ),
+ test=RecursiveNamespace(
+ data_dir="data",
+ file="coherent_euclid_dataset/test_Euclid_res_id_001.npy",
+ stars=None,
+ noisy_stars=None,
+ positions=None,
+ SEDS=None,
+ zernike_coef=None,
+ C_poly=None,
+ parameters=RecursiveNamespace(
+ d_max=2,
+ max_order=45,
+ x_lims=[0, 1000.0],
+ y_lims=[0, 1000.0],
+ grid_points=[4, 4],
+ max_wfe_rms=0.1,
+ ),
+ ),
+)
+
+
+@pytest.fixture(scope="module", params=[training_config])
+def training_params():
+ return TrainingParamsHandler(training_config)
+
+
+@pytest.fixture(scope="module")
+def training_data():
+ return TrainingDataHandler(
+ data.training,
+ psf_models.simPSF(training_config.model_params),
+ training_config.model_params.n_bins_lda,
+ )
+
+
+@pytest.fixture(scope="module")
+def test_data():
+ return TestDataHandler(
+ data.test,
+ psf_models.simPSF(training_config.model_params),
+ training_config.model_params.n_bins_lda,
+ )
+
+
+@pytest.fixture(scope="module")
+def test_dataset(test_data):
+ return test_data.test_dataset
+
+
+@pytest.fixture(scope="module")
+def psf_model():
+ return psf_models.get_psf_model(
+ training_config.model_params,
+ training_config.training_hparams,
+ )
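An illustrative unit test built on these fixtures; the test itself is not part of this changeset:

    import tensorflow as tf

    def test_psf_model_is_keras_model(psf_model):
        # `psf_model` is injected by the module-scoped fixture above.
        assert isinstance(psf_model, tf.keras.Model)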
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint
new file mode 100644
index 00000000..2c9a4a9d
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "chkp_callback_poly_sample_w_bis1_2k_cycle2"
+all_model_checkpoint_paths: "chkp_callback_poly_sample_w_bis1_2k_cycle2"
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle1.data-00000-of-00001 b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle1.data-00000-of-00001
new file mode 100644
index 00000000..ab643a5c
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle1.data-00000-of-00001 differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle1.index b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle1.index
new file mode 100644
index 00000000..d7cab753
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle1.index differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2.data-00000-of-00001 b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2.data-00000-of-00001
new file mode 100644
index 00000000..2ccec7e7
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2.data-00000-of-00001 differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2.index b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2.index
new file mode 100644
index 00000000..9771d9c4
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2.index differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/config/configs.yaml b/src/wf_psf/tests/data/validation/main_random_seed/config/configs.yaml
new file mode 100644
index 00000000..a958b07e
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/config/configs.yaml
@@ -0,0 +1,2 @@
+---
+metrics_conf: metrics_config.yaml
\ No newline at end of file
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/config/data_config.yaml b/src/wf_psf/tests/data/validation/main_random_seed/config/data_config.yaml
new file mode 100644
index 00000000..115abf0e
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/config/data_config.yaml
@@ -0,0 +1,44 @@
+# Training and test datasets for training and/or metrics evaluation
+data:
+ training:
+ # Specify directory path to data; Default setting is /path/to/repo/data
+ data_dir: data/coherent_euclid_dataset/
+ file: train_Euclid_res_200_TrainStars_id_001.npy
+ # if training dataset file does not exist, generate a new one by setting values below
+ stars: null
+ positions: null
+ SEDS: null
+ zernike_coef: null
+ C_poly: null
+ params: #
+ d_max: 2
+ max_order: 45
+ x_lims: [0, 1000.0]
+ y_lims: [0, 1000.0]
+ grid_points: [4, 4]
+ n_bins: 20
+ max_wfe_rms: 0.1
+ oversampling_rate: 3.0
+ output_Q: 3.0
+ output_dim: 32
+ LP_filter_length: 2
+ pupil_diameter: 256
+ euclid_obsc: true
+ n_stars: 200
+ test:
+ data_dir: data/coherent_euclid_dataset/
+ file: test_Euclid_res_id_001.npy
+ # If the test dataset file is not provided, generate a new one
+ stars: null
+ noisy_stars: null
+ positions: null
+ SEDS: null
+ zernike_coef: null
+ C_poly: null
+ parameters:
+ d_max: 2
+ max_order: 45
+ x_lims: [0, 1000.0]
+ y_lims: [0, 1000.0]
+ grid_points: [4,4]
+ max_wfe_rms: 0.1
\ No newline at end of file
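For reference, a sketch of loading this file with the helper imported in run.py above, assuming `read_stream` yields dict-like YAML documents (as the loop in `mainMethod` suggests); the path is a placeholder:

    from wf_psf.utils.read_config import read_stream

    for doc in read_stream("config/data_config.yaml"):
        training = doc["data"]["training"]
        print(training["file"], training["params"]["n_stars"])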
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/config/metrics_config.yaml b/src/wf_psf/tests/data/validation/main_random_seed/config/metrics_config.yaml
new file mode 100644
index 00000000..f3fcd12e
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/config/metrics_config.yaml
@@ -0,0 +1,123 @@
+metrics:
+ # Specify the type of model weights to load by entering "psf_model" to load weights of final psf model or "checkpoint" to load weights from a checkpoint callback.
+ model_save_path: checkpoint
+ # Choose the training cycle for which to evaluate the psf_model. Can be: 1, 2, ...
+ saved_training_cycle: 2
+ # Metrics-only run: Specify model_params for a pre-trained model else leave blank if running training + metrics
+ # Specify path to Parent Directory of Trained Model
+ trained_model_path: src/wf_psf/tests/data/validation/main_random_seed
+ # Name of the Trained Model Config file stored in config sub-directory in the trained_model_path parent directory
+ trained_model_config: training_config.yaml
+ #Evaluate the monochromatic RMSE metric.
+ eval_mono_metric_rmse: True
+ #Evaluate the OPD RMSE metric.
+ eval_opd_metric_rmse: True
+ #Evaluate the super-resolution and the shape RMSE metrics for the train dataset.
+ eval_train_shape_sr_metric_rmse: True
+ # Name of the plotting config file. Enter the name of a yaml file to plot metrics; if empty, only metrics evaluation is run.
+ plotting_config:
+ ground_truth_model:
+ model_params:
+ #Model used as ground truth for the evaluation. Options are: 'poly' for polychromatic and 'physical' [not available].
+ model_name: poly
+
+ # Evaluation parameters
+ #Number of bins used for the ground truth model poly PSF generation
+ n_bins_lda: 20
+
+ #Downsampling rate to match the oversampled model to the specified telescope's sampling.
+ output_Q: 3
+
+ #Oversampling rate used for the OPD/WFE PSF model.
+ oversampling_rate: 3
+
+ #Dimension of the pixel PSF postage stamp
+ output_dim: 32
+
+ #Dimension of the OPD/Wavefront space.
+ pupil_diameter: 256
+
+ #Boolean to define if we use sample weights based on the noise standard deviation estimation
+ use_sample_weights: True
+
+ #Interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'.
+ interpolation_type: None
+
+ # SED intepolation points per bin
+ sed_interp_pts_per_bin: 0
+
+ # SED extrapolate
+ sed_extrapolate: True
+
+ # SED interpolate kind
+ sed_interp_kind: linear
+
+ # Standard deviation of the multiplicative SED Gaussian noise.
+ sed_sigma: 0
+
+ #Limits of the PSF field coordinates for the x axis.
+ x_lims: [0.0, 1.0e+3]
+
+ #Limits of the PSF field coordinates for the y axis.
+ y_lims: [0.0, 1.0e+3]
+
+ # Hyperparameters for Parametric model
+ param_hparams:
+ # Random seed for TensorFlow initialization
+ random_seed: 3877572
+
+ # Parameter for the l2 loss function for the Optical path differences (OPD)/WFE
+ l2_param: 0.
+
+ #Zernike polynomial modes to use on the parametric part.
+ n_zernikes: 45
+
+ #Max polynomial degree of the parametric part.
+ d_max: 2
+
+ #Flag to save optimisation history for parametric model
+ save_optim_history_param: true
+
+ # Hyperparameters for non-parametric model
+ nonparam_hparams:
+ #Max polynomial degree of the non-parametric part.
+ d_max_nonparam: 5
+
+ # Number of graph features
+ num_graph_features: 10
+
+ #L1 regularisation parameter for the non-parametric part.
+ l1_rate: 1.0e-8
+
+ #Flag to enable Projected learning for DD_features to be used with `poly` or `semiparametric` model.
+ project_dd_features: False
+
+ #Flag to reset DD_features to be used with `poly` or `semiparametric` model
+ reset_dd_features: False
+
+ #Flag to save optimisation history for non-parametric model
+ save_optim_history_nonparam: True
+
+ metrics_hparams:
+ # Batch size to use for the evaluation.
+ batch_size: 16
+
+ #Flag to save the Super-Resolution pixel PSF RMSE for each individual test star,
+ #in addition to the mean across the FOV.
+ #If `True`, the relative pixel RMSE of each star is added to the saving dictionary.
+ opt_stars_rel_pix_rmse: False
+
+ ## Specific parameters
+ # Parameter for the l2 loss of the OPD.
+ l2_param: 0.
+
+ ## Define the resolution at which you'd like to measure the shape of the PSFs
+ #Downsampling rate from the high-resolution pixel modelling space.
+ # Recommended value: 1
+ output_Q: 1
+
+ #Dimension of the pixel PSF postage stamp; it should be big enough so that most of the signal is contained inside the postage stamp.
+ # It also depends on the Q values used.
+ # Recommended value: 64 or higher
+ output_dim: 64
+
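
The `model_save_path` and `saved_training_cycle` values above determine which weights file a metrics run loads. A short illustration of how they combine into the weights basename (values taken from the configs and test fixtures in this patch):

```python
model_save_path = "psf_model"   # or "checkpoint" to load a checkpoint callback
saved_training_cycle = 2
model_name = "poly"
id_name = "_sample_w_bis1_2k"

# Mirrors the concatenation done in the weights_path_basename test fixture below
weights_basename = f"{model_save_path}_{model_name}{id_name}_cycle{saved_training_cycle}"
print(weights_basename)  # psf_model_poly_sample_w_bis1_2k_cycle2
```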
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/config/training_config.yaml b/src/wf_psf/tests/data/validation/main_random_seed/config/training_config.yaml
new file mode 100644
index 00000000..ef1f0635
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/config/training_config.yaml
@@ -0,0 +1,122 @@
+training:
+ # ID name
+ id_name: _sample_w_bis1_2k
+ # Name of Data Config file
+ data_config: data_config.yaml
+ # Metrics Config file - Enter file to run metrics evaluation else if empty run train only
+ metrics_config: metrics_config.yaml
+ model_params:
+ # Model type. Options are: 'mccd', 'graph', 'poly', 'param', 'poly_physical'.
+ model_name: poly
+
+ #Num of wavelength bins to reconstruct polychromatic objects.
+ n_bins_lda: 8
+
+ #Downsampling rate to match the oversampled model to the specified telescope's sampling.
+ output_Q: 3
+
+ #Oversampling rate used for the OPD/WFE PSF model.
+ oversampling_rate: 3
+
+ #Dimension of the pixel PSF postage stamp
+ output_dim: 32
+
+ #Dimension of the OPD/Wavefront space.
+ pupil_diameter: 256
+
+ #Whether to use sample weights based on the noise standard deviation estimate
+ use_sample_weights: True
+
+ #Interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'.
+ interpolation_type: None
+
+ # SED interpolation points per bin
+ sed_interp_pts_per_bin: 0
+
+ # SED extrapolate
+ sed_extrapolate: True
+
+ # SED interpolate kind
+ sed_interp_kind: linear
+
+ # Standard deviation of the multiplicative SED Gaussian noise.
+ sed_sigma: 0
+
+ #Limits of the PSF field coordinates for the x axis.
+ x_lims: [0.0, 1.0e+3]
+
+ #Limits of the PSF field coordinates for the y axis.
+ y_lims: [0.0, 1.0e+3]
+
+ # Hyperparameters for Parametric model
+ param_hparams:
+ # Random seed for TensorFlow initialization
+ random_seed: 3877572
+
+ # Parameter for the l2 loss function for the Optical path differences (OPD)/WFE
+ l2_param: 0.
+
+ #Zernike polynomial modes to use on the parametric part.
+ n_zernikes: 15
+
+ #Max polynomial degree of the parametric part (TODO: rename to max_deg_param).
+ d_max: 2
+
+ #Flag to save optimisation history for parametric model
+ save_optim_history_param: true
+
+ # Hyperparameters for non-parametric model
+ nonparam_hparams:
+
+ #Max polynomial degree of the non-parametric part (TODO: rename to max_deg_nonparam).
+ d_max_nonparam: 5
+
+ # Number of graph features
+ num_graph_features: 10
+
+ #L1 regularisation parameter for the non-parametric part.
+ l1_rate: 1.0e-8
+
+ #Flag to enable Projected learning for DD_features to be used with `poly` or `semiparametric` model.
+ project_dd_features: False
+
+ #Flag to reset DD_features to be used with `poly` or `semiparametric` model
+ reset_dd_features: False
+
+ #Flag to save optimisation history for non-parametric model
+ save_optim_history_nonparam: true
+
+ # Training hyperparameters
+ training_hparams:
+ n_epochs_params: [2, 2, 2]
+
+ n_epochs_non_params: [2, 2, 2]
+
+ batch_size: 32
+
+ multi_cycle_params:
+
+ # Total amount of cycles to perform.
+ total_cycles: 2
+
+ # Train cycle definition. It can be: 'parametric', 'non-parametric', 'complete', 'only-non-parametric' or 'only-parametric'.
+ cycle_def: complete
+
+ # Save a checkpoint at every cycle, or only at the end of training.
+ save_all_cycles: True
+
+ # Saved cycle to use for the evaluation. Can be 'cycle1', 'cycle2', ...
+ saved_cycle: cycle2
+
+ # Learning rates for the parametric parts, one value per training cycle.
+ learning_rate_params: [1.0e-2, 1.0e-2]
+
+ # Learning rates for the non-parametric parts, one value per training cycle.
+ learning_rate_non_params: [1.0e-1, 1.0e-1]
+
+ # Number of training epochs of the parametric parts, one value per training cycle.
+ n_epochs_params: [20, 20]
+
+ # Number of training epochs of the non-parametric parts, one value per training cycle.
+ n_epochs_non_params: [100, 120]
+
\ No newline at end of file
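
The lists under `multi_cycle_params` are indexed per cycle: `train.py` (later in this patch) reads entry `current_cycle - 1` from each list, so entry `i` configures cycle `i + 1`. A small sketch of that consumption:

```python
total_cycles = 2
learning_rate_params = [1.0e-2, 1.0e-2]
learning_rate_non_params = [1.0e-1, 1.0e-1]
n_epochs_params = [20, 20]
n_epochs_non_params = [100, 120]

for current_cycle in range(1, total_cycles + 1):
    i = current_cycle - 1  # same indexing as train.train()
    print(
        f"cycle {current_cycle}: param lr={learning_rate_params[i]} "
        f"({n_epochs_params[i]} epochs), non-param lr={learning_rate_non_params[i]} "
        f"({n_epochs_non_params[i]} epochs)"
    )
```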
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/config/training_config_error.yaml b/src/wf_psf/tests/data/validation/main_random_seed/config/training_config_error.yaml
new file mode 100644
index 00000000..97bd36a1
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/config/training_config_error.yaml
@@ -0,0 +1,122 @@
+training:
+ # ID name
+ id_name: _errorsample_w_bis1_2k
+ # Name of Data Config file
+ data_config: data_config.yaml
+ # Metrics Config file - Enter file to run metrics evaluation else if empty run train only
+ metrics_config: metrics_config.yaml
+ model_params:
+ # Model type. Options are: 'mccd', 'graph', 'poly', 'param', 'poly_physical'.
+ model_name: poly
+
+ #Num of wavelength bins to reconstruct polychromatic objects.
+ n_bins_lda: 8
+
+ #Downsampling rate to match the oversampled model to the specified telescope's sampling.
+ output_Q: 3
+
+ #Oversampling rate used for the OPD/WFE PSF model.
+ oversampling_rate: 3
+
+ #Dimension of the pixel PSF postage stamp
+ output_dim: 32
+
+ #Dimension of the OPD/Wavefront space.
+ pupil_diameter: 256
+
+ #Whether to use sample weights based on the noise standard deviation estimate
+ use_sample_weights: True
+
+ #Interpolation type for the physical poly model. Options are: 'none', 'all', 'top_K', 'independent_Zk'.
+ interpolation_type: None
+
+ # SED interpolation points per bin
+ sed_interp_pts_per_bin: 0
+
+ # SED extrapolate
+ sed_extrapolate: True
+
+ # SED interpolate kind
+ sed_interp_kind: linear
+
+ # Standard deviation of the multiplicative SED Gaussian noise.
+ sed_sigma: 0
+
+ #Limits of the PSF field coordinates for the x axis.
+ x_lims: [0.0, 1.0e+3]
+
+ #Limits of the PSF field coordinates for the y axis.
+ y_lims: [0.0, 1.0e+3]
+
+ # Hyperparameters for Parametric model
+ param_hparams:
+ # Random seed for TensorFlow initialization
+ random_seed: 3877572
+
+ # Parameter for the l2 loss function for the Optical path differences (OPD)/WFE
+ l2_param: 0.
+
+ #Zernike polynomial modes to use on the parametric part.
+ n_zernikes: 15
+
+ #Max polynomial degree of the parametric part (TODO: rename to max_deg_param).
+ d_max: 2
+
+ #Flag to save optimisation history for parametric model
+ save_optim_history_param: true
+
+ # Hyperparameters for non-parametric model
+ nonparam_hparams:
+
+ #Max polynomial degree of the non-parametric part (TODO: rename to max_deg_nonparam).
+ d_max_nonparam: 5
+
+ # Number of graph features
+ num_graph_features: 10
+
+ #L1 regularisation parameter for the non-parametric part.
+ l1_rate: 1.0e-8
+
+ #Flag to enable Projected learning for DD_features to be used with `poly` or `semiparametric` model.
+ project_dd_features: False
+
+ #Flag to reset DD_features to be used with `poly` or `semiparametric` model
+ reset_dd_features: False
+
+ #Flag to save optimisation history for non-parametric model
+ save_optim_history_nonparam: true
+
+ # Training hyperparameters
+ training_hparams:
+ n_epochs_params: [2, 2, 2]
+
+ n_epochs_non_params: [2, 2, 2]
+
+ batch_size: 32
+
+ multi_cycle_params:
+
+ # Total amount of cycles to perform.
+ total_cycles: 2
+
+ # Train cycle definition. It can be: 'parametric', 'non-parametric', 'complete', 'only-non-parametric' or 'only-parametric'.
+ cycle_def: complete
+
+ # Save a checkpoint at every cycle, or only at the end of training.
+ save_all_cycles: True
+
+ # Saved cycle to use for the evaluation. Can be 'cycle1', 'cycle2', ...
+ saved_cycle: cycle2
+
+ # Learning rates for the parametric parts, one value per training cycle.
+ learning_rate_params: [1.0e-2, 1.0e-2]
+
+ # Learning rates for the non-parametric parts, one value per training cycle.
+ learning_rate_non_params: [1.0e-1, 1.0e-1]
+
+ # Number of training epochs of the parametric parts, one value per training cycle.
+ n_epochs_params: [20, 20]
+
+ # Number of training epochs of the non-parametric parts, one value per training cycle.
+ n_epochs_non_params: [100, 120]
+
\ No newline at end of file
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/metrics/metrics-poly_sample_w_bis1_2k.npy b/src/wf_psf/tests/data/validation/main_random_seed/metrics/metrics-poly_sample_w_bis1_2k.npy
new file mode 100644
index 00000000..5d689478
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/metrics/metrics-poly_sample_w_bis1_2k.npy differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/optim-hist/optim_hist_poly_sample_w_bis1_2k.npy b/src/wf_psf/tests/data/validation/main_random_seed/optim-hist/optim_hist_poly_sample_w_bis1_2k.npy
new file mode 100644
index 00000000..218201c4
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/optim-hist/optim_hist_poly_sample_w_bis1_2k.npy differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/psf_model/checkpoint b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/checkpoint
new file mode 100644
index 00000000..5436c9ce
--- /dev/null
+++ b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/checkpoint
@@ -0,0 +1,2 @@
+model_checkpoint_path: "chkp_poly_sample_w_bis1_2k_cycle2"
+all_model_checkpoint_paths: "chkp_poly_sample_w_bis1_2k_cycle2"
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle1.data-00000-of-00001 b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle1.data-00000-of-00001
new file mode 100644
index 00000000..ab643a5c
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle1.data-00000-of-00001 differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle1.index b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle1.index
new file mode 100644
index 00000000..d7cab753
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle1.index differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle2.data-00000-of-00001 b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle2.data-00000-of-00001
new file mode 100644
index 00000000..2ccec7e7
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle2.data-00000-of-00001 differ
diff --git a/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle2.index b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle2.index
new file mode 100644
index 00000000..9771d9c4
Binary files /dev/null and b/src/wf_psf/tests/data/validation/main_random_seed/psf_model/psf_model_poly_sample_w_bis1_2k_cycle2.index differ
diff --git a/src/wf_psf/tests/metrics_test.py b/src/wf_psf/tests/metrics_test.py
new file mode 100644
index 00000000..c22c1b80
--- /dev/null
+++ b/src/wf_psf/tests/metrics_test.py
@@ -0,0 +1,344 @@
+"""UNIT TESTS FOR PACKAGE MODULE: Metrics.
+
+This module contains unit tests for the wf_psf.metrics module.
+
+:Author: Jennifer Pollack
+
+"""
+import pytest
+from wf_psf.utils.read_config import RecursiveNamespace
+from wf_psf.training import train
+from wf_psf.metrics.metrics_interface import MetricsParamsHandler
+from wf_psf.psf_models import psf_models
+import tensorflow as tf
+import numpy as np
+import os
+
+metrics_params = RecursiveNamespace(
+ model_save_path="psf_model",
+ saved_training_cycle="2",
+ trained_model_path="data/validation/main_random_seed",
+ trained_model_config="config/training_config.yaml",
+ plotting_config=None,
+ eval_mono_metric_rmse=True,
+ eval_opd_metric_rmse=True,
+ eval_train_shape_sr_metric_rmse=True,
+ ground_truth_model=RecursiveNamespace(
+ model_params=RecursiveNamespace(
+ model_name="poly",
+ dataset_type="C_poly",
+ sed_interp_pts_per_bin=0,
+ sed_extrapolate=True,
+ sed_interp_kind="linear",
+ sed_sigma=0,
+ n_bins_lda=20,
+ output_Q=3,
+ oversampling_rate=3,
+ output_dim=32,
+ pupil_diameter=256,
+ use_sample_weights=True,
+ interpolation_type="None",
+ x_lims=[0.0, 1000.0],
+ y_lims=[0.0, 1000.0],
+ param_hparams=RecursiveNamespace(
+ random_seed=3877572,
+ l2_param=0.0,
+ n_zernikes=45,
+ d_max=2,
+ save_optim_history_param=True,
+ ),
+ nonparam_hparams=RecursiveNamespace(
+ d_max_nonparam=5,
+ num_graph_features=10,
+ l1_rate=1e-08,
+ project_dd_features=False,
+ reset_dd_features=False,
+ save_optim_history_nonparam=True,
+ ),
+ )
+ ),
+ metrics_hparams=RecursiveNamespace(
+ batch_size=16,
+ opt_stars_rel_pix_rmse=False,
+ l2_param=0.0,
+ output_Q=1,
+ output_dim=64,
+ ),
+)
+
+cwd = os.getcwd()
+
+psf_model_path = os.path.join(
+ cwd,
+ "src/wf_psf/tests",
+ metrics_params.trained_model_path,
+ metrics_params.model_save_path,
+)
+
+main_dir = os.path.join(
+ cwd, "src/wf_psf/tests", metrics_params.trained_model_path, "metrics"
+)
+
+
+@pytest.fixture(scope="module")
+def weights_path_basename(training_params):
+ weights_path = (
+ psf_model_path
+ + "/"
+ + metrics_params.model_save_path
+ + "_"
+ + training_params.model_params.model_name
+ + training_params.id_name
+ + "_cycle"
+ + metrics_params.saved_training_cycle
+ )
+
+ return weights_path
+
+
+@pytest.fixture(scope="module")
+def main_metrics(training_params):
+ metrics_filename = (
+ "metrics-"
+ + training_params.model_params.model_name
+ + training_params.id_name
+ + ".npy"
+ )
+ return np.load(os.path.join(main_dir, metrics_filename), allow_pickle=True)[()]
+
+
+@pytest.mark.skip(reason="Requires gpu")
+def test_eval_metrics_polychromatic_lowres(
+ training_params,
+ weights_path_basename,
+ training_data,
+ psf_model,
+ test_dataset,
+ main_metrics,
+):
+ metrics_handler = MetricsParamsHandler(metrics_params, training_params)
+
+ ## Prepare models
+ # Prepare np input
+ simPSF_np = training_data.simPSF
+
+ # Load the trained model weights
+ psf_model.load_weights(weights_path_basename)
+
+ poly_metric = metrics_handler.evaluate_metrics_polychromatic_lowres(
+ psf_model, simPSF_np, test_dataset
+ )
+
+ tol = 1.0e-7
+ ratio_rmse = abs(
+ 1.0 - main_metrics["test_metrics"]["poly_metric"]["rmse"] / poly_metric["rmse"]
+ )
+ ratio_rel_rmse = abs(
+ 1.0
+ - main_metrics["test_metrics"]["poly_metric"]["rel_rmse"]
+ / poly_metric["rel_rmse"]
+ )
+ ratio_std_rmse = abs(
+ 1.0
+ - main_metrics["test_metrics"]["poly_metric"]["std_rmse"]
+ / poly_metric["std_rmse"]
+ )
+ ratio_rel_std_rmse = abs(
+ 1.0
+ - main_metrics["test_metrics"]["poly_metric"]["std_rel_rmse"]
+ / poly_metric["std_rel_rmse"]
+ )
+
+ assert ratio_rmse < tol
+ assert ratio_rel_rmse < tol
+ assert ratio_std_rmse < tol
+ assert ratio_rel_std_rmse < tol
+
+
+@pytest.mark.skip(reason="Requires gpu")
+def test_evaluate_metrics_opd(
+ training_params,
+ weights_path_basename,
+ training_data,
+ psf_model,
+ test_dataset,
+ main_metrics,
+):
+ metrics_handler = MetricsParamsHandler(metrics_params, training_params)
+
+ ## Prepare models
+ # Prepare np input
+ simPSF_np = training_data.simPSF
+
+ ## Load the trained model weights
+ psf_model.load_weights(weights_path_basename)
+
+ opd_metric = metrics_handler.evaluate_metrics_opd(
+ psf_model, simPSF_np, test_dataset
+ )
+
+ tol = 1.0e-9
+ ratio_rmse_opd = abs(
+ 1
+ - main_metrics["test_metrics"]["opd_metric"]["rmse_opd"]
+ / opd_metric["rmse_opd"]
+ )
+ ratio_rel_rmse_opd = abs(
+ 1.0
+ - main_metrics["test_metrics"]["opd_metric"]["rel_rmse_opd"]
+ / opd_metric["rel_rmse_opd"]
+ )
+ ratio_rmse_std_opd = abs(
+ 1.0
+ - main_metrics["test_metrics"]["opd_metric"]["rmse_std_opd"]
+ / opd_metric["rmse_std_opd"]
+ )
+ ratio_rel_rmse_std_opd = abs(
+ 1.0
+ - main_metrics["test_metrics"]["opd_metric"]["rel_rmse_std_opd"]
+ / opd_metric["rel_rmse_std_opd"]
+ )
+
+ assert ratio_rmse_opd < tol
+ assert ratio_rel_rmse_opd < tol
+ assert ratio_rmse_std_opd < tol
+ assert ratio_rel_rmse_std_opd < tol
+
+
+@pytest.mark.skip(reason="Requires gpu")
+def test_eval_metrics_mono_rmse(
+ training_params,
+ weights_path_basename,
+ training_data,
+ psf_model,
+ test_dataset,
+ main_metrics,
+):
+ metrics_handler = MetricsParamsHandler(metrics_params, training_params)
+
+ ## Prepare models
+ # Prepare np input
+ simPSF_np = training_data.simPSF
+
+ ## Load the trained model weights
+ psf_model.load_weights(weights_path_basename)
+
+ mono_metric = metrics_handler.evaluate_metrics_mono_rmse(
+ psf_model, simPSF_np, test_dataset
+ )
+
+ nlambda = len(mono_metric["rmse_lda"])
+
+ tol = 1.0e-9
+ ratio_rmse_mono = abs(
+ 1
+ - np.sum(
+ np.array(main_metrics["test_metrics"]["mono_metric"]["rmse_lda"])
+ / np.array(mono_metric["rmse_lda"])
+ )
+ / nlambda
+ )
+ ratio_rel_rmse_mono = abs(
+ 1.0
+ - np.sum(
+ np.array(main_metrics["test_metrics"]["mono_metric"]["rel_rmse_lda"])
+ / np.array(mono_metric["rel_rmse_lda"])
+ )
+ / nlambda
+ )
+ ratio_rmse_std_mono = abs(
+ 1.0
+ - np.sum(
+ np.array(main_metrics["test_metrics"]["mono_metric"]["std_rmse_lda"])
+ / np.array(mono_metric["std_rmse_lda"])
+ )
+ / nlambda
+ )
+
+ ratio_rel_rmse_std_mono = abs(
+ 1.0
+ - np.sum(
+ np.array(main_metrics["test_metrics"]["mono_metric"]["std_rel_rmse_lda"])
+ / np.array(mono_metric["std_rel_rmse_lda"])
+ )
+ / nlambda
+ )
+
+ assert ratio_rmse_mono < tol
+ assert ratio_rel_rmse_mono < tol
+ assert ratio_rmse_std_mono < tol
+ assert ratio_rel_rmse_std_mono < tol
+
+
+@pytest.mark.skip(reason="Requires gpu")
+def test_evaluate_metrics_shape(
+ training_params,
+ weights_path_basename,
+ training_data,
+ psf_model,
+ test_dataset,
+ main_metrics,
+):
+ metrics_handler = MetricsParamsHandler(metrics_params, training_params)
+
+ ## Prepare models
+ # Prepare np input
+ simPSF_np = training_data.simPSF
+
+ ## Load the trained model weights
+ psf_model.load_weights(weights_path_basename)
+
+ shape_metric = metrics_handler.evaluate_metrics_shape(
+ psf_model, simPSF_np, test_dataset
+ )
+
+ tol = 1.0e-9
+ ratio_rmse_e1 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["rmse_e1"]
+ / shape_metric["rmse_e1"]
+ )
+
+ ratio_std_rmse_e1 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["std_rmse_e1"]
+ / shape_metric["std_rmse_e1"]
+ )
+
+ ratio_rel_rmse_e1 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["rel_rmse_e1"]
+ / shape_metric["rel_rmse_e1"]
+ )
+
+ ratio_std_rel_rmse_e1 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["std_rel_rmse_e1"]
+ / shape_metric["std_rel_rmse_e1"]
+ )
+
+ ratio_rmse_e2 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["rmse_e2"]
+ / shape_metric["rmse_e2"]
+ )
+
+ ratio_std_rmse_e2 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["std_rmse_e2"]
+ / shape_metric["std_rmse_e2"]
+ )
+
+ ratio_rmse_R2_meanR2 = abs(
+ 1.0
+ - main_metrics["test_metrics"]["shape_results_dict"]["rmse_R2_meanR2"]
+ / shape_metric["rmse_R2_meanR2"]
+ )
+
+ assert ratio_rmse_e1 < tol
+ assert ratio_std_rmse_e1 < tol
+ assert ratio_rel_rmse_e1 < tol
+ assert ratio_std_rel_rmse_e1 < tol
+ assert ratio_rmse_e2 < tol
+ assert ratio_std_rmse_e2 < tol
+ assert ratio_rmse_R2_meanR2 < tol
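
Every assertion in this test module follows the same pattern: a freshly computed metric is compared with the stored baseline as a relative ratio. A hypothetical helper (not part of the package) capturing that check:

```python
def ratio_close(baseline: float, computed: float, tol: float) -> bool:
    """Return True when the relative deviation |1 - baseline / computed| is below tol."""
    return abs(1.0 - baseline / computed) < tol

assert ratio_close(1.0000001, 1.0, tol=1.0e-6)   # within tolerance
assert not ratio_close(1.1, 1.0, tol=1.0e-7)     # outside tolerance
```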
diff --git a/src/wf_psf/tests/psf_models_test.py b/src/wf_psf/tests/psf_models_test.py
new file mode 100644
index 00000000..fbbb1191
--- /dev/null
+++ b/src/wf_psf/tests/psf_models_test.py
@@ -0,0 +1,23 @@
+"""UNIT TESTS FOR PACKAGE MODULE: PSF MODELS.
+
+This module contains unit tests for the wf_psf.psf_models.psf_models module.
+
+:Author: Jennifer Pollack
+
+
+"""
+
+import pytest
+from wf_psf.psf_models import psf_models
+from wf_psf.utils.io import FileIOHandler
+import os
+
+
+def test_get_psf_model_weights_filepath():
+ weights_filepath = "src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint*_poly*_sample_w_bis1_2k_cycle2*"
+
+ ans = psf_models.get_psf_model_weights_filepath(weights_filepath)
+ assert (
+ ans
+ == "src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint_callback_poly_sample_w_bis1_2k_cycle2"
+ )
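
The test above feeds a glob pattern and expects back a TensorFlow-style weights basename (no `.index`/`.data-*` suffix). A sketch of how such a resolution can work, under the assumption of TF checkpoint file naming; the actual logic lives in `wf_psf.psf_models.psf_models.get_psf_model_weights_filepath`:

```python
import glob
import os

def resolve_weights_basename(pattern: str) -> str:
    # Match e.g. <basename>.index and <basename>.data-00000-of-00001
    matches = glob.glob(pattern)
    if not matches:
        raise FileNotFoundError(f"No weights files match {pattern}")
    # Strip the TensorFlow suffix after the last dot to recover the basename
    return os.path.splitext(matches[0])[0]
```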
diff --git a/src/wf_psf/tests/test_utils/configs_handler_test.py b/src/wf_psf/tests/test_utils/configs_handler_test.py
new file mode 100644
index 00000000..5e8db118
--- /dev/null
+++ b/src/wf_psf/tests/test_utils/configs_handler_test.py
@@ -0,0 +1,72 @@
+"""UNIT TESTS FOR PACKAGE MODULE: CONFIGS_HANDLER.
+
+This module contains unit tests for the wf_psf.utils.configs_handler module.
+
+:Author: Jennifer Pollack
+
+
+"""
+
+import pytest
+from wf_psf.utils import configs_handler
+from wf_psf.utils.io import FileIOHandler
+import os
+
+
+@configs_handler.register_configclass
+class TestClass:
+ ids = ("test_conf",)
+
+ def __init__(self, config_params, file_handler):
+ self.config_param = config_params
+ self.file_handler = file_handler
+
+
+def test_register_configclass():
+ assert configs_handler.CONFIG_CLASS["test_conf"] == TestClass
+
+
+def test_set_run_config():
+ config_class = configs_handler.set_run_config("test_conf")
+ assert config_class == TestClass
+
+ config_class = configs_handler.set_run_config("training_conf")
+ assert config_class == configs_handler.TrainingConfigHandler
+
+ config_class = configs_handler.set_run_config("metrics_conf")
+ assert config_class == configs_handler.MetricsConfigHandler
+
+ config_class = configs_handler.set_run_config("plotting_conf")
+ assert config_class == configs_handler.PlottingConfigHandler
+
+
+def test_get_run_config(path_to_repo_dir, path_to_tmp_output_dir, path_to_config_dir):
+ test_file_handler = FileIOHandler(
+ path_to_repo_dir, path_to_tmp_output_dir, path_to_config_dir
+ )
+
+ config_class = configs_handler.get_run_config(
+ "test_conf", "fake_config.yaml", test_file_handler
+ )
+
+ assert type(config_class) is TestClass
+
+
+def test_MetricsConfigHandler_weights_basename_filepath(
+ path_to_repo_dir, path_to_tmp_output_dir, path_to_config_dir
+):
+ test_file_handler = FileIOHandler(
+ path_to_repo_dir, path_to_tmp_output_dir, path_to_config_dir
+ )
+
+ metrics_config_file = "validation/main_random_seed/config/metrics_config.yaml"
+
+ metrics_object = configs_handler.MetricsConfigHandler(
+ os.path.join(path_to_config_dir, metrics_config_file), test_file_handler
+ )
+ weights_filepath = metrics_object.weights_basename_filepath
+
+ assert (
+ weights_filepath
+ == "src/wf_psf/tests/data/validation/main_random_seed/checkpoint/checkpoint*_poly*_sample_w_bis1_2k_cycle2*"
+ )
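
End to end, the registry exercised above works like this: `register_configclass` stores each handler class under its `ids`, and `set_run_config` picks the class whose id occurs in the config name (see `wf_psf/utils/configs_handler.py` later in this patch). A standalone sketch of the same dispatch:

```python
import re

CONFIG_CLASS = {}

def register_configclass(config_class):
    for config_id in config_class.ids:
        CONFIG_CLASS[config_id] = config_class
    return config_class

@register_configclass
class TrainingHandler:
    ids = ("training_conf",)

def set_run_config(config_name):
    matched = [i for i in CONFIG_CLASS if re.search(i, config_name)][0]
    return CONFIG_CLASS[matched]

assert set_run_config("training_config.yaml") is TrainingHandler
```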
diff --git a/src/wf_psf/tests/test_utils/conftest.py b/src/wf_psf/tests/test_utils/conftest.py
new file mode 100644
index 00000000..6df37416
--- /dev/null
+++ b/src/wf_psf/tests/test_utils/conftest.py
@@ -0,0 +1,33 @@
+"""FIXTURES FOR GENERATING TESTS FOR WF-PSF UTILS PACKAGE: CONFTEST.
+
+This module contains fixtures to use in unit tests for the
+wf_psf utils package.
+
+:Author: Jennifer Pollack
+
+
+"""
+import pytest
+import os
+
+cwd = os.getcwd()
+
+
+@pytest.fixture(scope="class")
+def path_to_repo_dir():
+ return cwd
+
+
+@pytest.fixture
+def path_to_test_dir(path_to_repo_dir):
+ return os.path.join(path_to_repo_dir, "src", "wf_psf", "tests")
+
+
+@pytest.fixture
+def path_to_tmp_output_dir(tmp_path):
+ return tmp_path
+
+
+@pytest.fixture
+def path_to_config_dir(path_to_test_dir):
+ return os.path.join(path_to_test_dir, "data")
diff --git a/src/wf_psf/tests/test_utils/io_test.py b/src/wf_psf/tests/test_utils/io_test.py
new file mode 100644
index 00000000..1f2d0a7d
--- /dev/null
+++ b/src/wf_psf/tests/test_utils/io_test.py
@@ -0,0 +1,52 @@
+"""UNIT TESTS FOR PACKAGE MODULE: IO.
+
+This module contains unit tests for the wf_psf.utils.io module.
+
+:Author: Jennifer Pollack
+
+
+"""
+
+import pytest
+from wf_psf.utils.io import FileIOHandler
+import os
+
+
+@pytest.fixture
+def test_file_handler(path_to_repo_dir, path_to_tmp_output_dir, path_to_config_dir):
+ test_file_handler = FileIOHandler(
+ path_to_repo_dir, path_to_tmp_output_dir, path_to_config_dir
+ )
+ test_file_handler._make_output_dir()
+ test_file_handler._make_run_dir()
+ test_file_handler._setup_dirs()
+ return test_file_handler
+
+
+def test_make_output_dir(test_file_handler, path_to_tmp_output_dir):
+ assert os.path.exists(
+ os.path.join(path_to_tmp_output_dir, test_file_handler.parent_output_dir)
+ )
+
+
+def test_make_run_dir(test_file_handler):
+ assert os.path.exists(test_file_handler._run_output_dir)
+
+
+def test_setup_dirs(test_file_handler):
+ wf_outdirs = [
+ "_config",
+ "_checkpoint",
+ "_log_files",
+ "_metrics",
+ "_optimizer",
+ "_plots",
+ "_psf_model",
+ ]
+
+ for odir in wf_outdirs:
+ assert os.path.exists(
+ os.path.join(
+ test_file_handler._run_output_dir, test_file_handler.__dict__[odir]
+ )
+ )
diff --git a/src/wf_psf/tests/train_test.py b/src/wf_psf/tests/train_test.py
new file mode 100644
index 00000000..2a317031
--- /dev/null
+++ b/src/wf_psf/tests/train_test.py
@@ -0,0 +1,146 @@
+"""UNIT TESTS FOR PACKAGE MODULE: Train.
+
+This module contains unit tests for the wf_psf.training.train module.
+
+:Author: Jennifer Pollack
+
+"""
+
+import pytest
+from wf_psf.utils.read_config import RecursiveNamespace
+from wf_psf.training import train
+from wf_psf.psf_models import psf_models
+import tensorflow as tf
+import numpy as np
+import os
+from re import search
+
+
+cwd = os.getcwd()
+
+validation_dir = "src/wf_psf/tests/data/validation/main_random_seed"
+
+
+@pytest.fixture(scope="module")
+def tmp_checkpoint_dir(tmp_path_factory):
+ tmp_chkp_dir = tmp_path_factory.mktemp("checkpoint")
+ return str(tmp_chkp_dir)
+
+
+@pytest.fixture(scope="module")
+def tmp_optimizer_dir(tmp_path_factory):
+ tmp_optim_hist_dir = tmp_path_factory.mktemp("optim-hist")
+ return str(tmp_optim_hist_dir)
+
+
+@pytest.fixture(scope="module")
+def tmp_psf_model_dir(tmp_path_factory):
+ tmp_psf_model_dir = tmp_path_factory.mktemp("psf_model_dir")
+ return str(tmp_psf_model_dir)
+
+
+@pytest.fixture(scope="module")
+def checkpoint_dir():
+ return os.path.join(
+ cwd,
+ validation_dir,
+ "checkpoint",
+ )
+
+
+@pytest.fixture(scope="module")
+def optimizer_dir():
+ return os.path.join(
+ cwd,
+ validation_dir,
+ "optim-hist",
+ )
+
+
+@pytest.fixture(scope="module")
+def psf_model_dir():
+ return os.path.join(
+ cwd,
+ validation_dir,
+ "psf_model",
+ )
+
+
+@pytest.mark.skip(reason="Requires gpu")
+def test_train(
+ training_params,
+ training_data,
+ test_data,
+ checkpoint_dir,
+ optimizer_dir,
+ psf_model_dir,
+ tmp_checkpoint_dir,
+ tmp_optimizer_dir,
+ tmp_psf_model_dir,
+ psf_model,
+):
+ train.train(
+ training_params,
+ training_data,
+ test_data,
+ tmp_checkpoint_dir,
+ tmp_optimizer_dir,
+ tmp_psf_model_dir,
+ )
+
+ weights_type_dict = {
+ checkpoint_dir: "checkpoint_callback_",
+ psf_model_dir: "psf_model_",
+ }
+
+ # Evaluate the weights for each checkpoint callback and the final psf models wrt baseline
+ weights_basename = (
+ training_params.model_params.model_name + training_params.id_name + "_cycle"
+ )
+
+ tmp_psf_model = psf_models.get_psf_model(
+ training_params.model_params, training_params.training_hparams
+ )
+
+ for weights_dir, tmp_weights_dir in zip(
+ [checkpoint_dir, psf_model_dir], [tmp_checkpoint_dir, tmp_psf_model_dir]
+ ):
+ first_cycle = 1
+
+ if search("psf_model", weights_dir):
+ if not training_params.training_hparams.multi_cycle_params.save_all_cycles:
+ first_cycle = (
+ training_params.training_hparams.multi_cycle_params.total_cycles
+ )
+
+ # range is exclusive of its upper bound, so add 1 to include the final cycle
+ for cycle in range(
+ first_cycle,
+ training_params.training_hparams.multi_cycle_params.total_cycles + 1,
+ ):
+ basename_cycle = (
+ weights_dir
+ + "/"
+ + weights_type_dict[weights_dir]
+ + weights_basename
+ + str(cycle)
+ )
+
+ tmp_basename_cycle = (
+ tmp_weights_dir
+ + "/"
+ + weights_type_dict[weights_dir]
+ + weights_basename
+ + str(cycle)
+ )
+
+ psf_model.load_weights(basename_cycle)
+ saved_model_weights = psf_model.get_weights()
+
+ tmp_psf_model.load_weights(tmp_basename_cycle)
+ tmp_saved_model_weights = tmp_psf_model.get_weights()
+
+ diff = abs(
+ np.array(saved_model_weights) - np.array(tmp_saved_model_weights)
+ )
+ for arr in diff:
+ assert np.mean(arr) < 1.0e-9
diff --git a/src/wf_psf/training/__init__.py b/src/wf_psf/training/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/wf_psf/training/train.py b/src/wf_psf/training/train.py
new file mode 100644
index 00000000..01d3d075
--- /dev/null
+++ b/src/wf_psf/training/train.py
@@ -0,0 +1,432 @@
+"""Train.
+
+A module which defines the classes and methods
+to manage training of the psf model.
+
+:Author: Jennifer Pollack
+
+"""
+
+import sys
+import numpy as np
+import time
+import tensorflow as tf
+import tensorflow_addons as tfa
+from wf_psf.utils.read_config import read_conf
+import os
+import logging
+import wf_psf.utils.io as io
+from wf_psf.psf_models import psf_models, psf_model_semiparametric
+import wf_psf.training.train_utils as train_utils
+import wf_psf.data.training_preprocessing as training_preprocessing
+
+logger = logging.getLogger(__name__)
+
+
+def setup_training():
+ """Setup Training.
+
+ A function to setup training.
+
+
+ """
+ device_name = get_gpu_info()
+ logger.info("Found GPU at: {}".format(device_name))
+
+
+def filepath_chkp_callback(checkpoint_dir, model_name, id_name, current_cycle):
+ return (
+ checkpoint_dir
+ + "/checkpoint_callback_"
+ + model_name
+ + id_name
+ + "_cycle"
+ + str(current_cycle)
+ )
+
+
+class TrainingParamsHandler:
+ """Training Parameters Handler.
+
+ A class to handle the training parameters.
+
+ Parameters
+ ----------
+ training_params: Recursive Namespace object
+ Recursive Namespace object containing training input parameters
+
+ """
+
+ def __init__(self, training_params):
+ self.training_params = training_params
+ self.run_id_name = self.model_name + self.id_name
+ self.optimizer_params = {}
+
+ @property
+ def id_name(self):
+ """ID Name.
+
+ Return the unique ID name.
+
+ Returns
+ -------
+ id_name: str
+ A unique ID.
+ """
+ return self.training_params.id_name
+
+ @property
+ def model_name(self):
+ """PSF Model Name.
+
+ Return the PSF model name.
+
+ Returns
+ -------
+ model_name: str
+ Name of PSF model
+
+ """
+ return self.training_params.model_params.model_name
+
+ @property
+ def model_params(self):
+ """PSF Model Params.
+
+ Return the PSF model parameters.
+
+ Returns
+ -------
+ model_params: Recursive Namespace object
+ Recursive Namespace object storing PSF model parameters
+
+ """
+ return self.training_params.model_params
+
+ @property
+ def training_hparams(self):
+ """Training Hyperparameters.
+
+ Return the training hyperparameters.
+
+ Returns
+ -------
+ training_hparams: Recursive Namespace object
+ Recursive Namespace object storing training hyper parameters
+
+ """
+ return self.training_params.training_hparams
+
+ @property
+ def multi_cycle_params(self):
+ """Training Multi Cycle Parameters.
+
+ Return the multi-cycle training parameters.
+
+ Returns
+ -------
+ multi_cycle_params: Recursive Namespace object
+ Recursive Namespace object storing training multi-cycle parameters
+
+ """
+ return self.training_hparams.multi_cycle_params
+
+ @property
+ def total_cycles(self):
+ """Total Number of Cycles.
+
+ Return the total number of training cycles.
+
+ Returns
+ -------
+ total_cycles: int
+ Total number of cycles for training
+ """
+ return self.multi_cycle_params.total_cycles
+
+ @property
+ def n_epochs_params(self):
+ """Number of Epochs for Parametric PSF model.
+
+ Return the per-cycle numbers of epochs for training the parametric PSF model.
+
+ Returns
+ -------
+ n_epochs_params: list
+ List of number of epochs for training parametric PSF model.
+
+ """
+ return self.multi_cycle_params.n_epochs_params
+
+ @property
+ def n_epochs_non_params(self):
+ """Number of Epochs for Non-parametric PSF model.
+
+ Return the per-cycle numbers of epochs for training the non-parametric PSF model.
+
+ Returns
+ -------
+ n_epochs_non_params: list
+ List of number of epochs for training non-parametric PSF model.
+
+ """
+ return self.multi_cycle_params.n_epochs_non_params
+
+ @property
+ def learning_rate_params(self):
+ """Parametric Model Learning Rate.
+
+ Return the per-cycle learning rates for the parametric PSF model.
+
+ Returns
+ -------
+ learning_rate_params: list
+ List containing learning rate for parametric PSF model
+
+ """
+ return self.multi_cycle_params.learning_rate_params
+
+ @property
+ def learning_rate_non_params(self):
+ """Non-parametric Model Learning Rate.
+
+ Return the per-cycle learning rates for the non-parametric PSF model.
+
+ Returns
+ -------
+ learning_rate_non_params: list
+ List containing learning rate for non-parametric PSF model
+
+ """
+ return self.multi_cycle_params.learning_rate_non_params
+
+ def _prepare_callbacks(self, checkpoint_dir, current_cycle):
+ """Prepare Callbacks.
+
+ A function to prepare to save the model as a callback.
+
+ Parameters
+ ----------
+ checkpoint_dir: str
+ Checkpoint directory
+ current_cycle: int
+ Integer representing the current cycle
+
+ Returns
+ -------
+ keras.callbacks.ModelCheckpoint class
+ Class to save the Keras model or model weights at some frequency
+
+ """
+
+ # -----------------------------------------------------
+ logger.info(f"Preparing Keras model callback...")
+ return tf.keras.callbacks.ModelCheckpoint(
+ filepath_chkp_callback(
+ checkpoint_dir, self.model_name, self.id_name, current_cycle
+ ),
+ monitor="mean_squared_error",
+ verbose=1,
+ save_best_only=True,
+ save_weights_only=True,
+ mode="min",
+ save_freq="epoch",
+ options=None,
+ )
+
+
+def get_gpu_info():
+ """Get GPU Information.
+
+ A function to return GPU
+ device name.
+
+ Returns
+ -------
+ device_name: str
+ Name of GPU device
+
+ """
+ device_name = tf.test.gpu_device_name()
+ return device_name
+
+
+def train(
+ training_params,
+ training_data,
+ test_data,
+ checkpoint_dir,
+ optimizer_dir,
+ psf_model_dir,
+):
+ """Train.
+
+ A function to train the psf model.
+
+ Parameters
+ ----------
+ training_params: Recursive Namespace object
+ Recursive Namespace object containing the training parameters
+ training_data: obj
+ TrainingDataHandler object containing the training data parameters
+ test_data: object
+ TestDataHandler object containing the test data parameters
+ checkpoint_dir: str
+ Absolute path to checkpoint directory
+ optimizer_dir: str
+ Absolute path to optimizer history directory
+ psf_model_dir: str
+ Absolute path to psf model directory
+
+ """
+ # Start measuring elapsed time
+ starting_time = time.time()
+
+ training_handler = TrainingParamsHandler(training_params)
+
+ psf_model = psf_models.get_psf_model(
+ training_handler.model_params,
+ training_handler.training_hparams,
+ )
+
+ logger.info(f"PSF Model class: `{training_handler.model_name}` initialized...")
+ # Model Training
+ # -----------------------------------------------------
+ # Save optimisation history in the saving dict
+ saving_optim_hist = {}
+
+ # Perform all the necessary cycles
+ current_cycle = 0
+
+ while training_handler.total_cycles > current_cycle:
+ current_cycle += 1
+
+ # If projected learning is enabled project DD_features.
+ if psf_model.project_dd_features: # TODO: change this
+ psf_model.project_DD_features(
+ psf_model.zernike_maps
+ ) # TODO: make this a callable function
+ logger.info("Project non-param DD features onto param model: done!")
+ if psf_model.reset_dd_features:
+ psf_model.tf_np_poly_opd.init_vars()
+ logger.info("DD features reset to random initialisation.")
+
+ # Prepare the model checkpoint callback
+ # -----------------------------------------------------
+
+ model_chkp_callback = training_handler._prepare_callbacks(
+ checkpoint_dir, current_cycle
+ )
+
+ # Prepare the optimizers
+ param_optim = tfa.optimizers.RectifiedAdam(
+ learning_rate=training_handler.learning_rate_params[current_cycle - 1]
+ )
+ non_param_optim = tfa.optimizers.RectifiedAdam(
+ learning_rate=training_handler.learning_rate_non_params[current_cycle - 1]
+ )
+ logger.info("Starting cycle {}..".format(current_cycle))
+ start_cycle = time.time()
+
+ # Compute training per cycle
+ (
+ psf_model,
+ hist_param,
+ hist_non_param,
+ ) = train_utils.general_train_cycle(
+ psf_model,
+ # training data
+ inputs=[
+ training_data.train_dataset["positions"],
+ training_data.sed_data,
+ ],
+ outputs=training_data.train_dataset["noisy_stars"],
+ validation_data=(
+ [
+ test_data.test_dataset["positions"],
+ test_data.sed_data,
+ ],
+ test_data.test_dataset["stars"],
+ ),
+ batch_size=training_handler.training_hparams.batch_size,
+ learning_rate_param=training_handler.learning_rate_params[
+ current_cycle - 1
+ ],
+ learning_rate_non_param=training_handler.learning_rate_non_params[
+ current_cycle - 1
+ ],
+ n_epochs_param=training_handler.n_epochs_params[current_cycle - 1],
+ n_epochs_non_param=training_handler.n_epochs_non_params[current_cycle - 1],
+ param_optim=param_optim,
+ non_param_optim=non_param_optim,
+ param_loss=None,
+ non_param_loss=None,
+ param_metrics=None,
+ non_param_metrics=None,
+ param_callback=None,
+ non_param_callback=None,
+ general_callback=[model_chkp_callback],
+ first_run=(current_cycle == 1),
+ cycle_def=training_handler.multi_cycle_params.cycle_def,
+ use_sample_weights=training_handler.model_params.use_sample_weights,
+ verbose=2,
+ )
+
+ # Save the weights at the end of the nth cycle
+ if training_handler.multi_cycle_params.save_all_cycles:
+ psf_model.save_weights(
+ psf_model_dir
+ + "/psf_model_"
+ + training_handler.model_name
+ + training_handler.id_name
+ + "_cycle"
+ + str(current_cycle)
+ )
+
+ end_cycle = time.time()
+ logger.info(
+ "Cycle{} elapsed time: {}".format(current_cycle, end_cycle - start_cycle)
+ )
+
+ # Save optimisation history in the saving dict
+ if psf_model.save_optim_history_param:
+ saving_optim_hist[
+ "param_cycle{}".format(current_cycle)
+ ] = hist_param.history
+ if psf_model.save_optim_history_nonparam:
+ saving_optim_hist[
+ "nonparam_cycle{}".format(current_cycle)
+ ] = hist_non_param.history
+
+ # Save last cycle if no cycles were saved
+ if not training_handler.multi_cycle_params.save_all_cycles:
+ psf_model.save_weights(
+ psf_model_dir
+ + "/psf_model_"
+ + training_handler.model_name
+ + training_handler.id_name
+ + "_cycle"
+ + str(current_cycle)
+ )
+
+ # Save optimisation history dictionary
+ np.save(
+ optimizer_dir
+ + "/optim_hist_"
+ + training_handler.model_name
+ + training_handler.id_name
+ + ".npy",
+ saving_optim_hist,
+ )
+
+ # Log total elapsed time
+ final_time = time.time()
+ logger.info("\nTotal elapsed time: %f" % (final_time - starting_time))
+ logger.info("\nTraining complete.")
diff --git a/wf_psf/train_utils.py b/src/wf_psf/training/train_utils.py
similarity index 85%
rename from wf_psf/train_utils.py
rename to src/wf_psf/training/train_utils.py
index 7ace633a..6866a59c 100644
--- a/wf_psf/train_utils.py
+++ b/src/wf_psf/training/train_utils.py
@@ -1,20 +1,26 @@
import numpy as np
import tensorflow as tf
-from wf_psf.tf_psf_field import build_PSF_model
-from wf_psf.utils import NoiseEstimator
+from wf_psf.psf_models.tf_psf_field import build_PSF_model
+from wf_psf.utils.utils import NoiseEstimator
+import logging
+
+logger = logging.getLogger(__name__)
class L1ParamScheduler(tf.keras.callbacks.Callback):
"""L1 rate scheduler which sets the L1 rate according to schedule.
- Arguments:
- l1_schedule_rule: a function that takes an epoch index
- (integer, indexed from 0) and current l1_rate
+ Parameters
+ ----------
+ l1_schedule_rule: function
+ a function that takes an epoch index
+ (integer, indexed from 0) and current l1_rate
as inputs and returns a new l1_rate as output (float).
"""
def __init__(self, l1_schedule_rule):
super(L1ParamScheduler, self).__init__()
self.l1_schedule_rule = l1_schedule_rule
def on_epoch_begin(self, epoch, logs=None):
@@ -30,7 +36,7 @@ def on_epoch_begin(self, epoch, logs=None):
def l1_schedule_rule(epoch_n, l1_rate):
if epoch_n != 0 and epoch_n % 10 == 0:
scheduled_l1_rate = l1_rate / 2
- print("\nEpoch %05d: L1 rate is %0.4e." % (epoch_n, scheduled_l1_rate))
+ logger.info("\nEpoch %05d: L1 rate is %0.4e." % (epoch_n, scheduled_l1_rate))
return scheduled_l1_rate
else:
return l1_rate
@@ -40,10 +46,10 @@ def general_train_cycle(
tf_semiparam_field,
inputs,
outputs,
- val_data,
+ validation_data,
batch_size,
- l_rate_param,
- l_rate_non_param,
+ learning_rate_param,
+ learning_rate_non_param,
n_epochs_param,
n_epochs_non_param,
param_optim=None,
@@ -56,19 +62,19 @@ def general_train_cycle(
non_param_callback=None,
general_callback=None,
first_run=False,
- cycle_def='complete',
+ cycle_def="complete",
use_sample_weights=False,
- verbose=1
+ verbose=1,
):
- """ Function to do a BCD iteration on the model.
-
+ """Function to do a BCD iteration on the model.
+
Define the model optimisation.
For the parametric part we are using:
- ``l_rate_param = 1e-2``, ``n_epochs_param = 20``.
+ ``learning_rate_param = 1e-2``, ``n_epochs_param = 20``.
For the non-parametric part we are using:
- ``l_rate_non_param = 1.0``, ``n_epochs_non_param = 100``.
-
+ ``learning_rate_non_param = 1.0``, ``n_epochs_non_param = 100``.
+
Parameters
----------
tf_semiparam_field: tf.keras.Model
@@ -77,14 +83,14 @@ def general_train_cycle(
Inputs used for Model.fit()
outputs: Tensor
Outputs used for Model.fit()
- val_data: Tuple
- Validation data used for Model.fit().
+ validation_data: Tuple
+ Validation data used for Model.fit().
Tuple of input, output validation data
batch_size: int
Batch size for the training.
- l_rate_param: float
+ learning_rate_param: float
Learning rate for the parametric part
- l_rate_non_param: float
+ learning_rate_non_param: float
Learning rate for the non-parametric part
n_epochs_param: int
Number of epochs for the parametric part
@@ -143,7 +149,7 @@ def general_train_cycle(
# Initialize return variables
hist_param = None
hist_non_param = None
-
+
# Parametric train
# Define Loss
@@ -155,7 +161,7 @@ def general_train_cycle(
# Define optimisers
if param_optim is None:
optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate_param,
+ learning_rate=learning_rate_param,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
@@ -197,7 +203,7 @@ def general_train_cycle(
if strategy_opt == 0:
# Parameters
- max_w = 2.
+ max_w = 2.0
min_w = 0.1
# Epsilon is to avoid outliers
epsilon = np.median(variances) * 0.1
@@ -217,14 +223,18 @@ def general_train_cycle(
else:
sample_weight = None
- # Define the training cycle
- if cycle_def == 'parametric' or cycle_def == 'complete' or cycle_def == 'only-parametric':
+ # Define the training cycle
+ if (
+ cycle_def == "parametric"
+ or cycle_def == "complete"
+ or cycle_def == "only-parametric"
+ ):
# If it is the first run
if first_run:
# Set the non-parametric model to zero
# With alpha to zero its already enough
tf_semiparam_field.set_zero_nonparam()
- if cycle_def == 'only-parametric':
+ if cycle_def == "only-parametric":
# Set the non-parametric part to zero
tf_semiparam_field.set_zero_nonparam()
@@ -239,27 +249,31 @@ def general_train_cycle(
metrics=metrics,
)
# Train the parametric part
- print('Starting parametric update..')
+ logger.info("Starting parametric update..")
hist_param = tf_semiparam_field.fit(
x=inputs,
y=outputs,
batch_size=batch_size,
epochs=n_epochs_param,
- validation_data=val_data,
+ validation_data=validation_data,
callbacks=callbacks,
sample_weight=sample_weight,
- verbose=verbose
+ verbose=verbose,
)
## Non parametric train
- # Define the training cycle
- if cycle_def == 'non-parametric' or cycle_def == 'complete' or cycle_def == 'only-non-parametric':
+ # Define the training cycle
+ if (
+ cycle_def == "non-parametric"
+ or cycle_def == "complete"
+ or cycle_def == "only-non-parametric"
+ ):
# If it is the first run
if first_run:
# Set the non-parametric model to non-zero
# With alpha to zero its already enough
tf_semiparam_field.set_nonzero_nonparam()
- if cycle_def == 'only-non-parametric':
+ if cycle_def == "only-non-parametric":
# Set the parametric layer to zero
coeff_mat = tf_semiparam_field.get_coeff_matrix()
tf_semiparam_field.assign_coeff_matrix(tf.zeros_like(coeff_mat))
@@ -276,7 +290,7 @@ def general_train_cycle(
# Define optimiser
if non_param_optim is None:
optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate_non_param,
+ learning_rate=learning_rate_non_param,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-07,
@@ -309,17 +323,17 @@ def general_train_cycle(
loss=loss,
metrics=metrics,
)
- # Train the parametric part
- print('Starting non-parametric update..')
+ # Train the nonparametric part
+ logger.info("Starting non-parametric update..")
hist_non_param = tf_semiparam_field.fit(
x=inputs,
y=outputs,
batch_size=batch_size,
epochs=n_epochs_non_param,
- validation_data=val_data,
+ validation_data=validation_data,
callbacks=callbacks,
sample_weight=sample_weight,
- verbose=verbose
+ verbose=verbose,
)
return tf_semiparam_field, hist_param, hist_non_param
@@ -329,9 +343,9 @@ def param_train_cycle(
tf_semiparam_field,
inputs,
outputs,
- val_data,
+ validation_data,
batch_size,
- l_rate,
+ learning_rate,
n_epochs,
param_optim=None,
param_loss=None,
@@ -339,11 +353,9 @@ def param_train_cycle(
param_callback=None,
general_callback=None,
use_sample_weights=False,
- verbose=1
+ verbose=1,
):
- """ Training cycle for parametric model.
-
- """
+ """Training cycle for parametric model."""
# Define Loss
if param_loss is None:
loss = tf.keras.losses.MeanSquaredError()
@@ -353,7 +365,11 @@ def param_train_cycle(
# Define optimiser
if param_optim is None:
optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
+ learning_rate=learning_rate,
+ beta_1=0.9,
+ beta_2=0.999,
+ epsilon=1e-07,
+ amsgrad=False,
)
else:
optimizer = param_optim
@@ -390,7 +406,7 @@ def param_train_cycle(
if strategy_opt == 0:
# Parameters
- max_w = 2.
+ max_w = 2.0
min_w = 0.1
# Epsilon is to avoid outliers
epsilon = np.median(variances) * 0.1
@@ -417,16 +433,16 @@ def param_train_cycle(
)
# Train the parametric part
- print('Starting parametric update..')
+ logger.info("Starting parametric update..")
hist_param = tf_semiparam_field.fit(
x=inputs,
y=outputs,
batch_size=batch_size,
epochs=n_epochs,
- validation_data=val_data,
+ validation_data=validation_data,
callbacks=callbacks,
sample_weight=sample_weight,
- verbose=verbose
+ verbose=verbose,
)
return tf_semiparam_field, hist_param
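
The sample-weight hunks above only expose the clipping bounds (`max_w`, `min_w`) and the outlier guard `epsilon = np.median(variances) * 0.1`; the full weighting code is outside this diff. A plausible inverse-variance sketch under those assumptions (the normalisation to mean 1 is an assumption, not confirmed by the patch):

```python
import numpy as np

def sketch_sample_weights(variances: np.ndarray) -> np.ndarray:
    max_w, min_w = 2.0, 0.1
    epsilon = np.median(variances) * 0.1  # keeps tiny variances from exploding the weights
    weights = 1.0 / (variances + epsilon)
    weights /= np.mean(weights)           # assumed normalisation to mean 1
    return np.clip(weights, min_w, max_w)

print(sketch_sample_weights(np.array([0.5, 1.0, 2.0, 10.0])))
```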
diff --git a/src/wf_psf/utils/__init__.py b/src/wf_psf/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/wf_psf/utils/configs_handler.py b/src/wf_psf/utils/configs_handler.py
new file mode 100644
index 00000000..bb8dc688
--- /dev/null
+++ b/src/wf_psf/utils/configs_handler.py
@@ -0,0 +1,647 @@
+"""Configs_Handler.
+
+A module which provides general utility methods
+to manage the parameters of the configuration files.
+
+:Author: Jennifer Pollack
+
+"""
+import numpy as np
+from wf_psf.utils.read_config import read_conf
+from wf_psf.data.training_preprocessing import TrainingDataHandler, TestDataHandler
+from wf_psf.training import train
+from wf_psf.psf_models import psf_models
+from wf_psf.metrics.metrics_interface import evaluate_model
+from wf_psf.plotting.plots_interface import plot_metrics
+import logging
+from wf_psf.utils.io import FileIOHandler
+import os
+import re
+import glob
+
+
+logger = logging.getLogger(__name__)
+
+CONFIG_CLASS = {}
+
+
+def register_configclass(config_class):
+ """Register Config Class.
+
+ A wrapper function to register all config classes
+ in a dictionary.
+
+ Parameters
+ ----------
+ config_class: type
+ Config Class
+
+ Returns
+ -------
+ config_class: type
+ Config class
+
+ """
+ for id in config_class.ids:
+ CONFIG_CLASS[id] = config_class
+
+ return config_class
+
+
+def set_run_config(config_name):
+ """Set Config Class.
+
+ A function to select the class of
+ a configuration from CONFIG_CLASS dictionary.
+
+ Parameters
+ ----------
+ config_name: str
+ Name of config
+
+ Returns
+ -------
+ config_class: class
+ Name of config class
+
+ """
+ try:
+ config_id = [id for id in CONFIG_CLASS.keys() if re.search(id, config_name)][0]
+ config_class = CONFIG_CLASS[config_id]
+ except IndexError:
+ logger.exception("Config name entered is invalid. Check your config settings.")
+ exit()
+
+ return config_class
+
+
+def get_run_config(run_config, config_params, file_handler):
+ """Get Run Configuration.
+
+ A function to get the configuration
+ for a wf-psf run.
+
+ Parameters
+ ----------
+ run_config: str
+ Name of the type of run configuration
+ config_params: str
+ Path of the run configuration file
+ file_handler: object
+ A class instance of FileIOHandler
+
+ Returns
+ -------
+ config_class: object
+ A class instance of the selected configuration class.
+
+ """
+ config_class = set_run_config(run_config)
+
+ return config_class(config_params, file_handler)
+
+
+class ConfigParameterError(Exception):
+ """Custom Config Parameter Error exception class for specific error scenarios."""
+
+ def __init__(self, message="An error with your config settings occurred."):
+ self.message = message
+ super().__init__(self.message)
+
+
+class DataConfigHandler:
+ """DataConfigHandler.
+
+ A class to handle data configuration
+ parameters.
+
+ Parameters
+ ----------
+ data_conf: str
+ Path of the data configuration file
+ training_model_params: Recursive Namespace object
+ Recursive Namespace object containing the training model parameters
+
+ """
+
+ def __init__(self, data_conf, training_model_params):
+ try:
+ self.data_conf = read_conf(data_conf)
+ except FileNotFoundError as e:
+ logger.exception(e)
+ exit()
+ except TypeError as e:
+ logger.exception(e)
+ exit()
+
+ self.simPSF = psf_models.simPSF(training_model_params)
+ self.training_data = TrainingDataHandler(
+ self.data_conf.data.training,
+ self.simPSF,
+ training_model_params.n_bins_lda,
+ )
+ self.test_data = TestDataHandler(
+ self.data_conf.data.test,
+ self.simPSF,
+ training_model_params.n_bins_lda,
+ )
+
+
+@register_configclass
+class TrainingConfigHandler:
+ """TrainingConfigHandler.
+
+ A class to handle training configuration
+ parameters.
+
+ Parameters
+ ----------
+ ids: tuple
+ A tuple containing a string id for the Configuration Class
+ training_conf: str
+ Path of the training configuration file
+ file_handler: object
+ An instance of the FileIOHandler class
+
+ """
+
+ ids = ("training_conf",)
+
+ def __init__(self, training_conf, file_handler):
+ self.training_conf = read_conf(training_conf)
+ self.file_handler = file_handler
+ self.data_conf = DataConfigHandler(
+ os.path.join(
+ file_handler.config_path, self.training_conf.training.data_config
+ ),
+ self.training_conf.training.model_params,
+ )
+ self.file_handler.copy_conffile_to_output_dir(
+ self.training_conf.training.data_config
+ )
+ self.checkpoint_dir = file_handler.get_checkpoint_dir(
+ self.file_handler._run_output_dir
+ )
+ self.optimizer_dir = file_handler.get_optimizer_dir(
+ self.file_handler._run_output_dir
+ )
+ self.psf_model_dir = file_handler.get_psf_model_dir(
+ self.file_handler._run_output_dir
+ )
+
+ def run(self):
+ """Run.
+
+ A function to run wave-diff according to the
+ input configuration.
+
+ """
+ train.train(
+ self.training_conf.training,
+ self.data_conf.training_data,
+ self.data_conf.test_data,
+ self.checkpoint_dir,
+ self.optimizer_dir,
+ self.psf_model_dir,
+ )
+
+ if self.training_conf.training.metrics_config is not None:
+ self.file_handler.copy_conffile_to_output_dir(
+ self.training_conf.training.metrics_config
+ )
+
+ metrics = MetricsConfigHandler(
+ os.path.join(
+ self.file_handler.config_path,
+ self.training_conf.training.metrics_config,
+ ),
+ self.file_handler,
+ self.training_conf,
+ )
+
+ metrics.run()
+
+
+@register_configclass
+class MetricsConfigHandler:
+ """MetricsConfigHandler.
+
+ A class to handle metrics configuration
+ parameters.
+
+ Parameters
+ ----------
+ ids: tuple
+ A tuple containing a string id for the Configuration Class
+ metrics_conf: str
+ Path to the metrics configuration file
+ file_handler: object
+ An instance of the FileIOHandler
+ training_conf: RecursiveNamespace object
+ RecursiveNamespace object containing the training configuration parameters
+
+
+ """
+
+ ids = ("metrics_conf",)
+
+ def __init__(self, metrics_conf, file_handler, training_conf=None):
+ self._metrics_conf = read_conf(metrics_conf)
+ self._file_handler = file_handler
+ self.trained_model_path = self._get_trained_model_path(training_conf)
+ self._training_conf = self._load_training_conf(training_conf)
+
+ @property
+ def metrics_conf(self):
+ return self._metrics_conf
+
+ @property
+ def metrics_dir(self):
+ return self._file_handler.get_metrics_dir(self._file_handler._run_output_dir)
+
+ @property
+ def training_conf(self):
+ return self._training_conf
+
+ @property
+ def plotting_conf(self):
+ return self.metrics_conf.metrics.plotting_config
+
+ @property
+ def data_conf(self):
+ return self._load_data_conf()
+
+ @property
+ def psf_model(self):
+ return psf_models.get_psf_model(
+ self.training_conf.training.model_params,
+ self.training_conf.training.training_hparams,
+ )
+
+ @property
+ def weights_path(self):
+ return psf_models.get_psf_model_weights_filepath(self.weights_basename_filepath)
+
+ def _get_trained_model_path(self, training_conf):
+ """Get Trained Model Path.
+
+ Helper method to get the trained model path.
+
+ Parameters
+ ----------
+ training_conf: None or RecursiveNamespace
+ None type or RecursiveNamespace
+
+ Returns
+ -------
+ str
+ A string representing the path to the trained model output run directory.
+
+ """
+ if training_conf is None:
+ try:
+ return self._metrics_conf.metrics.trained_model_path
+
+ except TypeError as e:
+ logger.exception(e)
+ raise ConfigParameterError(
+ "Metrics config file trained model path or config values are empty."
+ )
+ else:
+ return os.path.join(
+ self._file_handler.output_path,
+ self._file_handler.parent_output_dir,
+ self._file_handler.workdir,
+ )
+
+ def _load_training_conf(self, training_conf):
+ """Load Training Conf.
+ Load the training configuration if training_conf is not provided.
+
+ Parameters
+ ----------
+ training_conf: None or RecursiveNamespace
+ None type or a RecursiveNamespace storing the training configuration parameter settings.
+
+ Returns
+ -------
+ RecursiveNamespace storing the training configuration parameter settings.
+
+ """
+ if training_conf is None:
+ try:
+ return read_conf(
+ os.path.join(
+ self._file_handler.get_config_dir(self.trained_model_path),
+ self._metrics_conf.metrics.trained_model_config,
+ )
+ )
+ except TypeError as e:
+ logger.exception(e)
+ raise ConfigParameterError(
+ "Metrics config file trained model path or config values are empty."
+ )
+ else:
+ return training_conf
+
+ def _load_data_conf(self):
+ """Load Data Conf.
+
+ A method to load the data configuration file
+ and return an instance of DataConfigHandler class.
+
+ Returns
+ -------
+ An instance of the DataConfigHandler class.
+ """
+ try:
+ return DataConfigHandler(
+ os.path.join(
+ self._file_handler.config_path,
+ self.training_conf.training.data_config,
+ ),
+ self.training_conf.training.model_params,
+ )
+ except TypeError as e:
+ logger.exception(e)
+ raise ConfigParameterError("Data configuration loading error.")
+
+ @property
+ def weights_basename_filepath(self):
+ """Get PSF model weights filepath.
+
+        A property to return the basename of the user-specified PSF model weights path.
+
+ Returns
+ -------
+ weights_basename: str
+ The basename of the psf model weights to be loaded.
+
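+        Examples
+        --------
+        The returned glob pattern has the form (all values illustrative)::
+
+            <trained_model_path>/<model_save_path>/<model_save_path>*_<model_name>*<id_name>_cycle<N>*
+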
+ """
+ return os.path.join(
+ self.trained_model_path,
+ self.metrics_conf.metrics.model_save_path,
+ (
+ f"{self.metrics_conf.metrics.model_save_path}*_{self.training_conf.training.model_params.model_name}"
+ f"*{self.training_conf.training.id_name}_cycle{self.metrics_conf.metrics.saved_training_cycle}*"
+ ),
+ )
+
+ def call_plot_config_handler_run(self, model_metrics):
+ """Make Metrics Plots.
+
+ A function to call the PlottingConfigHandler run
+ command to generate metrics plots.
+
+ Parameters
+ ----------
+ model_metrics: dict
+ A dictionary storing the metrics
+ output generated during evaluation
+ of the trained PSF model.
+
+ """
+ self._plotting_conf = os.path.join(
+ self._file_handler.config_path,
+ self.plotting_conf,
+ )
+
+ plots_config_handler = PlottingConfigHandler(
+ self._plotting_conf,
+ self._file_handler,
+ )
+
+ # Update metrics_confs dict with latest result
+ plots_config_handler.metrics_confs[
+ self._file_handler.workdir
+ ] = self.metrics_conf
+
+ # Update metric results dict with latest result
+ plots_config_handler.list_of_metrics_dict[self._file_handler.workdir] = [
+ {
+ self.training_conf.training.model_params.model_name
+ + self.training_conf.training.id_name: [model_metrics]
+ }
+ ]
+
+ plots_config_handler.run()
+
+ def run(self):
+ """Run.
+
+        A function to run the metrics evaluation
+        on the trained PSF model.
+
+ """
+ logger.info(
+ "Running metrics evaluation on psf model: {}".format(self.weights_path)
+ )
+
+ model_metrics = evaluate_model(
+ self.metrics_conf.metrics,
+ self.training_conf.training,
+ self.data_conf.training_data,
+ self.data_conf.test_data,
+ self.psf_model,
+ self.weights_path,
+ self.metrics_dir,
+ )
+
+ if self.plotting_conf is not None:
+ self._file_handler.copy_conffile_to_output_dir(
+ self.metrics_conf.metrics.plotting_config
+ )
+ self.call_plot_config_handler_run(model_metrics)
+
+
+@register_configclass
+class PlottingConfigHandler:
+ """PlottingConfigHandler.
+
+ A class to handle plotting config
+ settings.
+
+ Parameters
+ ----------
+ ids: tuple
+ A tuple containing a string id for the Configuration Class
+ plotting_conf: str
+ Name of plotting configuration file
+ file_handler: obj
+ An instance of the FileIOHandler class
+
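+    Examples
+    --------
+    A minimal usage sketch (the path is hypothetical):
+
+    >>> plots_handler = PlottingConfigHandler(
+    ...     "/path/to/config/plotting_config.yaml", file_handler
+    ... )  # doctest: +SKIP
+    >>> plots_handler.run()  # doctest: +SKIP
+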
+ """
+
+ ids = ("plotting_conf",)
+
+ def __init__(self, plotting_conf, file_handler):
+ self.plotting_conf = read_conf(plotting_conf)
+ self.file_handler = file_handler
+ self.metrics_confs = {}
+ self.check_and_update_metrics_confs()
+ self.list_of_metrics_dict = self.make_dict_of_metrics()
+ self.plots_dir = self.file_handler.get_plots_dir(
+ self.file_handler._run_output_dir
+ )
+
+ def check_and_update_metrics_confs(self):
+ """Check and Update Metrics Confs.
+
+        A function to check whether the user provided input metrics
+        directories to add to the metrics_confs dictionary.
+
+ """
+ if self.plotting_conf.plotting_params.metrics_dir:
+ self._update_metrics_confs()
+
+ def make_dict_of_metrics(self):
+ """Make dictionary of metrics.
+
+        A function to create a dictionary of metrics for each run.
+
+ Returns
+ -------
+ dict
+ A dictionary containing metrics or an empty dictionary.
+
+ """
+ if self.plotting_conf.plotting_params.metrics_dir:
+ return self.load_metrics_into_dict()
+ else:
+ return {}
+
+ def _update_metrics_confs(self):
+ """Update Metrics Configurations.
+
+ A method to update the metrics_confs dictionary
+ with each set of metrics configuration parameters
+ provided as input.
+
+ """
+ for wf_dir, metrics_conf in zip(
+ self.plotting_conf.plotting_params.metrics_dir,
+ self.plotting_conf.plotting_params.metrics_config,
+ ):
+ self.metrics_confs[wf_dir] = read_conf(
+ os.path.join(
+ self.plotting_conf.plotting_params.metrics_output_path,
+ self.file_handler.get_config_dir(wf_dir),
+ metrics_conf,
+ )
+ )
+
+ def _metrics_run_id_name(self, wf_outdir, metrics_params):
+ """Get Metrics Run ID Name.
+
+        A function to generate the run id name
+        for the metrics of a trained model.
+
+ Parameters
+ ----------
+ wf_outdir: str
+ Name of the wf-psf run output directory
+ metrics_params: RecursiveNamespace Object
+            RecursiveNamespace object containing the metrics parameters used to evaluate the trained model.
+
+ Returns
+ -------
+ metrics_run_id_name: list
+ List containing the model name and id for each training run
+ """
+
+ try:
+ training_conf = read_conf(
+ os.path.join(
+ metrics_params.metrics.trained_model_path,
+ metrics_params.metrics.trained_model_config,
+ )
+ )
+ id_name = training_conf.training.id_name
+ model_name = training_conf.training.model_params.model_name
+ return [model_name + id_name]
+
+ except (TypeError, FileNotFoundError):
+ logger.info("Trained model path not provided...")
+ logger.info(
+ "Trying to retrieve training config file from workdir: {}".format(
+ wf_outdir
+ )
+ )
+
+ training_confs = [
+ read_conf(training_conf)
+ for training_conf in glob.glob(
+ os.path.join(
+ self.plotting_conf.plotting_params.metrics_output_path,
+ self.file_handler.get_config_dir(wf_outdir),
+ "training*",
+ )
+ )
+ ]
+
+ run_ids = [
+ training_conf.training.model_params.model_name
+ + training_conf.training.id_name
+ for training_conf in training_confs
+ ]
+
+ return run_ids
+        except Exception:
+            logger.exception("Error retrieving the training configuration.")
+
+ def load_metrics_into_dict(self):
+ """Load Metrics into Dictionary.
+
+        A method to load the metrics files of
+        trained models from previous runs into a
+        dictionary.
+
+        Returns
+        -------
+        metrics_dict: dict
+            A dictionary containing all of the metrics from the loaded metrics files.
+
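+        Examples
+        --------
+        Metrics files are expected at paths of the form (all values illustrative)::
+
+            <metrics_output_path>/<workdir>/metrics/metrics-<model_name><id_name>.npy
+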
+ """
+ metrics_dict = {}
+
+ for k, v in self.metrics_confs.items():
+ run_id_names = self._metrics_run_id_name(k, v)
+
+ metrics_dict[k] = []
+ for run_id_name in run_id_names:
+ output_path = os.path.join(
+ self.plotting_conf.plotting_params.metrics_output_path,
+ k,
+ "metrics",
+ "metrics-" + run_id_name + ".npy",
+ )
+ logger.info(
+                    "Attempting to read in metrics file...{}".format(
+ output_path
+ )
+ )
+ try:
+ metrics_dict[k].append(
+ {run_id_name: [np.load(output_path, allow_pickle=True)[()]]}
+ )
+ except FileNotFoundError:
+ logger.error(
+                    "The required file for the plots was not found. Please check your config settings."
+ )
+
+ return metrics_dict
+
+ def run(self):
+ """Run.
+
+        A function to generate metrics plots
+        according to the plotting configuration.
+
+ """
+ logger.info("Generating metric plots...")
+ plot_metrics(
+ self.plotting_conf.plotting_params,
+ self.list_of_metrics_dict,
+ self.metrics_confs,
+ self.plots_dir,
+ )
diff --git a/wf_psf/graph_utils.py b/src/wf_psf/utils/graph_utils.py
similarity index 90%
rename from wf_psf/graph_utils.py
rename to src/wf_psf/utils/graph_utils.py
index c654b86f..1f6c5463 100644
--- a/wf_psf/graph_utils.py
+++ b/src/wf_psf/utils/graph_utils.py
@@ -48,7 +48,7 @@ def __init__(
ea_gridsize=10,
distances=None,
auto_run=True,
- verbose=2
+ verbose=2,
):
r"""Initialize class attributes."""
self.obs_data = obs_data
@@ -57,7 +57,9 @@ def __init__(
self.obs_weights = obs_weights
# change to same format as that we will use for
# residual matrix R later on
- self.obs_weights = np.transpose(self.obs_weights.reshape((shap[0] * shap[1], shap[2])))
+ self.obs_weights = np.transpose(
+ self.obs_weights.reshape((shap[0] * shap[1], shap[2]))
+ )
self.n_comp = n_comp
if n_eigenvects is None:
self.n_eigenvects = self.obs_data.shape[2]
@@ -110,8 +112,8 @@ def _build_graphs(self):
R -= vect.T.dot(vect.dot(R))
if self.verbose:
print(
- " > selected e: {}\tselected a:".format(e) +
- "{}\t chosen index: {}/{}".format(a, j, self.n_eigenvects)
+ " > selected e: {}\tselected a:".format(e)
+ + "{}\t chosen index: {}/{}".format(a, j, self.n_eigenvects)
)
self.VT = np.vstack((eigenvect for eigenvect in list_eigenvects))
self.alpha = np.zeros((self.n_comp, self.VT.shape[0]))
@@ -134,7 +136,7 @@ def pick_emax(self, epsilon=1e-15):
# r_med = np.min(dist_ratios**2)
# return np.log(epsilon)/np.log(r_med)
- return 1.
+ return 1.0
def select_params(self, R, e_range, a_range):
r"""Select best graph parameters.
@@ -163,7 +165,9 @@ def select_params(self, R, e_range, a_range):
# optimize over a
Peas = np.array([gen_Pea(self.distances, current_e, a) for a in a_range])
all_eigenvects = np.array([self.gen_eigenvects(Pea) for Pea in Peas])
- ea_idx, eigen_idx, best_VT = select_vstar(all_eigenvects, R, self.obs_weights)
+ ea_idx, eigen_idx, best_VT = select_vstar(
+ all_eigenvects, R, self.obs_weights
+ )
current_a = a_range[ea_idx]
return current_e, current_a, eigen_idx, best_VT
@@ -175,7 +179,7 @@ def gen_eigenvects(self, mat):
with the smallest eigenvalues.
"""
U, s, vT = np.linalg.svd(mat, full_matrices=True)
- vT = vT[-self.n_eigenvects:]
+ vT = vT[-self.n_eigenvects :]
return vT
@@ -193,11 +197,13 @@ def select_vstar(eigenvects, R, weights):
weights: numpy.ndarray
Entry-wise weights for :math:`R_i`.
"""
- loss = np.sum((weights * R)**2)
+ loss = np.sum((weights * R) ** 2)
for i, Pea_eigenvects in enumerate(eigenvects):
for j, vect in enumerate(Pea_eigenvects):
colvect = np.copy(vect).reshape(1, -1)
- current_loss = np.sum((weights * R - colvect.T.dot(colvect.dot(weights * R)))**2)
+ current_loss = np.sum(
+ (weights * R - colvect.T.dot(colvect.dot(weights * R))) ** 2
+ )
if current_loss < loss:
loss = current_loss
eigen_idx = j
@@ -212,7 +218,7 @@ def pairwise_distances(obs_pos):
ones = np.ones(obs_pos.shape[0])
out0 = np.outer(obs_pos[:, 0], ones)
out1 = np.outer(obs_pos[:, 1], ones)
- return np.sqrt((out0 - out0.T)**2 + (out1 - out1.T)**2)
+ return np.sqrt((out0 - out0.T) ** 2 + (out1 - out1.T) ** 2)
def gen_Pea(distances, e, a):
@@ -242,8 +248,8 @@ def gen_Pea(distances, e, a):
"""
Pea = np.copy(distances**e)
- np.fill_diagonal(Pea, 1.)
- Pea = -1. / Pea
+ np.fill_diagonal(Pea, 1.0)
+ Pea = -1.0 / Pea
for i in range(Pea.shape[0]):
- Pea[i, i] = a * (np.sum(-1. * Pea[i]) - 1.)
+ Pea[i, i] = a * (np.sum(-1.0 * Pea[i]) - 1.0)
return Pea
diff --git a/src/wf_psf/utils/io.py b/src/wf_psf/utils/io.py
new file mode 100644
index 00000000..bb822448
--- /dev/null
+++ b/src/wf_psf/utils/io.py
@@ -0,0 +1,310 @@
+"""IO.
+
+A module which defines methods to
+manage wf-psf inputs and outputs.
+
+:Author: Jennifer Pollack
+
+"""
+
+import pathlib
+import os
+import logging
+import logging.config
+from datetime import datetime
+import shutil
+
+
+class FileIOHandler:
+ """FileIOHandler.
+
+ A class to manage the output
+ file structure.
+
+ Parameters
+ ----------
+ repodir_path: str
+ Absolute path to the code repository directory
+    output_path: str
+        Absolute path to output directory
+    config_path: str
+        Absolute path to the configuration files directory
+
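+    Examples
+    --------
+    A minimal usage sketch (paths are hypothetical):
+
+    >>> file_handler = FileIOHandler(
+    ...     "/path/to/wf-psf", "/path/to/output", "/path/to/config"
+    ... )  # doctest: +SKIP
+    >>> file_handler.setup_outputs()  # doctest: +SKIP
+
+    This creates a run directory of the form
+    ``<output_path>/wf-outputs/wf-outputs-<timestamp>`` containing the
+    subdirectories ``config``, ``checkpoint``, ``log-files``, ``metrics``,
+    ``optim-hist``, ``plots`` and ``psf_model``.
+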
+ """
+
+ def __init__(self, repodir_path, output_path, config_path):
+ self.repodir_path = repodir_path
+ self.output_path = output_path
+ self.config_path = config_path
+ self._timestamp = self.get_timestamp()
+ self.parent_output_dir = "wf-outputs"
+ self.workdir = self.parent_output_dir + "-" + self._timestamp
+ self._run_output_dir = os.path.join(
+ self.output_path,
+ self.parent_output_dir,
+ self.workdir,
+ )
+ self._config = "config"
+ self._checkpoint = "checkpoint"
+ self._log_files = "log-files"
+ self._metrics = "metrics"
+ self._optimizer = "optim-hist"
+ self._plots = "plots"
+ self._psf_model = "psf_model"
+
+ def setup_outputs(self):
+ """Setup Outputs.
+
+        A function to call specific functions
+        to set up the output directories and logging.
+
+ """
+ self._make_output_dir()
+ self._make_run_dir()
+ self._setup_dirs()
+ self._setup_logging()
+
+ def _make_output_dir(self):
+ """Make Output Directory.
+
+ A function to make the parent
+ output directory "wf-outputs".
+
+ """
+ pathlib.Path(os.path.join(self.output_path, self.parent_output_dir)).mkdir(
+ exist_ok=True
+ )
+
+ def _make_run_dir(self):
+ """Make Run Directory.
+
+ A function to make a unique directory
+ per run.
+
+ """
+ pathlib.Path(self._run_output_dir).mkdir(exist_ok=True)
+
+ def _setup_dirs(self):
+ """Setup Directories.
+
+ A function to setup the output
+ directories.
+
+ """
+ list_of_dirs = (
+ self._config,
+ self._checkpoint,
+ self._log_files,
+ self._metrics,
+ self._optimizer,
+ self._plots,
+ self._psf_model,
+ )
+        for dir_name in list_of_dirs:
+            self._make_dir(dir_name)
+
+ def get_timestamp(self):
+ """Get Timestamp.
+
+ A function to return the date and
+ time.
+
+ Returns
+ -------
+ timestamp: str
+ A string representation of the date and time.
+ """
+
+ timestamp = datetime.now().strftime("%Y%m%d%H%M")
+ return timestamp
+
+ def _setup_logging(self):
+ """Setup Logger.
+
+ A function to set up
+ logging.
+
+ """
+ logfile = "wf-psf_" + self._timestamp + ".log"
+ logfile = os.path.join(
+ self._run_output_dir,
+ self._log_files,
+ logfile,
+ )
+
+ logging.config.fileConfig(
+ os.path.join(self.repodir_path, "config/logging.conf"),
+ defaults={"filename": logfile},
+ disable_existing_loggers=False,
+ )
+
+ def _make_dir(self, dir_name):
+ """Make Directory.
+
+ A function to make a subdirectory
+ inside the run directory "wf-outputs-xxx".
+
+ Parameters
+ ----------
+ dir_name: str
+ Name of directory
+
+ """
+ pathlib.Path(
+ os.path.join(
+ self._run_output_dir,
+ dir_name,
+ )
+ ).mkdir(exist_ok=True)
+
+ def get_config_dir(self, run_output_dir):
+ """Get Config Directory.
+
+        A function that returns the path
+        of the config directory.
+
+        Parameters
+        ----------
+        run_output_dir: str
+            Path to the run output directory
+
+        Returns
+        -------
+        str
+            Absolute path to config directory
+
+ """
+ return os.path.join(
+ run_output_dir,
+ self._config,
+ )
+
+ def copy_conffile_to_output_dir(self, source_file):
+        """Copy Configuration File.
+
+        A function to copy a configuration file to
+        the config directory of the output run directory.
+
+        Parameters
+        ----------
+        source_file: str
+            Name of the source file located in the config path
+
+ """
+
+ source = os.path.join(self.config_path, source_file)
+ destination = os.path.join(
+ self.get_config_dir(self._run_output_dir), source_file
+ )
+
+ shutil.copy(source, destination)
+
+ def get_checkpoint_dir(self, checkpoint_dir):
+ """Get Checkpoint Directory.
+
+ A function that returns path
+ of checkpoint directory.
+
+ Parameters
+ ----------
+ checkpoint_dir: str
+ Name of checkpoint directory
+
+ Returns
+ -------
+ str
+ Absolute path to checkpoint directory
+
+ """
+ return os.path.join(
+ checkpoint_dir,
+ self._checkpoint,
+ )
+
+ def get_optimizer_dir(self, optimizer_dir):
+ """Get Optimizer Directory.
+
+ A function that returns path
+ of optimizer directory.
+
+ Parameters
+ ----------
+ optimizer_dir: str
+            Name of optimizer directory
+
+ Returns
+ -------
+ str
+ Absolute path to optimizer directory
+
+ """
+ return os.path.join(
+ optimizer_dir,
+ self._optimizer,
+ )
+
+ def get_psf_model_dir(self, psf_model_dir):
+ """Get PSF Model Directory.
+
+ A function that returns path
+ of PSF model directory.
+
+ Parameters
+ ----------
+ psf_model_dir: str
+ Name of psf model directory
+
+ Returns
+ -------
+ str
+ Absolute path to psf model directory
+
+ """
+ return os.path.join(
+ psf_model_dir,
+ self._psf_model,
+ )
+
+ def get_metrics_dir(self, metrics_dir):
+ """Get Metrics Directory.
+
+ A function that returns path
+ of metrics directory.
+
+ Parameters
+ ----------
+ metrics_dir: str
+ Name of metrics directory
+
+ Returns
+ -------
+ str
+ Absolute path to metrics directory
+
+ """
+ return os.path.join(
+ metrics_dir,
+ self._metrics,
+ )
+
+ def get_plots_dir(self, plots_dir):
+ """Get Plots Directory.
+
+ A function that returns path
+ of plots directory.
+
+ Parameters
+ ----------
+ plots_dir: str
+ Name of plots directory
+
+ Returns
+ -------
+ str
+ Absolute path to plots directory
+
+ """
+ return os.path.join(
+ plots_dir,
+ self._plots,
+ )
diff --git a/src/wf_psf/utils/read_config.py b/src/wf_psf/utils/read_config.py
new file mode 100644
index 00000000..a84480f1
--- /dev/null
+++ b/src/wf_psf/utils/read_config.py
@@ -0,0 +1,151 @@
+"""Read Config.
+
+A module which defines methods to
+read configuration files.
+
+:Author: Jennifer Pollack
+
+"""
+
+import yaml
+from yaml.scanner import ScannerError
+from yaml.parser import ParserError
+import pprint
+from types import SimpleNamespace
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class RecursiveNamespace(SimpleNamespace):
+ """RecursiveNamespace.
+
+ A child class of the type SimpleNamespace to
+ create nested namespaces (objects).
+
+ Parameters
+ ----------
+ **kwargs
+ Extra keyword arguments used to build
+ a nested namespace.
+
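+    Examples
+    --------
+    A minimal doctest-style sketch (values are illustrative):
+
+    >>> ns = RecursiveNamespace(training={"id_name": "_run1"})
+    >>> ns.training.id_name
+    '_run1'
+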
+ """
+
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ for key, val in kwargs.items():
+            if isinstance(val, dict):
+                setattr(self, key, RecursiveNamespace(**val))
+            elif isinstance(val, list):
+                setattr(self, key, list(map(self.map_entry, val)))
+
+ @staticmethod
+ def map_entry(entry):
+ """Map Entry.
+
+ A function to map a dictionary to a
+ RecursiveNamespace object.
+
+ Parameters
+ ----------
+        entry: object
+            A value that may be a dictionary or any other type
+
+        Returns
+        -------
+        RecursiveNamespace
+            A RecursiveNamespace object if the entry is a dictionary
+        entry: object
+            The original entry if it is not a dictionary
+ """
+
+ if isinstance(entry, dict):
+ return RecursiveNamespace(**entry)
+
+ return entry
+
+
+def read_yaml(conf_file):
+ """Read Yaml.
+
+ A function to read a YAML file.
+
+ Parameters
+ ----------
+ conf_file: str
+ Name of configuration file
+
+ Returns
+ -------
+ config: dict
+ A dictionary containing configuration parameters.
+ """
+ with open(conf_file) as f:
+ config = yaml.safe_load(f)
+
+ return config
+
+
+def read_conf(conf_file):
+ """Read Conf.
+
+ A function to read a yaml configuration file, recursively.
+
+ Parameters
+ ----------
+ conf_file: str
+ Name of configuration file
+
+ Returns
+ -------
+    RecursiveNamespace
+        A RecursiveNamespace object storing the configuration parameters
+
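+    Examples
+    --------
+    A minimal usage sketch, assuming a YAML file with a top-level
+    ``training`` key (file name and keys are hypothetical):
+
+    >>> conf = read_conf("training_config.yaml")  # doctest: +SKIP
+    >>> conf.training.id_name  # doctest: +SKIP
+    '_run1'
+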
+ """
+ logger.info("Loading...{}".format(conf_file))
+ with open(conf_file, "r") as f:
+ try:
+ my_conf = yaml.safe_load(f)
+ except (ParserError, ScannerError, TypeError):
+ logger.exception(
+ "There is a syntax problem with your config file. Please check {}.".format(
+ conf_file
+ )
+ )
+ exit()
+
+    if my_conf is None:
+        raise TypeError(
+            "Config file {} is empty...Stopping Program.".format(conf_file)
+        )
+
+ try:
+ return RecursiveNamespace(**my_conf)
+ except TypeError as e:
+ logger.exception(
+ "Check your config file for errors. Error Msg: {}.".format(e)
+ )
+ exit()
+
+
+def read_stream(conf_file):
+ """Read Stream.
+
+ A generator to read multiple docs in a yaml config.
+
+ Parameters
+ ----------
+    conf_file: str
+        Name of configuration file
+
+    Yields
+    ------
+    dict
+        A dictionary for each YAML document in the configuration file.
+
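+    Examples
+    --------
+    A minimal sketch, assuming ``configs.yaml`` holds multiple YAML
+    documents separated by ``---`` (the file name is hypothetical):
+
+    >>> for doc in read_stream("configs.yaml"):  # doctest: +SKIP
+    ...     print(list(doc.keys()))
+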
+ """
+    with open(conf_file, "r") as stream:
+        docs = yaml.load_all(stream, yaml.FullLoader)
+
+        for doc in docs:
+            yield doc
diff --git a/wf_psf/utils.py b/src/wf_psf/utils/utils.py
similarity index 68%
rename from wf_psf/utils.py
rename to src/wf_psf/utils/utils.py
index cc056a38..d2e3d426 100644
--- a/wf_psf/utils.py
+++ b/src/wf_psf/utils/utils.py
@@ -3,10 +3,11 @@
import tensorflow_addons as tfa
import PIL
import zernike as zk
+
try:
from cv2 import resize, INTER_AREA
except:
- print('Problem importing opencv..')
+ print("Problem importing opencv..")
import sys
@@ -19,21 +20,30 @@ def scale_to_range(input_array, old_range, new_range):
def calc_wfe(zernike_basis, zks):
- wfe = np.einsum('ijk,ijk->jk', zernike_basis, zks.reshape(-1, 1, 1))
+ wfe = np.einsum("ijk,ijk->jk", zernike_basis, zks.reshape(-1, 1, 1))
return wfe
def calc_wfe_rms(zernike_basis, zks, pupil_mask):
wfe = calc_wfe(zernike_basis, zks)
- wfe_rms = np.sqrt(np.mean((wfe[pupil_mask] - np.mean(wfe[pupil_mask]))**2))
+ wfe_rms = np.sqrt(np.mean((wfe[pupil_mask] - np.mean(wfe[pupil_mask])) ** 2))
return wfe_rms
def generate_SED_elems(SED, sim_psf_toolkit, n_bins=20):
- r"""Generate the SED elements needed for using the TF_poly_PSF.
+ """Generate SED Elements.
+
+ A function to generate the SED elements needed for using the
+    TensorFlow class TF_poly_PSF.
- sim_psf_toolkit: An instance of the SimPSFToolkit class with the correct
+ Parameters
+ ----------
+    SED:
+        Spectral energy distribution of the object
+ sim_psf_toolkit:
+ An instance of the SimPSFToolkit class with the correct
initialization values.
+ n_bins: int
+ Number of wavelength bins
"""
feasible_wv, SED_norm = sim_psf_toolkit.calc_SED_wave_values(SED, n_bins)
@@ -42,20 +52,68 @@ def generate_SED_elems(SED, sim_psf_toolkit, n_bins=20):
return feasible_N, feasible_wv, SED_norm
+def generate_SED_elems_in_tensorflow(
+ SED, sim_psf_toolkit, n_bins=20, tf_dtype=tf.float64
+):
+    """Generate SED Elements as TensorFlow Tensors.
+
+    A function to generate the SED elements needed for using the
+    TensorFlow class TF_poly_PSF.
+
+    Parameters
+    ----------
+    SED:
+        Spectral energy distribution of the object
+    sim_psf_toolkit:
+        An instance of the SimPSFToolkit class with the correct
+        initialization values.
+    n_bins: int
+        Number of wavelength bins
+    tf_dtype: tf.DType
+        TensorFlow data type
+
+    Returns
+    -------
+    list
+        List of TensorFlow tensors: feasible_N, feasible_wv and SED_norm
+ """
+
+ feasible_wv, SED_norm = sim_psf_toolkit.calc_SED_wave_values(SED, n_bins)
+ feasible_N = np.array([sim_psf_toolkit.feasible_N(_wv) for _wv in feasible_wv])
+
+ return convert_to_tf([feasible_N, feasible_wv, SED_norm], tf_dtype)
+
+
+def convert_to_tf(data, tf_dtype):
+    """Convert a list of array-like elements to TensorFlow tensors."""
+    return [tf.convert_to_tensor(x, dtype=tf_dtype) for x in data]
+
+
def generate_packed_elems(SED, sim_psf_toolkit, n_bins=20):
- r"""Generate the packed values for using the TF_poly_PSF."""
- feasible_N, feasible_wv, SED_norm = generate_SED_elems(SED, sim_psf_toolkit, n_bins=n_bins)
+ """Generate Packed Elements.
+
- tf_feasible_N = tf.convert_to_tensor(feasible_N, dtype=tf.float64)
- tf_feasible_wv = tf.convert_to_tensor(feasible_wv, dtype=tf.float64)
- tf_SED_norm = tf.convert_to_tensor(SED_norm, dtype=tf.float64)
+    A function to store the packed values for using the TF_poly_PSF.
+
+    .. note:: This function name is generic and may become obsolete in a future release.
- # returnes the packed tensors
- return [tf_feasible_N, tf_feasible_wv, tf_SED_norm]
+ Parameters
+ ----------
+    SED:
+        Spectral energy distribution of the object
+    sim_psf_toolkit:
+        An instance of the SimPSFToolkit class
+ n_bins: int
+ Number of wavelength bins
+
+ Returns
+ -------
+ list
+ List of tensors
+ """
+ feasible_N, feasible_wv, SED_norm = generate_SED_elems(
+ SED, sim_psf_toolkit, n_bins=n_bins
+ )
+
+ feasible_N = tf.convert_to_tensor(feasible_N, dtype=tf.float64)
+ feasible_wv = tf.convert_to_tensor(feasible_wv, dtype=tf.float64)
+ SED_norm = tf.convert_to_tensor(SED_norm, dtype=tf.float64)
+
+ # returns the packed tensors
+ return [feasible_N, feasible_wv, SED_norm]
def calc_poly_position_mat(pos, x_lims, y_lims, d_max):
- r""" Calculate a matrix with position polynomials.
+ r"""Calculate a matrix with position polynomials.
Scale positions to the square:
[self.x_lims[0], self.x_lims[1]] x [self.y_lims[0], self.y_lims[1]]
@@ -72,7 +130,7 @@ def calc_poly_position_mat(pos, x_lims, y_lims, d_max):
for d in range(d_max + 1):
row_idx = d * (d + 1) // 2
for p in range(d + 1):
- poly_list.append(scaled_pos_x**(d - p) * scaled_pos_y**p)
+ poly_list.append(scaled_pos_x ** (d - p) * scaled_pos_y**p)
return tf.convert_to_tensor(poly_list, dtype=tf.float32)
@@ -165,14 +223,16 @@ def zernike_generator(n_zernikes, wfe_dim):
def add_noise(image, desired_SNR):
- """ Add noise to an image to obtain a desired SNR. """
- sigma_noise = np.sqrt((np.sum(image**2)) / (desired_SNR * image.shape[0] * image.shape[1]))
+ """Add noise to an image to obtain a desired SNR."""
+ sigma_noise = np.sqrt(
+ (np.sum(image**2)) / (desired_SNR * image.shape[0] * image.shape[1])
+ )
noisy_image = image + np.random.standard_normal(image.shape) * sigma_noise
return noisy_image
class NoiseEstimator(object):
- """ Noise estimator.
+ """Noise estimator.
Parameters
----------
@@ -203,7 +263,7 @@ def _init_window(self):
for _x in range(self.img_dim[0]):
for _y in range(self.img_dim[1]):
- if np.sqrt((_x - mid_x)**2 + (_y - mid_y)**2) <= self.win_rad:
+ if np.sqrt((_x - mid_x) ** 2 + (_y - mid_y) ** 2) <= self.win_rad:
self.window[_x, _y] = False
@staticmethod
@@ -221,8 +281,8 @@ def estimate_noise(self, image):
class ZernikeInterpolation(object):
- """ Interpolate zernikes
-
+ """Interpolate zernikes
+
This class helps to interpolate zernikes using only the closest K elements
in a given dataset using a RBF interpolation.
@@ -248,10 +308,9 @@ def __init__(self, tf_pos, tf_zks, k=50, order=2):
self.order = order
def interpolate_zk(self, single_pos):
- """ Interpolate a single position
- """
+ """Interpolate a single position"""
# Compute distance
- dist = tf.math.reduce_euclidean_norm(self.tf_pos - single_pos, axis=1) * -1.
+ dist = tf.math.reduce_euclidean_norm(self.tf_pos - single_pos, axis=1) * -1.0
# Get top K elements
result = tf.math.top_k(dist, k=self.k)
# Gather useful elements from the array
@@ -275,7 +334,7 @@ def interpolate_zk(self, single_pos):
train_values=tf.expand_dims(rec_zks, axis=0),
query_points=tf.expand_dims(single_pos[tf.newaxis, :], axis=0),
order=self.order,
- regularization_weight=0.0
+ regularization_weight=0.0,
)
# Remove extra dimension required by tfa's interpolate_spline
interp_zk = tf.squeeze(interp_zk, axis=0)
@@ -283,21 +342,20 @@ def interpolate_zk(self, single_pos):
return interp_zk
def interpolate_zks(self, interp_positions):
- """ Vectorize to interpolate to each position
- """
+ """Vectorize to interpolate to each position"""
interp_zks = tf.map_fn(
self.interpolate_zk,
interp_positions,
parallel_iterations=10,
fn_output_signature=tf.float32,
- swap_memory=True
+ swap_memory=True,
)
return tf.squeeze(interp_zks, axis=1)
class IndependentZernikeInterpolation(object):
- """ Interpolate each Zernike polynomial independently
-
+ """Interpolate each Zernike polynomial independently
+
The interpolation is done independently for each Zernike polynomial.
Parameters
@@ -320,22 +378,21 @@ def __init__(self, tf_pos, tf_zks, order=2):
self.target_pos = None
def interp_one_zk(self, zk_prior):
- """ Interpolate each Zerkine polynomial independently
- """
+ """Interpolate each Zerkine polynomial independently"""
interp_zk = tfa.image.interpolate_spline(
train_points=tf.expand_dims(self.tf_pos, axis=0),
train_values=tf.expand_dims(zk_prior[:, tf.newaxis], axis=0),
query_points=tf.expand_dims(self.target_pos, axis=0),
order=self.order,
- regularization_weight=0.0
+ regularization_weight=0.0,
)
# Remove extra dimension required by tfa's interpolate_spline
return tf.squeeze(interp_zk, axis=0)
def interpolate_zks(self, target_pos):
- """ Vectorize to interpolate to each Zernike!
-
+ """Vectorize to interpolate to each Zernike!
+
Each zernike is computed indepently from the others.
"""
self.target_pos = target_pos
@@ -345,7 +402,7 @@ def interpolate_zks(self, target_pos):
tf.transpose(self.tf_zks, perm=[1, 0]),
parallel_iterations=10,
fn_output_signature=tf.float32,
- swap_memory=True
+ swap_memory=True,
)
# Remove null dimension and transpose back to have batch at input
@@ -356,7 +413,7 @@ def load_multi_cycle_params_click(args):
"""
Load multiple cycle training parameters.
- For backwards compatibility, the training parameters are received as a string,
+ For backwards compatibility, the training parameters are received as a string,
separated and stored in the args dictionary.
Parameters
@@ -369,51 +426,59 @@ def load_multi_cycle_params_click(args):
args: dictionary
The input dictionary with all multi-cycle training parameters correctly loaded.
"""
- if args['l_rate_param'] is None:
- args['l_rate_param'] = list(map(float, args['l_rate_param_multi_cycle'].split(' ')))
- if len(args['l_rate_param']) == 1:
- args['l_rate_param'] = args['l_rate_param'] * args['total_cycles']
- elif len(args['l_rate_param']) != args['total_cycles']:
+ if args["l_rate_param"] is None:
+ args["l_rate_param"] = list(
+ map(float, args["l_rate_param_multi_cycle"].split(" "))
+ )
+ if len(args["l_rate_param"]) == 1:
+ args["l_rate_param"] = args["l_rate_param"] * args["total_cycles"]
+ elif len(args["l_rate_param"]) != args["total_cycles"]:
print(
- 'Invalid argument: --l_rate_param. Expected 1 or {} values but {} were given.'.format(
- args['total_cycles'], len(args['l_rate_param'])
+ "Invalid argument: --l_rate_param. Expected 1 or {} values but {} were given.".format(
+ args["total_cycles"], len(args["l_rate_param"])
)
)
sys.exit()
- if args['l_rate_non_param'] is None:
- args['l_rate_non_param'] = list(map(float, args['l_rate_non_param_multi_cycle'].split(' ')))
- if len(args['l_rate_non_param']) == 1:
- args['l_rate_non_param'] = args['l_rate_non_param'] * args['total_cycles']
- elif len(args['l_rate_non_param']) != args['total_cycles']:
+ if args["l_rate_non_param"] is None:
+ args["l_rate_non_param"] = list(
+ map(float, args["l_rate_non_param_multi_cycle"].split(" "))
+ )
+ if len(args["l_rate_non_param"]) == 1:
+ args["l_rate_non_param"] = args["l_rate_non_param"] * args["total_cycles"]
+ elif len(args["l_rate_non_param"]) != args["total_cycles"]:
print(
- 'Invalid argument: --l_rate_non_param. Expected 1 or {} values but {} were given.'
- .format(args['total_cycles'], len(args['l_rate_non_param']))
+ "Invalid argument: --l_rate_non_param. Expected 1 or {} values but {} were given.".format(
+ args["total_cycles"], len(args["l_rate_non_param"])
+ )
)
sys.exit()
- if args['n_epochs_param'] is None:
- args['n_epochs_param'] = list(map(int, args['n_epochs_param_multi_cycle'].split(' ')))
- if len(args['n_epochs_param']) == 1:
- args['n_epochs_param'] = args['n_epochs_param'] * args['total_cycles']
- elif len(args['n_epochs_param']) != args['total_cycles']:
+ if args["n_epochs_param"] is None:
+ args["n_epochs_param"] = list(
+ map(int, args["n_epochs_param_multi_cycle"].split(" "))
+ )
+ if len(args["n_epochs_param"]) == 1:
+ args["n_epochs_param"] = args["n_epochs_param"] * args["total_cycles"]
+ elif len(args["n_epochs_param"]) != args["total_cycles"]:
print(
- 'Invalid argument: --n_epochs_param. Expected 1 or {} values but {} were given.'.format(
- args['total_cycles'], len(args['n_epochs_param'])
+ "Invalid argument: --n_epochs_param. Expected 1 or {} values but {} were given.".format(
+ args["total_cycles"], len(args["n_epochs_param"])
)
)
sys.exit()
- if args['n_epochs_non_param'] is None:
- args['n_epochs_non_param'] = list(
- map(int, args['n_epochs_non_param_multi_cycle'].split(' '))
+ if args["n_epochs_non_param"] is None:
+ args["n_epochs_non_param"] = list(
+ map(int, args["n_epochs_non_param_multi_cycle"].split(" "))
)
- if len(args['n_epochs_non_param']) == 1:
- args['n_epochs_non_param'] = args['n_epochs_non_param'] * args['total_cycles']
- elif len(args['n_epochs_non_param']) != args['total_cycles']:
+ if len(args["n_epochs_non_param"]) == 1:
+ args["n_epochs_non_param"] = args["n_epochs_non_param"] * args["total_cycles"]
+ elif len(args["n_epochs_non_param"]) != args["total_cycles"]:
print(
- 'Invalid argument: --n_epochs_non_param. Expected 1 or {} values but {} were given.'
- .format(args['total_cycles'], len(args['n_epochs_non_param']))
+ "Invalid argument: --n_epochs_non_param. Expected 1 or {} values but {} were given.".format(
+ args["total_cycles"], len(args["n_epochs_non_param"])
+ )
)
sys.exit()
@@ -421,7 +486,7 @@ def load_multi_cycle_params_click(args):
def PI_zernikes(tf_z1, tf_z2, norm_factor=None):
- """ Compute internal product between zernikes and OPDs
+ """Compute internal product between zernikes and OPDs
Defined such that Zernikes are orthonormal to each other
diff --git a/wf_psf/GenFieldPSF.py b/wf_psf/GenFieldPSF.py
deleted file mode 100644
index 45a98469..00000000
--- a/wf_psf/GenFieldPSF.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import numpy as np
-import scipy as sp
-import scipy.interpolate as sinterp
-import scipy.io as sio
-import matplotlib.pyplot as plt
-import matplotlib as mpl
-from matplotlib.colors import ListedColormap, LinearSegmentedColormap
-from mpl_toolkits.axes_grid1 import make_axes_locatable
-
-
-class GenFieldPSF(object):
-
- def __init__(
- self,
- sim_psf_toolkit,
- max_order=45,
- xlim=1e3,
- ylim=1e3,
- n_bins=35,
- lim_max_wfe_rms=None,
- verbose=0
- ):
- # Input attributes
- self.sim_psf_toolkit = sim_psf_toolkit
- self.max_order = max_order
- self.xlim = xlim
- self.ylim = ylim
- self.n_bins = n_bins
- self.verbose = verbose
- if lim_max_wfe_rms is None:
- self.lim_max_wfe_rms = sim_psf_toolkit.max_wfe_rms
-
- # Class attributes
- self.z_coeffs = []
- self.new_z_coef = None
-
- def gen_z_coeffs(self, random_gen=False):
- if len(self.z_coeffs) == 0:
- # x-variable
- if random_gen:
- rand_seed = None
- else:
- rand_seed = 1
- # Generate Z coeffs and normalize them
- # Normalize coefficients
- self.sim_psf_toolkit.gen_random_Z_coeffs(max_order=self.max_order, rand_seed=rand_seed)
- z_coef_1 = self.sim_psf_toolkit.normalize_zernikes(
- self.sim_psf_toolkit.get_z_coeffs(), self.lim_max_wfe_rms
- )
- self.z_coeffs.append(np.array(z_coef_1))
-
- # y-variable
- if random_gen:
- rand_seed = None
- else:
- rand_seed = 4
- self.sim_psf_toolkit.gen_random_Z_coeffs(max_order=self.max_order, rand_seed=rand_seed)
- z_coef_4 = self.sim_psf_toolkit.normalize_zernikes(
- self.sim_psf_toolkit.get_z_coeffs(), self.lim_max_wfe_rms
- )
- self.z_coeffs.append(np.array(z_coef_4))
-
- # global-variable
- if random_gen:
- rand_seed = None
- else:
- rand_seed = 5
- self.sim_psf_toolkit.gen_random_Z_coeffs(max_order=self.max_order, rand_seed=rand_seed)
- z_coef_5 = self.sim_psf_toolkit.normalize_zernikes(
- self.sim_psf_toolkit.get_z_coeffs(), self.lim_max_wfe_rms
- )
- self.z_coeffs.append(np.array(z_coef_5) / 5)
-
- # Normalize PSF field
- self.normalize_field()
-
- def normalize_field(self):
- """Normalize the zernike coefficients for extreme positions.
-
- At this moment I only check the extreme positions.
- This should check the WFE_RMS throughout all the field.
-
- """
- max_wfe_corner_1 = self.compute_wfe_rms(x=0, y=0)
- max_wfe_corner_2 = self.compute_wfe_rms(x=self.xlim, y=self.ylim)
- max_wfe_corner_3 = self.compute_wfe_rms(x=0, y=self.ylim)
- max_wfe_corner_4 = self.compute_wfe_rms(x=self.xlim, y=0)
-
- max_wfe_rms = np.max([
- max_wfe_corner_1, max_wfe_corner_2, max_wfe_corner_3, max_wfe_corner_4
- ])
-
- if max_wfe_rms > self.lim_max_wfe_rms:
- if self.verbose:
- print(
- 'WFE_RMS %.4f [um] is exceeding from max value: %.4f [um]. Normalizing.' %
- (max_wfe_rms, self.lim_max_wfe_rms)
- )
-
- wfe_rms_ratio = self.lim_max_wfe_rms / max_wfe_rms
- for it in range(len(self.z_coeffs)):
- self.z_coeffs[it] = self.z_coeffs[it] * wfe_rms_ratio
-
- def erase_z_coeffs(self):
- self.z_coeffs = []
-
- def zernike_coeff_map(self, x, y):
- """ Calculate Zernikes for a specific position
-
- Normalize (x,y) inputs
- (x,y) need to be in [0,self.xlim] x [0,self.ylim]
- (x_norm,y_norm) need to be in [-1, +1] x [-1, +1]
- """
- if x >= self.xlim and x <= 0:
- print('WARNING! x value: %f is not between the limits [0, %f]' % (x, self.xlim))
- if y >= self.ylim and y <= 0:
- print('WARNING! y value: %f is not between the limits [0, %f]' % (y, self.ylim))
-
- x_norm = (x - self.xlim / 2) / (self.xlim / 2)
- y_norm = (y - self.ylim / 2) / (self.ylim / 2)
-
- self.new_z_coef = self.z_coeffs[0] * x_norm + self.z_coeffs[1] * y_norm + self.z_coeffs[2]
-
- dif_wfe_rms = self.sim_psf_toolkit.check_wfe_rms(z_coeffs=self.new_z_coef)
-
- if dif_wfe_rms < 0:
- print(
- "WARNING: Position (%d,%d) posses an WFE_RMS of %f.\n" %
- (x, y, self.sim_psf_toolkit.max_wfe_rms - dif_wfe_rms)
- )
- print(
- "It exceeds the maximum allowed error (max WFE_RMS=%.4f [um])" %
- (self.sim_psf_toolkit.max_wfe_rms)
- )
-
- if self.verbose > 0:
- print("Info for position: (%d, %d)" % (x, y))
- print("WFE_RMS: %.4f [um]" % (self.sim_psf_toolkit.max_wfe_rms - dif_wfe_rms))
- print("MAX_WFE_RMS: %.4f [um]" % (self.sim_psf_toolkit.max_wfe_rms))
-
- def compute_wfe_rms(self, x, y):
- """Compute the WFE RMS for a specific field position."""
- x_norm = (x - self.xlim / 2) / (self.xlim / 2)
- y_norm = (y - self.ylim / 2) / (self.ylim / 2)
-
- self.new_z_coef = self.z_coeffs[0] * x_norm + self.z_coeffs[1] * y_norm + self.z_coeffs[2]
-
- dif_wfe_rms = self.sim_psf_toolkit.check_wfe_rms(z_coeffs=self.new_z_coef)
-
- return self.sim_psf_toolkit.max_wfe_rms - dif_wfe_rms
-
- def get_zernike_coeff_map(self):
- if self.new_z_coef is not None:
- return self.new_z_coef
- else:
- print('Coeff map has not been calculated yet.')
-
- def get_mono_PSF(self, x, y, lambda_obs=0.725):
-
- # Calculate the specific field's zernike coeffs
- self.zernike_coeff_map(x, y)
- # Set the Z coefficients to the PSF toolkit generator
- self.sim_psf_toolkit.set_z_coeffs(self.get_zernike_coeff_map())
- # Generate the monochromatic psf
- self.sim_psf_toolkit.generate_mono_PSF(lambda_obs=lambda_obs, regen_sample=False)
- # Return the generated PSF
-
- return self.sim_psf_toolkit.get_psf()
-
- def inspect_opd_map(self, cmap='viridis', save_img=False):
- """Plot the last saved OPD map."""
- self.sim_psf_toolkit.plot_opd_phase(cmap, save_img)
-
- def inspect_field_wfe_rms(self, mesh_bins=20, save_img=False):
- """Plot a chart of WFE_RMS as a function of position throughout the field."""
- x_coord = np.linspace(0, self.xlim, mesh_bins)
- y_coord = np.linspace(0, self.ylim, mesh_bins)
-
- x_mesh, y_mesh = np.meshgrid(x_coord, y_coord)
-
- wfe_rms_field = np.array([
- self.compute_wfe_rms(_x, _y) for _x, _y in zip(x_mesh.flatten(), y_mesh.flatten())
- ])
-
- wfe_rms_field_mesh = wfe_rms_field.reshape((mesh_bins, mesh_bins))
-
- # Plot the field
- fig = plt.figure(figsize=(8, 8))
- ax1 = fig.add_subplot(111)
- im1 = ax1.imshow(wfe_rms_field_mesh, interpolation='None')
- divider = make_axes_locatable(ax1)
- cax = divider.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im1, cax=cax, orientation='vertical')
- ax1.set_title('PSF field WFE RMS [um]')
- ax1.set_xlabel('x axis')
- ax1.set_ylabel('y axis')
-
- if save_img:
- plt.savefig('./WFE_field_meshdim_%d.pdf' % mesh_bins, bbox_inches='tight')
-
- plt.show()
-
- def get_poly_PSF(self, x, y, SED):
- # Calculate the specific field's zernike coeffs
- self.zernike_coeff_map(x, y)
- # Set the Z coefficients to the PSF toolkit generator
- self.sim_psf_toolkit.set_z_coeffs(self.get_zernike_coeff_map())
-
- return self.sim_psf_toolkit.generate_poly_PSF(SED, n_bins=self.n_bins)
diff --git a/wf_psf/GenPolyFieldPSF.py b/wf_psf/GenPolyFieldPSF.py
deleted file mode 100644
index 119c6d60..00000000
--- a/wf_psf/GenPolyFieldPSF.py
+++ /dev/null
@@ -1,253 +0,0 @@
-import numpy as np
-import matplotlib.pyplot as plt
-import matplotlib as mpl
-from matplotlib.colors import ListedColormap, LinearSegmentedColormap
-from mpl_toolkits.axes_grid1 import make_axes_locatable
-
-
-class GenPolyFieldPSF(object):
- """Generate PSF field with polynomial vairiations of Zernike coefficients."""
-
- def __init__(
- self,
- sim_psf_toolkit,
- d_max=2,
- grid_points=[4, 4],
- max_order=45,
- x_lims=[0, 1e3],
- y_lims=[0, 1e3],
- n_bins=35,
- lim_max_wfe_rms=None,
- auto_init=True,
- verbose=False
- ):
- # Input attributes
- self.sim_psf_toolkit = sim_psf_toolkit
- self.max_order = max_order
- self.d_max = d_max
- self.x_lims = x_lims
- self.y_lims = y_lims
- self.grid_points = grid_points
- self.n_bins = n_bins
- self.verbose = verbose
- if lim_max_wfe_rms is None:
- self.lim_max_wfe_rms = sim_psf_toolkit.max_wfe_rms
- else:
- self.lim_max_wfe_rms = lim_max_wfe_rms
-
- self.auto_init = auto_init
-
- # Class attributes
- self.C_poly = None
- self.WFE_RMS = None
-
- # Build coefficient polynomial matric
- if self.auto_init:
- self.build_poly_coefficients()
-
- def scale_positions(self, xv_flat, yv_flat):
- # Scale positions to the square [-1,1] x [-1,1]
- scaled_x = (xv_flat - self.x_lims[0]) / (self.x_lims[1] - self.x_lims[0])
- scaled_x = (scaled_x - 0.5) * 2
- scaled_y = (yv_flat - self.y_lims[0]) / (self.y_lims[1] - self.y_lims[0])
- scaled_y = (scaled_y - 0.5) * 2
-
- return scaled_x, scaled_y
-
- def poly_mat_gen(self, xv_flat, yv_flat):
- """ Generate polynomial matrix of positions.
-
- Parameters
- ----------
- xv_flat: np.ndarray (dim,)
- x positions.
- yv_flat: np.ndarray (dim,)
- y positions.
- """
- n_mono = (self.d_max + 1) * (self.d_max + 2) // 2
- if np.isscalar(xv_flat):
- Pi = np.zeros((n_mono, 1))
- else:
- Pi = np.zeros((n_mono, xv_flat.shape[0]))
-
- # Scale positions to the square [-1,1] x [-1,1]
- scaled_x, scaled_y = self.scale_positions(xv_flat, yv_flat)
-
- for d in range(self.d_max + 1):
- row_idx = d * (d + 1) // 2
- for p in range(d + 1):
- Pi[row_idx + p, :] = scaled_x**(d - p) * scaled_y**p
-
- return Pi
-
- def zernike_poly_gen(self, xv_flat, yv_flat):
- """ Generate zernike values from positions.
-
- Parameters
- ----------
- xv_flat: np.ndarray (dim,)
- x positions.
- yv_flat: np.ndarray (dim,)
- y positions.
- """
- # Generate the polynomial matrix
- Pi_samples = self.poly_mat_gen(xv_flat, yv_flat)
-
- return self.C_poly @ Pi_samples
-
- def set_C_poly(self, C_poly):
- """ Set the polynomial coefficients.
-
- Parameters
- ----------
- C_poly: np.ndarray
- Polynomial coefficients.
- """
- self.C_poly = C_poly
-
- def set_WFE_RMS(self, WFE_RMS):
- """ Set the WFE RMS map.
-
- Parameters
- ----------
- WFE_RMS: np.ndarray
- WFE_RMS map of the C_poly values.
- """
- self.WFE_RMS = WFE_RMS
-
- def build_poly_coefficients(self):
- """Build a polynomial coefficient matrix."""
- ## Choose the anchor points on a regular grid
- x = np.linspace(self.x_lims[0], self.x_lims[1], num=self.grid_points[0], endpoint=True)
- y = np.linspace(self.y_lims[0], self.y_lims[1], num=self.grid_points[1], endpoint=True)
- # Build mesh
- xv_grid, yv_grid = np.meshgrid(x, y)
-
- ## Random position shift
- # It is done as a random shift defined in a
- # square centred in each grid position so that there is
- # no overlaping between the squares.
- xv_rand = np.random.rand(self.grid_points[0], self.grid_points[1])
- yv_rand = np.random.rand(self.grid_points[0], self.grid_points[1])
- # Calculate the shift length
- x_step = (self.x_lims[1] - self.x_lims[0]) / self.grid_points[0]
- y_step = (self.y_lims[1] - self.y_lims[0]) / self.grid_points[1]
- # Center and scale shifts
- xv_rand = (xv_rand - 0.5) * x_step
- yv_rand = (yv_rand - 0.5) * y_step
- # Add the shift to the grid values
- xv = xv_grid + xv_rand
- yv = yv_grid + yv_rand
- # Flatten
- xv_flat = xv.flatten()
- yv_flat = yv.flatten()
- # Check the limits
- xv_flat[xv_flat > self.x_lims[1]] = self.x_lims[1]
- xv_flat[xv_flat < self.x_lims[0]] = self.x_lims[0]
- yv_flat[yv_flat > self.y_lims[1]] = self.y_lims[1]
- yv_flat[yv_flat < self.y_lims[0]] = self.y_lims[0]
-
- ## Select a random vector of size `max_order` for each position
- # When concatenated into the Z matrix we have:
- Z = np.random.randn(self.max_order, len(xv_flat))
- # Normalize so that each position has the lim_max_wfe_rms
- norm_weights = np.sqrt(np.sum(Z**2, axis=1))
- Z /= (norm_weights.reshape((-1, 1)) / self.lim_max_wfe_rms)
-
- ## Generate position polynomial matrix
- Pi = self.poly_mat_gen(xv_flat, yv_flat)
-
- ## Estimate by least-squares the C matrix
- self.C_poly = Z @ np.linalg.pinv(Pi)
- # Re-estimate the Z matrix with the estimated C
- Z_hat = self.C_poly @ Pi
-
- ## Sampling the space
- # Choose the anchor points on a regular grid
- x = np.linspace(self.x_lims[0], self.x_lims[1], num=100, endpoint=True)
- y = np.linspace(self.y_lims[0], self.y_lims[1], num=100, endpoint=True)
- xv_grid, yv_grid = np.meshgrid(x, y)
- # Recalculate the Zernike coefficients with the new C_poly matrix
- Z_est = self.zernike_poly_gen(xv_grid.flatten(), yv_grid.flatten())
-
- ## We need to renormalize and check that the WFE RMS has a max value near the expected one
- # Calculate the WFE_RMS on the new grid
- calc_wfe = np.sqrt(np.sum(Z_est**2, axis=0))
- # Due to the polynomnial behaviour we will set the mean WFE_RMS over the field of view to be 80% of
- # the maximum allowed WFE_RMS per position.
- scale_factor = (0.8 * self.lim_max_wfe_rms) / np.mean(calc_wfe)
- self.C_poly *= scale_factor
-
- # Recalculate the Z coefficients
- scaled_Z_est = self.zernike_poly_gen(xv_grid.flatten(), yv_grid.flatten())
- # Calculate and save the WFE_RMS map of the C_poly values.
- self.WFE_RMS = np.sqrt(np.sum(scaled_Z_est**2, axis=0)).reshape(xv_grid.shape)
-
- def show_WFE_RMS(self, save_img=False, save_name='WFE_field_meshdim'):
- """Plot the WFE RMS map."""
- fig = plt.figure(figsize=(8, 8))
- ax1 = fig.add_subplot(111)
- im1 = ax1.imshow(self.WFE_RMS, interpolation='None')
- divider = make_axes_locatable(ax1)
- cax = divider.append_axes('right', size='5%', pad=0.05)
- fig.colorbar(im1, cax=cax, orientation='vertical')
- ax1.set_title('PSF field WFE RMS [um]')
- ax1.set_xlabel('x axis')
- ax1.set_ylabel('y axis')
- if save_img:
- plt.savefig('./' + save_name + '.pdf', bbox_inches='tight')
- plt.show()
-
- def calc_zernike(self, xv_flat, yv_flat):
- """ Calculate Zernikes for a specific position.
-
- Normalize (x,y) inputs
- (x,y) need to be in [self.x_lims[0],self.x_lims[1]] x [self.y_lims[0],self.y_lims[1]]
- (x_norm,y_norm) need to be in [-1, +1] x [-1, +1]
- """
- # Check limits
- x_check = np.sum(xv_flat >= self.x_lims[1] * 1.1) + np.sum(xv_flat <= self.x_lims[0] * 1.1)
- y_check = np.sum(yv_flat >= self.y_lims[1] * 1.1) + np.sum(yv_flat <= self.y_lims[0] * 1.1)
-
- if self.verbose and x_check > 0:
- print(
- 'WARNING! x value is outside the limits [%f, %f]' %
- (self.x_lims[0], self.x_lims[1])
- )
- print(xv_flat)
- print(x_check)
- if self.verbose and y_check > 0:
- print(
- 'WARNING! y value is outside the limits [%f, %f]' %
- (self.y_lims[0], self.y_lims[1])
- )
- print(yv_flat)
- print(y_check)
-
- # Return Zernikes
- # The position scaling is done inside zernike_poly_gen
- return self.zernike_poly_gen(xv_flat, yv_flat)
-
- def get_mono_PSF(self, xv_flat, yv_flat, lambda_obs=0.725):
- """ Calculate monochromatic PSF at a specific position and wavelength.
- """
- # Calculate the specific field's zernike coeffs
- zernikes = self.calc_zernike(xv_flat, yv_flat)
- # Set the Z coefficients to the PSF toolkit generator
- self.sim_psf_toolkit.set_z_coeffs(zernikes)
- # Generate the monochromatic psf
- self.sim_psf_toolkit.generate_mono_PSF(lambda_obs=lambda_obs, regen_sample=False)
- # Return the generated PSF
- return self.sim_psf_toolkit.get_psf()
-
- def get_poly_PSF(self, xv_flat, yv_flat, SED):
- """ Calculate polychromatic PSF for a specific position and SED.
- """
- # Calculate the specific field's zernike coeffs
- zernikes = self.calc_zernike(xv_flat, yv_flat)
- # Set the Z coefficients to the PSF toolkit generator
- self.sim_psf_toolkit.set_z_coeffs(zernikes)
- poly_psf = self.sim_psf_toolkit.generate_poly_PSF(SED, n_bins=self.n_bins)
- opd = self.sim_psf_toolkit.opd
-
- return poly_psf, zernikes, opd
diff --git a/wf_psf/__init__.py b/wf_psf/__init__.py
deleted file mode 100644
index 6268ef26..00000000
--- a/wf_psf/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from . import *
-
-from .SimPSFToolkit import *
-from .GenPolyFieldPSF import *
-from .graph_utils import *
-
-from .tf_layers import *
-from .tf_modules import *
-from .tf_psf_field import *
-from .tf_mccd_psf_field import *
-from .tf_alt_psf_models import *
-
-from .utils import *
-from .metrics import *
-from .train_utils import *
-
-from .script_utils import *
-
-from .info import __version__, __about__
-
-__all__ = [] # List of submodules
-__all__ += ['SimPSFToolkit', 'GenPolyFieldPSF']
-__all__ += ['tf_layers', 'tf_modules', 'tf_psf_field', 'tf_mccd_psf_field']
-__all__ += ['graph_utils', 'utils', 'metrics', 'train_utils']
-__all__ += ['tf_alt_psf_models']
-__all__ += ['script_utils']
diff --git a/wf_psf/info.py b/wf_psf/info.py
deleted file mode 100644
index ef9c08ab..00000000
--- a/wf_psf/info.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-
-"""PACKAGE INFO.
-
-This module provides some basic information about the package.
-
-"""
-
-# Set the package release version
-version_info = (1, 3, 0)
-__version__ = '.'.join(str(c) for c in version_info)
-
-# Set the package details
-__author__ = 'Tobias Liaudat'
-__email__ = 'tobiasliaudat@gmail.com'
-__year__ = '2022'
-__url__ = 'https://github.com/tobias-liaudat/wf-psf'
-__description__ = 'Data-driven wavefront-based PSF modelling framework.'
-
-# Default package properties
-__license__ = 'MIT'
-__about__ = ('{}\nAuthor: {} \nEmail: {} \nYear: {} \nInfo: {}'
- ''.format(__name__, __author__, __email__, __year__,
- __description__))
-__setup_requires__ = ['pytest-runner', ]
-__tests_require__ = ['pytest', ]
diff --git a/wf_psf/method_comp_utils.py b/wf_psf/method_comp_utils.py
deleted file mode 100644
index 300178b0..00000000
--- a/wf_psf/method_comp_utils.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import os
-import numpy as np
-from astropy.io import fits
-import galsim as gs
-import warnings
-
-try:
- import mccd
-except:
- warnings.warn(
- "Warning! Problem importing MCCD package."
- "Check out that MCCD dependencies are correctly installed."
- )
-
-
-def interpsfex(dotpsfpath, pos):
- """Use PSFEx generated model to perform spatial PSF interpolation.
- Parameters
- ----------
- dotpsfpath : string
- Path to .psf file (PSFEx output).
- pos : np.ndaray
- Positions where the PSF model should be evaluated.
- Returns
- -------
- PSFs : np.ndarray
- Each row is the PSF imagette at the corresponding asked position.
- """
-
- if not os.path.exists(dotpsfpath):
- return None
-
- # read PSF model and extract basis and polynomial degree and scale position
- PSF_model = fits.open(dotpsfpath)[1]
-
- PSF_basis = np.array(PSF_model.data)[0][0]
- try:
- deg = PSF_model.header['POLDEG1']
- except KeyError:
- # constant PSF model
- return PSF_basis[0, :, :]
-
- # scale coordinates
- x_interp, x_scale = (PSF_model.header['POLZERO1'], PSF_model.header['POLSCAL1'])
- y_interp, y_scale = (PSF_model.header['POLZERO2'], PSF_model.header['POLSCAL2'])
- xs, ys = (pos[:, 0] - x_interp) / x_scale, (pos[:, 1] - y_interp) / y_scale
-
- # compute polynomial coefficients
- coeffs = np.array([[x**i for i in range(deg + 1)] for x in xs])
- cross_coeffs = np.array([
- np.concatenate([[(x**j) * (y**i)
- for j in range(deg - i + 1)]
- for i in range(1, deg + 1)])
- for x, y in zip(xs, ys)
- ])
- coeffs = np.hstack((coeffs, cross_coeffs))
-
- # compute interpolated PSF
- PSFs = np.array([
- np.sum([coeff * atom
- for coeff, atom in zip(coeffs_posi, PSF_basis)], axis=0)
- for coeffs_posi in coeffs
- ])
-
- return PSFs
-
-
-def match_psfs(interp_psfs, test_stars, psf_size=1.25):
- """ Match PSF model to stars - in flux, shift and pixel sampling - for validation tests.
- Returns both the matched PSFs' stamps.
-
- Parameters
- ----------
- interp_psfs: np.ndarray
- PSFs stamps to be matched.
- test_stars: np.ndarray
- Star stamps to be used for comparison with the PSF model.
- psf_size: float
- PSF size in sigma format.
- """
-
- sigmas = np.ones((test_stars.shape[0],)) * psf_size
-
- cents = [
- mccd.utils.CentroidEstimator(test_stars[it, :, :], sig=sigmas[it])
- for it in range(test_stars.shape[0])
- ]
- # Calculate shifts
- test_shifts = np.array([ce.return_shifts() for ce in cents])
-
- # Estimate shift kernels
- lanc_rad = np.ceil(3. * np.max(sigmas)).astype(int)
- shift_kernels, _ = mccd.utils.shift_ker_stack(test_shifts, upfact=1, lanc_rad=lanc_rad)
-
- # Degrade PSFs
- interp_psfs = np.array([
- mccd.utils.degradation_op(interp_psfs[j, :, :], shift_kernels[:, :, j], D=1)
- for j in range(test_stars.shape[0])
- ])
-
- # Optimised fulx matching
- norm_factor = np.array([
- np.sum(_star * _psf) / np.sum(_psf * _psf) for _star, _psf in zip(test_stars, interp_psfs)
- ]).reshape(-1, 1, 1)
- interp_psfs *= norm_factor
-
- return interp_psfs
-
-
-def crop_at_center(img, output_size):
- """" Crop image to a desired output size."""
- x = img.shape[0]
- y = img.shape[1]
- startx = x // 2 - (output_size[0] // 2)
- starty = y // 2 - (output_size[1] // 2)
- return img[startx:startx + output_size[0], starty:starty + output_size[1]]
-
-
-def calc_shapes(matched_psfs, test_stars):
- # Measure shapes of the reconstructions
- pred_moments = [gs.hsm.FindAdaptiveMom(gs.Image(_pred), strict=False) for _pred in matched_psfs]
-
- # Measure shapes of the reconstructions
- GT_pred_moments = [
- gs.hsm.FindAdaptiveMom(gs.Image(_pred), strict=False) for _pred in test_stars
- ]
-
- pred_e1_HSM, pred_e2_HSM, pred_R2_HSM = [], [], []
- GT_pred_e1_HSM, GT_pred_e2_HSM, GT_pred_R2_HSM = [], [], []
-
- for it in range(len(test_stars)):
- # Only save shape if both measurements do not raise errors
- if pred_moments[it].moments_status == 0 and GT_pred_moments[it].moments_status == 0:
-
- pred_e1_HSM.append(pred_moments[it].observed_shape.g1)
- pred_e2_HSM.append(pred_moments[it].observed_shape.g2)
- pred_R2_HSM.append(2 * (pred_moments[it].moments_sigma**2))
-
- GT_pred_e1_HSM.append(GT_pred_moments[it].observed_shape.g1)
- GT_pred_e2_HSM.append(GT_pred_moments[it].observed_shape.g2)
- GT_pred_R2_HSM.append(2 * (GT_pred_moments[it].moments_sigma**2))
-
- print(
- 'Total number of stars: %d\t Number of shape measurements problems %d.' %
- (len(test_stars), len(test_stars) - len(pred_e1_HSM))
- )
- pred_e1_HSM = np.array(pred_e1_HSM)
- pred_e2_HSM = np.array(pred_e2_HSM)
- pred_R2_HSM = np.array(pred_R2_HSM)
-
- GT_pred_e1_HSM = np.array(GT_pred_e1_HSM)
- GT_pred_e2_HSM = np.array(GT_pred_e2_HSM)
- GT_pred_R2_HSM = np.array(GT_pred_R2_HSM)
-
- return (pred_e1_HSM, pred_e2_HSM, pred_R2_HSM), (GT_pred_e1_HSM, GT_pred_e2_HSM, GT_pred_R2_HSM)
-
-
-def shape_pix_metrics(exp_psfs, exp_stars):
-
- # Calculate residuals
- residuals = np.sqrt(np.mean((exp_psfs - exp_stars)**2, axis=(1, 2)))
- GT_star_mean = np.sqrt(np.mean((exp_stars)**2, axis=(1, 2)))
-
- # RMSE calculations
- rmse = np.mean(residuals)
- rel_rmse = 100. * np.mean(residuals / GT_star_mean)
-
- # STD calculations
- pix_rmse_std = np.std(residuals)
- rel_pix_rmse_std = 100. * np.std(residuals / GT_star_mean)
-
- ## Shape error
- # Calculate shapes
- psf_shapes, star_shapes = calc_shapes(exp_psfs, exp_stars)
- # Extract results
- pred_e1_HSM, pred_e2_HSM, pred_R2_HSM = psf_shapes[0], psf_shapes[1], psf_shapes[2]
- GT_pred_e1_HSM, GT_pred_e2_HSM, GT_pred_R2_HSM = star_shapes[0], star_shapes[1], star_shapes[2]
-
- # e1
- e1_res = GT_pred_e1_HSM - pred_e1_HSM
- e1_res_rel = (GT_pred_e1_HSM - pred_e1_HSM) / GT_pred_e1_HSM
-
- rmse_e1 = np.sqrt(np.mean(e1_res**2))
- rel_rmse_e1 = 100. * np.sqrt(np.mean(e1_res_rel**2))
- std_rmse_e1 = np.std(e1_res)
- std_rel_rmse_e1 = 100. * np.std(e1_res_rel)
-
- # e2
- e2_res = GT_pred_e2_HSM - pred_e2_HSM
- e2_res_rel = (GT_pred_e2_HSM - pred_e2_HSM) / GT_pred_e2_HSM
-
- rmse_e2 = np.sqrt(np.mean(e2_res**2))
- rel_rmse_e2 = 100. * np.sqrt(np.mean(e2_res_rel**2))
- std_rmse_e2 = np.std(e2_res)
- std_rel_rmse_e2 = 100. * np.std(e2_res_rel)
-
- # R2
- R2_res = GT_pred_R2_HSM - pred_R2_HSM
-
- rmse_R2_meanR2 = np.sqrt(np.mean(R2_res**2)) / np.mean(GT_pred_R2_HSM)
- std_rmse_R2_meanR2 = np.std(R2_res / GT_pred_R2_HSM)
-
- # Save the metrics
- metrics_dict = {
- 'pix_rmse': rmse,
- 'pix_rel_rmse': rel_rmse,
- 'pix_rmse_std': pix_rmse_std,
- 'rel_pix_rmse_std': rel_pix_rmse_std,
- 'pred_e1_HSM': pred_e1_HSM,
- 'pred_e2_HSM': pred_e2_HSM,
- 'pred_R2_HSM': pred_R2_HSM,
- 'GT_pred_e1_HSM': GT_pred_e1_HSM,
- 'GT_ped_e2_HSM': GT_pred_e2_HSM,
- 'GT_pred_R2_HSM': GT_pred_R2_HSM,
- 'rmse_e1': rmse_e1,
- 'std_rmse_e1': std_rmse_e1,
- 'rel_rmse_e1': rel_rmse_e1,
- 'std_rel_rmse_e1': std_rel_rmse_e1,
- 'rmse_e2': rmse_e2,
- 'std_rmse_e2': std_rmse_e2,
- 'rel_rmse_e2': rel_rmse_e2,
- 'std_rel_rmse_e2': std_rel_rmse_e2,
- 'rmse_R2_meanR2': rmse_R2_meanR2,
- 'std_rmse_R2_meanR2': std_rmse_R2_meanR2,
- }
-
- return metrics_dict
diff --git a/wf_psf/sandbox.py b/wf_psf/sandbox.py
deleted file mode 100644
index 2783c5e6..00000000
--- a/wf_psf/sandbox.py
+++ /dev/null
@@ -1,648 +0,0 @@
-import numpy as np
-import tensorflow as tf
-import matplotlib.pyplot as plt
-from wf_psf.utils import generate_packed_elems
-from wf_psf.tf_psf_field import build_PSF_model
-""" Place to temporarily save some useful functions.
-"""
-
-
-class L1ParamScheduler(tf.keras.callbacks.Callback):
- """L1 rate scheduler which sets the L1 rate according to schedule.
-
- Arguments:
- l1_schedule_rule: a function that takes an epoch index
- (integer, indexed from 0) and current l1_rate
- as inputs and returns a new l1_rate as output (float).
- """
-
- def __init__(self, l1_schedule_rule):
- super(L1ParamScheduler, self).__init__()
- self.l1_schedule_rule = l1_schedule_rule
-
- def on_epoch_begin(self, epoch, logs=None):
- # Get the current learning rate from model's optimizer.
- l1_rate = float(tf.keras.backend.get_value(self.model.l1_rate))
- # Call schedule function to get the scheduled learning rate.
- scheduled_l1_rate = self.l1_schedule_rule(epoch, l1_rate)
- # Set the value back to the optimizer before this epoch starts
- self.model.set_l1_rate(scheduled_l1_rate)
- # tf.keras.backend.set_value(self.model.optimizer.lr, scheduled_lr)
-
-
-def l1_schedule_rule(epoch_n, l1_rate):
- if epoch_n != 0 and epoch_n % 10 == 0:
- scheduled_l1_rate = l1_rate / 2
- print("\nEpoch %05d: L1 rate is %0.4e." % (epoch_n, scheduled_l1_rate))
- return scheduled_l1_rate
- else:
- return l1_rate
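
A minimal, self-contained sketch of wiring L1ParamScheduler and l1_schedule_rule into a Keras training loop. DummyL1Model is a hypothetical stand-in (not part of the original code) that exposes the l1_rate attribute and set_l1_rate method the callback expects:

    import tensorflow as tf

    class DummyL1Model(tf.keras.Model):
        # Minimal stand-in exposing the interface L1ParamScheduler expects.
        def __init__(self):
            super().__init__()
            self.l1_rate = tf.Variable(1e-2, trainable=False)
            self.dense = tf.keras.layers.Dense(1)

        def set_l1_rate(self, value):
            self.l1_rate.assign(value)

        def call(self, x):
            return self.dense(x)

    model = DummyL1Model()
    model.compile(optimizer='adam', loss='mse')
    x = tf.random.normal((32, 4))
    y = tf.random.normal((32, 1))
    # l1_schedule_rule halves the L1 rate at epochs 10 and 20.
    model.fit(x, y, epochs=25, verbose=0, callbacks=[L1ParamScheduler(l1_schedule_rule)])
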
-
-
-def first_train_cycle(
- tf_semiparam_field,
- inputs,
- outputs,
- batch_size,
- l_rate_param,
- l_rate_non_param,
- n_epochs_param,
- n_epochs_non_param,
- param_callback=None,
- non_param_callback=None
-):
-
- ## First parametric train
-
- # Define the model optimisation
- # l_rate_param = 1e-2
- # n_epochs_param = 20
-
- loss = tf.keras.losses.MeanSquaredError()
- optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate_param, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
- )
- metrics = [tf.keras.metrics.MeanSquaredError()]
-
-    # Set the non-parametric model to zero
-    # Setting alpha to zero is already enough
- tf_semiparam_field.set_zero_nonparam()
-
-    # Set the non-parametric layer to non-trainable
-    # and keep the parametric layer trainable
- tf_semiparam_field.set_trainable_layers(param_bool=True, nonparam_bool=False)
-
- # Compile the model for the first optimisation
- tf_semiparam_field = build_PSF_model(
- tf_semiparam_field, optimizer=optimizer, loss=loss, metrics=metrics
- )
-
- # Train the parametric part
- history_param = tf_semiparam_field.fit(
- x=inputs, y=outputs, batch_size=batch_size, epochs=n_epochs_param, callbacks=param_callback
- )
-
- # Plot losses
- plt.figure()
- plt.subplot(211)
- plt.plot(history_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.subplot(212)
- plt.loglog(history_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.show()
-
- ## Non parametric train
-    # Set the non-parametric model back to non-zero
-    # (restore alpha so its contribution is active again)
- tf_semiparam_field.set_nonzero_nonparam()
-
-    # Set the non-parametric layer to trainable
-    # and the parametric layer to non-trainable
- tf_semiparam_field.set_trainable_layers(param_bool=False, nonparam_bool=True)
-
- # Non parametric parameters
- # l_rate_non_param = 1.0
- # n_epochs_non_param = 100
-
- # Define the model optimisation
- loss = tf.keras.losses.MeanSquaredError()
- optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate_non_param, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
- )
- metrics = [tf.keras.metrics.MeanSquaredError()]
-
- # Compile the model again for the second optimisation
- tf_semiparam_field = build_PSF_model(
- tf_semiparam_field, optimizer=optimizer, loss=loss, metrics=metrics
- )
-
-    # Train the non-parametric part
- history_non_param = tf_semiparam_field.fit(
- x=inputs,
- y=outputs,
- batch_size=batch_size,
- epochs=n_epochs_non_param,
- callbacks=non_param_callback
- )
-
- # Plot losses
- plt.figure()
- plt.subplot(211)
- plt.plot(history_non_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.subplot(212)
- plt.loglog(history_non_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.show()
-
- return tf_semiparam_field
-
-
-def train_cycle(
- tf_semiparam_field,
- inputs,
- outputs,
- batch_size,
- l_rate_param,
- l_rate_non_param,
- n_epochs_param,
- n_epochs_non_param,
- param_callback=None,
- non_param_callback=None
-):
-
- ## Parametric train
-
- # Define the model optimisation
- # l_rate_param = 1e-2
- # n_epochs_param = 20
-
- loss = tf.keras.losses.MeanSquaredError()
- optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate_param, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
- )
- metrics = [tf.keras.metrics.MeanSquaredError()]
-
- # Set the trainable layer
- tf_semiparam_field.set_trainable_layers(param_bool=True, nonparam_bool=False)
-
- # Compile the model for the first optimisation
- tf_semiparam_field = build_PSF_model(
- tf_semiparam_field, optimizer=optimizer, loss=loss, metrics=metrics
- )
-
- # Train the parametric part
- history_param = tf_semiparam_field.fit(
- x=inputs, y=outputs, batch_size=batch_size, epochs=n_epochs_param, callbacks=param_callback
- )
-
- # Plot losses
- plt.figure()
- plt.subplot(211)
- plt.plot(history_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.subplot(212)
- plt.loglog(history_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.show()
-
- ## Non parametric train
-    # Set the non-parametric layer to trainable and the parametric layer to non-trainable
- tf_semiparam_field.set_trainable_layers(param_bool=False, nonparam_bool=True)
-
- # Non parametric parameters
- # l_rate_non_param = 1.0
- # n_epochs_non_param = 100
-
- # Define the model optimisation
- loss = tf.keras.losses.MeanSquaredError()
- optimizer = tf.keras.optimizers.Adam(
- learning_rate=l_rate_non_param, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
- )
- metrics = [tf.keras.metrics.MeanSquaredError()]
-
- # Compile the model again for the second optimisation
- tf_semiparam_field = build_PSF_model(
- tf_semiparam_field, optimizer=optimizer, loss=loss, metrics=metrics
- )
-
-    # Train the non-parametric part
- history_non_param = tf_semiparam_field.fit(
- x=inputs,
- y=outputs,
- batch_size=batch_size,
- epochs=n_epochs_non_param,
- callbacks=non_param_callback
- )
-
- # Plot losses
- plt.figure()
- plt.subplot(211)
- plt.plot(history_non_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.subplot(212)
- plt.loglog(history_non_param.history['loss'])
- plt.xlabel('Number of iterations')
- plt.ylabel('Total loss')
- plt.show()
-
- return tf_semiparam_field
-
-
-def compute_metrics(
- tf_semiparam_field,
- simPSF_np,
- test_SEDs,
- train_SEDs,
- tf_test_pos,
- tf_train_pos,
- tf_test_stars,
- tf_train_stars,
- n_bins_lda,
- batch_size=16
-):
- # Generate SED data list
- test_packed_SED_data = [
- generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in test_SEDs
- ]
-
- tf_test_packed_SED_data = tf.convert_to_tensor(test_packed_SED_data, dtype=tf.float32)
- tf_test_packed_SED_data = tf.transpose(tf_test_packed_SED_data, perm=[0, 2, 1])
- test_pred_inputs = [tf_test_pos, tf_test_packed_SED_data]
- test_predictions = tf_semiparam_field.predict(x=test_pred_inputs, batch_size=batch_size)
-
- # Initialize the SED data list
- packed_SED_data = [
- generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in train_SEDs
- ]
- # First estimate the stars for the observations
- tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
- tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
- inputs = [tf_train_pos, tf_packed_SED_data]
- train_predictions = tf_semiparam_field.predict(x=inputs, batch_size=batch_size)
-
- # Calculate RMSE values
- test_res = np.sqrt(np.mean((tf_test_stars - test_predictions)**2))
- train_res = np.sqrt(np.mean((tf_train_stars - train_predictions)**2))
-
-    # Print RMSE values
- print('Test stars RMSE:\t %.4e' % test_res)
- print('Training stars RMSE:\t %.4e' % train_res)
-
- return test_res, train_res
-
-
-def compute_opd_metrics(tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos):
- """ Compute the OPD metrics. """
-
- np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
-
- ## For test positions
- # Param part
- zernike_coeffs = tf_semiparam_field.tf_poly_Z_field(test_pos)
- P_opd_pred = tf_semiparam_field.tf_zernike_OPD(zernike_coeffs)
- # Non-Param part
- NP_opd_pred = tf_semiparam_field.tf_NP_mccd_OPD.predict(test_pos)
- # OPD prediction
- opd_pred = tf.math.add(P_opd_pred, NP_opd_pred)
-
- # GT model
- GT_zernike_coeffs = GT_tf_semiparam_field.tf_poly_Z_field(test_pos)
- GT_opd_maps = GT_tf_semiparam_field.tf_zernike_OPD(GT_zernike_coeffs)
-
- # Compute residual and obscure the OPD
- res_opd = (GT_opd_maps.numpy() - opd_pred.numpy()) * np_obscurations
-
- # Calculate RMSE values
- test_opd_rmse = np.sqrt(np.mean(res_opd**2))
-
-    # Print RMSE values
- print('Test stars OPD RMSE:\t %.4e' % test_opd_rmse)
-
- ## For train part
- # Param part
- zernike_coeffs = tf_semiparam_field.tf_poly_Z_field(train_pos)
- P_opd_pred = tf_semiparam_field.tf_zernike_OPD(zernike_coeffs)
- # Non-Param part
- NP_opd_pred = tf_semiparam_field.tf_NP_mccd_OPD.predict(train_pos)
- # OPD prediction
- opd_pred = tf.math.add(P_opd_pred, NP_opd_pred)
-
- # GT model
- GT_zernike_coeffs = GT_tf_semiparam_field.tf_poly_Z_field(train_pos)
- GT_opd_maps = GT_tf_semiparam_field.tf_zernike_OPD(GT_zernike_coeffs)
-
- # Compute residual and obscure the OPD
- res_opd = (GT_opd_maps.numpy() - opd_pred.numpy()) * np_obscurations
-
- # Calculate RMSE values
- train_opd_rmse = np.sqrt(np.mean(res_opd**2))
-
-    # Print RMSE values
- print('Train stars OPD RMSE:\t %.4e' % train_opd_rmse)
-
- return test_opd_rmse, train_opd_rmse
-
-
-def compute_opd_metrics_polymodel(tf_semiparam_field, GT_tf_semiparam_field, test_pos, train_pos):
- """ Compute the OPD metrics. """
-
- np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
-
- ## For test positions
- # Param part
- zernike_coeffs = tf_semiparam_field.tf_poly_Z_field(test_pos)
- P_opd_pred = tf_semiparam_field.tf_zernike_OPD(zernike_coeffs)
- # Non-Param part
- NP_opd_pred = tf_semiparam_field.tf_np_poly_opd(test_pos)
- # OPD prediction
- opd_pred = tf.math.add(P_opd_pred, NP_opd_pred)
-
- # GT model
- GT_zernike_coeffs = GT_tf_semiparam_field.tf_poly_Z_field(test_pos)
- GT_opd_maps = GT_tf_semiparam_field.tf_zernike_OPD(GT_zernike_coeffs)
-
- # Compute residual and obscure the OPD
- res_opd = (GT_opd_maps.numpy() - opd_pred.numpy()) * np_obscurations
-
- # Calculate RMSE values
- test_opd_rmse = np.sqrt(np.mean(res_opd**2))
-
-    # Print RMSE values
- print('Test stars OPD RMSE:\t %.4e' % test_opd_rmse)
-
- ## For train part
- # Param part
- zernike_coeffs = tf_semiparam_field.tf_poly_Z_field(train_pos)
- P_opd_pred = tf_semiparam_field.tf_zernike_OPD(zernike_coeffs)
- # Non-Param part
- NP_opd_pred = tf_semiparam_field.tf_np_poly_opd(train_pos)
- # OPD prediction
- opd_pred = tf.math.add(P_opd_pred, NP_opd_pred)
-
- # GT model
- GT_zernike_coeffs = GT_tf_semiparam_field.tf_poly_Z_field(train_pos)
- GT_opd_maps = GT_tf_semiparam_field.tf_zernike_OPD(GT_zernike_coeffs)
-
- # Compute residual and obscure the OPD
- res_opd = (GT_opd_maps.numpy() - opd_pred.numpy()) * np_obscurations
-
- # Calculate RMSE values
- train_opd_rmse = np.sqrt(np.mean(res_opd**2))
-
-    # Print RMSE values
- print('Train stars OPD RMSE:\t %.4e' % train_opd_rmse)
-
- return test_opd_rmse, train_opd_rmse
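
In both OPD helpers above, the reported value is the obscuration-masked RMSE. Written out, with $\Phi$ the OPD maps and $M$ the pupil obscuration mask:

$$ \mathrm{RMSE}_{\mathrm{OPD}} = \sqrt{ \Big\langle \big[ (\Phi_{\mathrm{GT}} - \Phi_{\mathrm{pred}})\, M \big]^2 \Big\rangle }, $$

where the average runs over the evaluated positions and the pupil pixels.
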
-
-
-def compute_one_opd_rmse(GT_tf_semiparam_field, tf_semiparam_field, pos, is_poly=False):
-    """ Compute the OPD RMSE for a single position. """
-
- np_obscurations = np.real(tf_semiparam_field.obscurations.numpy())
-
- tf_pos = tf.convert_to_tensor(pos, dtype=tf.float32)
-
- ## For test positions
- # Param part
- zernike_coeffs = tf_semiparam_field.tf_poly_Z_field(tf_pos)
- P_opd_pred = tf_semiparam_field.tf_zernike_OPD(zernike_coeffs)
- # Non-Param part
-    if not is_poly:
- NP_opd_pred = tf_semiparam_field.tf_NP_mccd_OPD.predict(tf_pos)
- else:
- NP_opd_pred = tf_semiparam_field.tf_np_poly_opd(tf_pos)
- # OPD prediction
- opd_pred = tf.math.add(P_opd_pred, NP_opd_pred)
-
- # GT model
- GT_zernike_coeffs = GT_tf_semiparam_field.tf_poly_Z_field(tf_pos)
- GT_opd_maps = GT_tf_semiparam_field.tf_zernike_OPD(GT_zernike_coeffs)
-
- # Compute residual and obscure the OPD
- res_opd = (GT_opd_maps.numpy() - opd_pred.numpy()) * np_obscurations
-
- # Calculate RMSE values
- opd_rmse = np.sqrt(np.mean(res_opd**2))
-
- return opd_rmse
-
-
-def plot_function(mesh_pos, residual, tf_train_pos, tf_test_pos, title='Error'):
- vmax = np.max(residual)
- vmin = np.min(residual)
-
- plt.figure(figsize=(12, 8))
- plt.scatter(
- mesh_pos[:, 0],
- mesh_pos[:, 1],
- s=100,
- c=residual.reshape(-1, 1),
- cmap='viridis',
- marker='s',
- vmax=vmax,
- vmin=vmin
- )
- plt.colorbar()
- plt.scatter(
- tf_train_pos[:, 0], tf_train_pos[:, 1], c='k', marker='*', s=10, label='Train stars'
- )
- plt.scatter(tf_test_pos[:, 0], tf_test_pos[:, 1], c='r', marker='*', s=10, label='Test stars')
- plt.title(title)
- plt.xlabel('x-axis')
- plt.ylabel('y-axis')
- plt.show()
-
-
-def plot_residual_maps(
- GT_tf_semiparam_field,
- tf_semiparam_field,
- simPSF_np,
- train_SEDs,
- tf_train_pos,
- tf_test_pos,
- n_bins_lda=20,
- n_points_per_dim=30,
- is_poly=False
-):
-
-    # Recover the grid limits
- x_lims = tf_semiparam_field.x_lims
- y_lims = tf_semiparam_field.y_lims
-
- # Generate mesh of testing positions
- x = np.linspace(x_lims[0], x_lims[1], n_points_per_dim)
- y = np.linspace(y_lims[0], y_lims[1], n_points_per_dim)
- x_pos, y_pos = np.meshgrid(x, y)
-
- mesh_pos = np.concatenate((x_pos.flatten().reshape(-1, 1), y_pos.flatten().reshape(-1, 1)),
- axis=1)
- tf_mesh_pos = tf.convert_to_tensor(mesh_pos, dtype=tf.float32)
-
- # Testing the positions
- rec_x_pos = mesh_pos[:, 0].reshape(x_pos.shape)
- rec_y_pos = mesh_pos[:, 1].reshape(y_pos.shape)
-
- # Get random SED from the training catalog
- SED_random_integers = np.random.choice(
- np.arange(train_SEDs.shape[0]), size=mesh_pos.shape[0], replace=True
- )
- # Build the SED catalog for the testing mesh
- mesh_SEDs = np.array([train_SEDs[_id, :, :] for _id in SED_random_integers])
-
- # Generate SED data list
- mesh_packed_SED_data = [
- generate_packed_elems(_sed, simPSF_np, n_bins=n_bins_lda) for _sed in mesh_SEDs
- ]
-
- # Generate inputs
- tf_mesh_packed_SED_data = tf.convert_to_tensor(mesh_packed_SED_data, dtype=tf.float32)
- tf_mesh_packed_SED_data = tf.transpose(tf_mesh_packed_SED_data, perm=[0, 2, 1])
- mesh_pred_inputs = [tf_mesh_pos, tf_mesh_packed_SED_data]
-
- # Predict mesh stars
- model_mesh_preds = tf_semiparam_field.predict(x=mesh_pred_inputs, batch_size=16)
- GT_mesh_preds = GT_tf_semiparam_field.predict(x=mesh_pred_inputs, batch_size=16)
-
- # Calculate pixel RMSE for each star
- pix_rmse = np.array([
- np.sqrt(np.mean((_GT_pred - _model_pred)**2))
- for _GT_pred, _model_pred in zip(GT_mesh_preds, model_mesh_preds)
- ])
-
- relative_pix_rmse = np.array([
- np.sqrt(np.mean((_GT_pred - _model_pred)**2)) / np.sqrt(np.mean((_GT_pred)**2))
- for _GT_pred, _model_pred in zip(GT_mesh_preds, model_mesh_preds)
- ])
-
- # Plot absolute pixel error
- plot_function(mesh_pos, pix_rmse, tf_train_pos, tf_test_pos, title='Absolute pixel error')
- # Plot relative pixel error
- plot_function(
- mesh_pos, relative_pix_rmse, tf_train_pos, tf_test_pos, title='Relative pixel error'
- )
-
- # Compute OPD errors
- opd_rmse = np.array([
- compute_one_opd_rmse(
- GT_tf_semiparam_field, tf_semiparam_field, _pos.reshape(1, -1), is_poly
- ) for _pos in mesh_pos
- ])
-
- # Plot absolute pixel error
- plot_function(mesh_pos, opd_rmse, tf_train_pos, tf_test_pos, title='Absolute OPD error')
-
-
-def plot_imgs(mat, cmap='gist_stern', figsize=(20, 20)):
-    """ Plot the 2D images of a tensor.
-    The tensor is (batch, xdim, ydim) and the subplot grid is
-    chosen "intelligently" by factorising the batch size.
-    """
-
-    memo = {}
-
-    def dp(n, left):  # returns tuple (cost, [factors])
-        if (n, left) in memo:
-            return memo[(n, left)]
-
- if left == 1:
- return (n, [n])
-
- i = 2
- best = n
- bestTuple = [n]
- while i * i <= n:
- if n % i == 0:
-                rem = dp(n // i, left - 1)
- if rem[0] + i < best:
- best = rem[0] + i
- bestTuple = [i] + rem[1]
- i += 1
-
- memo[(n, left)] = (best, bestTuple)
- return memo[(n, left)]
-
- n_images = mat.shape[0]
- row_col = dp(n_images, 2)[1]
- row_n = int(row_col[0])
- col_n = int(row_col[1])
-
- plt.figure(figsize=figsize)
- idx = 0
-
- for _i in range(row_n):
- for _j in range(col_n):
-
- plt.subplot(row_n, col_n, idx + 1)
- plt.imshow(mat[idx, :, :], cmap=cmap)
- plt.colorbar()
- plt.title('matrix id %d' % idx)
-
- idx += 1
-
- plt.show()
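
For instance, under the factorisation above a batch of 12 images lands on a 3 x 4 grid (3 + 4 is the smallest row-plus-column sum among the two-factor splits of 12). An illustrative call, assuming the module-level numpy and matplotlib imports of the deleted file:

    import numpy as np

    plot_imgs(np.random.rand(12, 32, 32), cmap='viridis', figsize=(8, 6))
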
-
-
-def add_noise(image, desired_SNR):
- """ Function to add noise to a 2D image in order to have a desired SNR.
- """
- sigma_noise = np.sqrt((np.sum(image**2)) / (desired_SNR * image.shape[0] * image.shape[1]))
- noisy_image = image + np.random.standard_normal(image.shape) * sigma_noise
- return noisy_image
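
The noise level above follows the convention SNR = sum(image**2) / (N_pix * sigma**2), solved for sigma. A quick, illustrative numerical check:

    import numpy as np

    image = np.ones((16, 16))              # sum(image**2) = 256, N_pix = 256
    noisy = add_noise(image, desired_SNR=100.0)
    # Implied sigma_noise = sqrt(256 / (100 * 256)) = 0.1
    print(np.std(noisy - image))           # ~0.1
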
-
-
-class MyLRSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
- """ Learning rate scheduler.
-
- To be passed to the optimization algorithm.
-
- Parameters
- ----------
- lr_update_strategy: function(step, params) -> lr
- Learning rate update strategy. Should have as input the step number and some parameters
- and return a learning rate that corresponds to the input step number.
- """
-
- def __init__(self, lr_update_strategy, params=None):
- self.lr_update_strategy = lr_update_strategy
- self.params = params
-
- def __call__(self, step):
- return self.lr_update_strategy(step, self.params)
-
-
-def step_update_strategy(step, params):
- """ Piecewise constant update strategy.
-
- Parameters
- ----------
- step: int
-        Optimization step (not to be confused with epoch).
- params['batch_size']: int
- Batch size.
- params['input_elements']: int
- Total number of input elements.
- params['total_epochs']: int
- Total number of epochs.
- params['epoch_list']: list of int
-        List of epoch intervals. It corresponds to the intervals of the learning rate
- list params['lr_list'].
- It must verify:
- params['epoch_list'][0] = 0
- params['epoch_list'][-1] = params['total_epochs']
- len(params['epoch_list']) = 1 + len(params['lr_list'])
- params['lr_list']: list of float
- List of learning rate values. See params['epoch_list'].
-
- Returns
- -------
- current_lr: float
-        Learning rate corresponding to the current step.
-
- """
- # Calculate the current epoch
- one_epoch_in_steps = params['input_elements'] / params['batch_size']
- current_epoch = step / one_epoch_in_steps
-
- print('step')
- print(step)
- print('current_epoch')
- print(current_epoch)
-
- for it in range(len(params['lr_list'])):
-        if params['epoch_list'][it] <= current_epoch < params['epoch_list'][it + 1]:
- current_lr = params['lr_list'][it]
- break
-
- print('current_lr')
- print(current_lr)
-
- return current_lr
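
A minimal sketch combining MyLRSchedule and step_update_strategy; the params layout follows the docstring above, the numbers are illustrative, and the schedule is evaluated once per optimisation step by the optimizer:

    import tensorflow as tf

    params = {
        'batch_size': 16,
        'input_elements': 1600,   # 100 optimisation steps per epoch
        'total_epochs': 30,
        'epoch_list': [0, 10, 20, 30],
        'lr_list': [1e-2, 1e-3, 1e-4],
    }
    lr_schedule = MyLRSchedule(step_update_strategy, params=params)
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
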
diff --git a/wf_psf/script_utils.py b/wf_psf/script_utils.py
deleted file mode 100644
index 7f4edd16..00000000
--- a/wf_psf/script_utils.py
+++ /dev/null
@@ -1,1638 +0,0 @@
-# Import packages
-import sys
-import numpy as np
-import time
-import tensorflow as tf
-import tensorflow_addons as tfa
-
-import wf_psf.SimPSFToolkit as SimPSFToolkit
-import wf_psf.utils as wf_utils
-import wf_psf.tf_mccd_psf_field as tf_mccd_psf_field
-import wf_psf.tf_psf_field as tf_psf_field
-import wf_psf.metrics as wf_metrics
-import wf_psf.train_utils as wf_train_utils
-
-try:
- import matplotlib.pyplot as plt
- import matplotlib as mpl
- import matplotlib.ticker as mtick
- import seaborn as sns
-except ImportError:
- print('\nProblem importing plotting packages: matplotlib and seaborn.\n')
-
-
-def train_model(**args):
- r""" Train the PSF model.
-
-    For the parameters, check the training script's click help.
-
- """
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = args['model'] + args['id_name']
-
- # Define paths
- log_save_file = args['base_path'] + args['log_folder']
- model_save_file = args['base_path'] + args['model_folder']
- optim_hist_file = args['base_path'] + args['optim_hist_folder']
- saving_optim_hist = dict()
-
- # Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '_output.log', 'w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- # Print GPU and tensorflow info
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
- ## Prepare the inputs
- # Generate Zernike maps
- zernikes = wf_utils.zernike_generator(
- n_zernikes=args['n_zernikes'], wfe_dim=args['pupil_diameter']
- )
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it, :, :] = zernikes[it]
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
- print('Zernike cube:')
- print(tf_zernike_cube.shape)
-
- ## Load the dictionaries
- train_dataset = np.load(
- args['dataset_folder'] + args['train_dataset_file'], allow_pickle=True
- )[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- # train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(
- args['dataset_folder'] + args['test_dataset_file'], allow_pickle=True
- )[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- # tf_train_stars = tf.convert_to_tensor(train_dataset['stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_stars = tf.convert_to_tensor(test_dataset['stars'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- if args['model'] == 'poly_physical':
- # Concatenate the Zernike and the positions from train and test datasets
- all_pos = np.concatenate((train_dataset['positions'], test_dataset['positions']), axis=0)
- all_zernike_prior = np.concatenate(
- (train_dataset['zernike_prior'], test_dataset['zernike_prior']), axis=0
- )
- # Convert to tensor
- tf_pos_all = tf.convert_to_tensor(all_pos, dtype=tf.float32)
- tf_zernike_prior_all = tf.convert_to_tensor(all_zernike_prior, dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
- # New interp features backwards compatibility
- if 'interp_pts_per_bin' not in args:
- args['interp_pts_per_bin'] = 0
- args['extrapolate'] = True
- args['sed_interp_kind'] = 'linear'
-
- ## Generate initializations
- # Prepare np input
-    simPSF_np = SimPSFToolkit.SimPSFToolkit(
- zernikes,
- max_order=args['n_zernikes'],
- pupil_diameter=args['pupil_diameter'],
- output_dim=args['output_dim'],
- oversampling_rate=args['oversampling_rate'],
- output_Q=args['output_q'],
- interp_pts_per_bin=args['interp_pts_per_bin'],
- extrapolate=args['extrapolate'],
- SED_interp_kind=args['sed_interp_kind'],
- SED_sigma=args['sed_sigma']
- )
- simPSF_np.gen_random_Z_coeffs(max_order=args['n_zernikes'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=args['pupil_diameter'], N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
- # Initialize the SED data list
- packed_SED_data = [
- wf_utils.generate_packed_elems(_sed, simPSF_np, n_bins=args['n_bins_lda'])
- for _sed in train_SEDs
- ]
-
- # Prepare the inputs for the training
- tf_packed_SED_data = tf.convert_to_tensor(packed_SED_data, dtype=tf.float32)
- tf_packed_SED_data = tf.transpose(tf_packed_SED_data, perm=[0, 2, 1])
-
- inputs = [tf_train_pos, tf_packed_SED_data]
-
- # Select the observed stars (noisy or noiseless)
- outputs = tf_noisy_train_stars
- # outputs = tf_train_stars
-
- ## Prepare validation data inputs
- val_SEDs = test_SEDs
- tf_val_pos = tf_test_pos
- tf_val_stars = tf_test_stars
-
- # Initialize the SED data list
- val_packed_SED_data = [
- wf_utils.generate_packed_elems(_sed, simPSF_np, n_bins=args['n_bins_lda'])
- for _sed in val_SEDs
- ]
-
- # Prepare the inputs for the validation
- tf_val_packed_SED_data = tf.convert_to_tensor(val_packed_SED_data, dtype=tf.float32)
- tf_val_packed_SED_data = tf.transpose(tf_val_packed_SED_data, perm=[0, 2, 1])
-
- # Prepare input validation tuple
- val_x_inputs = [tf_val_pos, tf_val_packed_SED_data]
- val_y_inputs = tf_val_stars
- val_data = (val_x_inputs, val_y_inputs)
-
- ## Select the model
- if args['model'] == 'mccd' or args['model'] == 'graph':
- poly_dic, graph_dic = tf_mccd_psf_field.build_mccd_spatial_dic_v2(
- obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- d_max=args['d_max_nonparam'],
- graph_features=args['graph_features']
- )
- spatial_dic = [poly_dic, graph_dic]
-
- if args['model'] == 'mccd':
- # Initialize the WaveDiff-polygraph model
- tf_semiparam_field = tf_mccd_psf_field.TF_SP_MCCD_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- d_max_nonparam=args['d_max_nonparam'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'graph':
- # Initialize the WaveDiff-graph model
- tf_semiparam_field = tf_mccd_psf_field.TF_SP_graph_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'poly':
- # Initialize the WaveDiff-original model
- tf_semiparam_field = tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'param':
- # Initialize the Zernike-X model
- tf_semiparam_field = tf_psf_field.TF_PSF_field_model(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'poly_physical':
- # Initialize the model
- tf_semiparam_field = tf_psf_field.TF_physical_poly_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_pos_all,
- zks_prior=tf_zernike_prior_all,
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zks_param=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- # Backwards compatibility with older versions of train_eval_plot_click.py
- if 'project_dd_features' not in args:
- args['project_dd_features'] = False
- if 'project_last_cycle' not in args:
- args['project_last_cycle'] = False
- if 'reset_dd_features' not in args:
- args['reset_dd_features'] = False
- if 'pretrained_model' not in args:
- args['pretrained_model'] = None
-
- # Load pretrained model
- if args['model'] == 'poly' and args['pretrained_model'] is not None:
- tf_semiparam_field.load_weights(args['pretrained_model'])
- print('Model loaded.')
- tf_semiparam_field.project_DD_features(tf_zernike_cube)
- print('DD features projected over parametric model')
-
- # If reset_dd_features is true we project the DD features onto the param model and reset them.
-    if (args['model'] == 'poly' and args['reset_dd_features']
-            and args['cycle_def'] != 'only-parametric'):
- tf_semiparam_field.tf_np_poly_opd.init_vars()
-        print('DD features reset to random initialisation.')
-
-    ## Model Training
-    # Prepare the model-saving checkpoint callback
- filepath_chkp_callback = args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_cycle1'
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error',
- verbose=1,
- save_best_only=True,
- save_weights_only=True,
- mode='min',
- save_freq='epoch',
- options=None
- )
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(learning_rate=args['l_rate_param'][0])
- non_param_optim = tfa.optimizers.RectifiedAdam(learning_rate=args['l_rate_non_param'][0])
-
- print('Starting cycle 1..')
- start_cycle1 = time.time()
-
- if args['model'] == 'param':
- tf_semiparam_field, hist_param = wf_train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate=args['l_rate_param'][0],
- n_epochs=args['n_epochs_param'][0],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
-
- else:
- tf_semiparam_field, hist_param, hist_non_param = wf_train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate_param=args['l_rate_param'][0],
- l_rate_non_param=args['l_rate_non_param'][0],
- n_epochs_param=args['n_epochs_param'][0],
- n_epochs_non_param=args['n_epochs_non_param'][0],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None,
- non_param_loss=None,
- param_metrics=None,
- non_param_metrics=None,
- param_callback=None,
- non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=True,
- cycle_def=args['cycle_def'],
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
-
- # Backwards compatibility with click scripts older than the projected learning feature
- if 'save_all_cycles' not in args:
- args['save_all_cycles'] = False
-
- # Save weights
- if args['save_all_cycles']:
- tf_semiparam_field.save_weights(model_save_file + 'chkp_' + run_id_name + '_cycle1')
-
- end_cycle1 = time.time()
- print('Cycle1 elapsed time: %f' % (end_cycle1 - start_cycle1))
-
- # Save optimisation history in the saving dict
- if hist_param is not None:
- saving_optim_hist['param_cycle1'] = hist_param.history
- if args['model'] != 'param' and hist_non_param is not None:
- saving_optim_hist['nonparam_cycle1'] = hist_non_param.history
-
- # Perform all the necessary cycles
- current_cycle = 1
-
- while args['total_cycles'] > current_cycle:
- current_cycle += 1
-
- # If projected learning is enabled project DD_features.
- if args['project_dd_features'] and args['model'] == 'poly':
- tf_semiparam_field.project_DD_features(tf_zernike_cube)
- print('Project non-param DD features onto param model: done!')
- if args['reset_dd_features']:
- tf_semiparam_field.tf_np_poly_opd.init_vars()
-                print('DD features reset to random initialisation.')
-
- # Prepare to save the model as a callback
-        filepath_chkp_callback = (
-            args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_cycle' + str(current_cycle)
-        )
- model_chkp_callback = tf.keras.callbacks.ModelCheckpoint(
- filepath_chkp_callback,
- monitor='mean_squared_error',
- verbose=1,
- save_best_only=True,
- save_weights_only=True,
- mode='min',
- save_freq='epoch',
- options=None
- )
-
- # Prepare the optimisers
- param_optim = tfa.optimizers.RectifiedAdam(
- learning_rate=args['l_rate_param'][current_cycle - 1]
- )
- non_param_optim = tfa.optimizers.RectifiedAdam(
- learning_rate=args['l_rate_non_param'][current_cycle - 1]
- )
-
- print('Starting cycle {}..'.format(current_cycle))
- start_cycle = time.time()
-
- # Compute the next cycle
- if args['model'] == 'param':
- tf_semiparam_field, hist_param_2 = wf_train_utils.param_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate=args['l_rate_param'][current_cycle - 1],
- n_epochs=args['n_epochs_param'][current_cycle - 1],
- param_optim=param_optim,
- param_loss=None,
- param_metrics=None,
- param_callback=None,
- general_callback=[model_chkp_callback],
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
- else:
- # Compute the next cycle
- tf_semiparam_field, hist_param_2, hist_non_param_2 = wf_train_utils.general_train_cycle(
- tf_semiparam_field,
- inputs=inputs,
- outputs=outputs,
- val_data=val_data,
- batch_size=args['batch_size'],
- l_rate_param=args['l_rate_param'][current_cycle - 1],
- l_rate_non_param=args['l_rate_non_param'][current_cycle - 1],
- n_epochs_param=args['n_epochs_param'][current_cycle - 1],
- n_epochs_non_param=args['n_epochs_non_param'][current_cycle - 1],
- param_optim=param_optim,
- non_param_optim=non_param_optim,
- param_loss=None,
- non_param_loss=None,
- param_metrics=None,
- non_param_metrics=None,
- param_callback=None,
- non_param_callback=None,
- general_callback=[model_chkp_callback],
- first_run=False,
- cycle_def=args['cycle_def'],
- use_sample_weights=args['use_sample_weights'],
- verbose=2
- )
-
-        # Save the weights at the end of the current cycle
- if args['save_all_cycles']:
- tf_semiparam_field.save_weights(
- model_save_file + 'chkp_' + run_id_name + '_cycle' + str(current_cycle)
- )
-
- end_cycle = time.time()
- print('Cycle{} elapsed time: {}'.format(current_cycle, end_cycle - start_cycle))
-
- # Save optimisation history in the saving dict
- if hist_param_2 is not None:
- saving_optim_hist['param_cycle{}'.format(current_cycle)] = hist_param_2.history
- if args['model'] != 'param' and hist_non_param_2 is not None:
- saving_optim_hist['nonparam_cycle{}'.format(current_cycle)] = hist_non_param_2.history
-
- # Save last cycle if no cycles were saved
- if not args['save_all_cycles']:
- tf_semiparam_field.save_weights(
- model_save_file + 'chkp_' + run_id_name + '_cycle' + str(current_cycle)
- )
-
- # Save optimisation history dictionary
- np.save(optim_hist_file + 'optim_hist_' + run_id_name + '.npy', saving_optim_hist)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f' % (final_time - starting_time))
-
- ## Close log file
- print('\n Good bye..')
- sys.stdout = old_stdout
- log_file.close()
-
-
-def evaluate_model(**args):
- r""" Evaluate the trained model.
-
-    For the parameters, check the training script's click help.
-
- """
- # Start measuring elapsed time
- starting_time = time.time()
-
- # Define model run id
- run_id_name = args['model'] + args['id_name']
- # Define paths
- log_save_file = args['base_path'] + args['log_folder']
-
- # Define saved model to use
- if args['saved_model_type'] == 'checkpoint':
-        weights_paths = (
-            args['chkp_save_path'] + 'chkp_callback_' + run_id_name + '_' + args['saved_cycle']
-        )
-
- elif args['saved_model_type'] == 'final':
- model_save_file = args['base_path'] + args['model_folder']
- weights_paths = model_save_file + 'chkp_' + run_id_name + '_' + args['saved_cycle']
-
- elif args['saved_model_type'] == 'external':
- weights_paths = args['chkp_save_path']
-
- ## Save output prints to logfile
- old_stdout = sys.stdout
- log_file = open(log_save_file + run_id_name + '-metrics_output.log', 'w')
- sys.stdout = log_file
- print('Starting the log file.')
-
- try:
- ## Check GPU and tensorflow version
- device_name = tf.test.gpu_device_name()
- print('Found GPU at: {}'.format(device_name))
- print('tf_version: ' + str(tf.__version__))
-
- ## Load datasets
- train_dataset = np.load(
- args['dataset_folder'] + args['train_dataset_file'], allow_pickle=True
- )[()]
- # train_stars = train_dataset['stars']
- # noisy_train_stars = train_dataset['noisy_stars']
- # train_pos = train_dataset['positions']
- train_SEDs = train_dataset['SEDs']
- # train_zernike_coef = train_dataset['zernike_coef']
- train_C_poly = train_dataset['C_poly']
- train_parameters = train_dataset['parameters']
-
- test_dataset = np.load(
- args['dataset_folder'] + args['test_dataset_file'], allow_pickle=True
- )[()]
- # test_stars = test_dataset['stars']
- # test_pos = test_dataset['positions']
- test_SEDs = test_dataset['SEDs']
- # test_zernike_coef = test_dataset['zernike_coef']
- # ground truth d_max (spatial polynomial max order)
- if args['model_eval'] == 'poly':
- d_max_gt = test_dataset['parameters']['d_max']
-
- # Convert to tensor
- tf_noisy_train_stars = tf.convert_to_tensor(train_dataset['noisy_stars'], dtype=tf.float32)
- tf_train_pos = tf.convert_to_tensor(train_dataset['positions'], dtype=tf.float32)
- tf_test_pos = tf.convert_to_tensor(test_dataset['positions'], dtype=tf.float32)
-
- if args['model'] == 'poly_physical':
- # Concatenate the Zernike and the positions from train and test datasets
- all_zernike_prior = np.concatenate(
- (train_dataset['zernike_prior'], test_dataset['zernike_prior']), axis=0
- )
- all_pos = np.concatenate((train_dataset['positions'], test_dataset['positions']), axis=0)
- # Convert to tensor
- tf_zernike_prior_all = tf.convert_to_tensor(all_zernike_prior, dtype=tf.float32)
- tf_pos_all = tf.convert_to_tensor(all_pos, dtype=tf.float32)
-
- if args['model_eval'] == 'physical':
- # Concatenate both datasets
- all_zernike_GT = np.concatenate(
- (train_dataset['zernike_GT'], test_dataset['zernike_GT']),
- axis=0
- )
- all_pos = np.concatenate((train_dataset['positions'], test_dataset['positions']), axis=0)
- # Convert to tensor
- tf_zernike_GT_all = tf.convert_to_tensor(all_zernike_GT, dtype=tf.float32)
- tf_pos_all = tf.convert_to_tensor(all_pos, dtype=tf.float32)
-
- print('Dataset parameters:')
- print(train_parameters)
-
- ## Prepare models
- # Generate Zernike maps
- zernikes = wf_utils.zernike_generator(
- n_zernikes=args['n_zernikes'], wfe_dim=args['pupil_diameter']
- )
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
-
- for it in range(len(zernikes)):
- np_zernike_cube[it, :, :] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- # New interp features backwards compatibility
- if 'interp_pts_per_bin' not in args:
- args['interp_pts_per_bin'] = 0
- args['extrapolate'] = True
- args['sed_interp_kind'] = 'linear'
-
- # Prepare np input
-        simPSF_np = SimPSFToolkit.SimPSFToolkit(
- zernikes,
- max_order=args['n_zernikes'],
- pupil_diameter=args['pupil_diameter'],
- output_dim=args['output_dim'],
- oversampling_rate=args['oversampling_rate'],
- output_Q=args['output_q'],
- interp_pts_per_bin=args['interp_pts_per_bin'],
- extrapolate=args['extrapolate'],
- SED_interp_kind=args['sed_interp_kind'],
- SED_sigma=args['sed_sigma']
- )
- simPSF_np.gen_random_Z_coeffs(max_order=args['n_zernikes'])
- z_coeffs = simPSF_np.normalize_zernikes(simPSF_np.get_z_coeffs(), simPSF_np.max_wfe_rms)
- simPSF_np.set_z_coeffs(z_coeffs)
- simPSF_np.generate_mono_PSF(lambda_obs=0.7, regen_sample=False)
-
- # Obscurations
- obscurations = simPSF_np.generate_pupil_obscurations(N_pix=args['pupil_diameter'], N_filter=2)
- tf_obscurations = tf.convert_to_tensor(obscurations, dtype=tf.complex64)
-
- # Outputs (needed for the MCCD model)
- outputs = tf_noisy_train_stars
-
-        ## Select and create the model
- if args['model'] == 'mccd' or args['model'] == 'graph':
- poly_dic, graph_dic = tf_mccd_psf_field.build_mccd_spatial_dic_v2(
- obs_stars=outputs.numpy(),
- obs_pos=tf_train_pos.numpy(),
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- d_max=args['d_max_nonparam'],
- graph_features=args['graph_features']
- )
- spatial_dic = [poly_dic, graph_dic]
-
- if args['model'] == 'mccd':
- # Initialize the model
- tf_semiparam_field = tf_mccd_psf_field.TF_SP_MCCD_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- d_max_nonparam=args['d_max_nonparam'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'graph':
- # Initialize the model
- tf_semiparam_field = tf_mccd_psf_field.TF_SP_graph_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_train_pos,
- spatial_dic=spatial_dic,
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- graph_features=args['graph_features'],
- l1_rate=args['l1_rate'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'poly':
- # Initialize the model
- tf_semiparam_field = tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'param':
- # Initialize the model
- tf_semiparam_field = tf_psf_field.TF_PSF_field_model(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zernikes=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
-
- elif args['model'] == 'poly_physical':
- # Initialize the model
- tf_semiparam_field = tf_psf_field.TF_physical_poly_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_pos_all,
- zks_prior=tf_zernike_prior_all,
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- l2_param=args['l2_param'],
- output_dim=args['output_dim'],
- n_zks_param=args['n_zernikes'],
- d_max=args['d_max'],
- x_lims=args['x_lims'],
- y_lims=args['y_lims'],
- interpolation_type=args['interpolation_type'],
- )
-
- ## Load the model's weights
- tf_semiparam_field.load_weights(weights_paths)
-
-        # If eval_only_param is True, set the non-parametric model to zero.
- if 'eval_only_param' not in args:
- args['eval_only_param'] = False
- elif args['eval_only_param']:
- if args['project_dd_features']:
- tf_semiparam_field.project_DD_features(tf_zernike_cube)
- tf_semiparam_field.set_zero_nonparam()
-
- ## Prepare ground truth model
- # Generate Zernike maps
- zernikes = wf_utils.zernike_generator(
- n_zernikes=args['gt_n_zernikes'], wfe_dim=args['pupil_diameter']
- )
- # Now as cubes
- np_zernike_cube = np.zeros((len(zernikes), zernikes[0].shape[0], zernikes[0].shape[1]))
- for it in range(len(zernikes)):
- np_zernike_cube[it, :, :] = zernikes[it]
-
- np_zernike_cube[np.isnan(np_zernike_cube)] = 0
- tf_zernike_cube = tf.convert_to_tensor(np_zernike_cube, dtype=tf.float32)
-
- if args['model_eval'] == 'physical':
- # Initialize the model
- GT_tf_semiparam_field = tf_psf_field.TF_GT_physical_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- obs_pos=tf_pos_all,
- zks_prior=tf_zernike_GT_all,
- output_Q=args['output_q'],
- output_dim=args['output_dim'],
- )
- elif args['model_eval'] == 'poly':
- # Initialize the model
- GT_tf_semiparam_field = tf_psf_field.TF_SemiParam_field(
- zernike_maps=tf_zernike_cube,
- obscurations=tf_obscurations,
- batch_size=args['batch_size'],
- output_Q=args['output_q'],
- d_max_nonparam=args['d_max_nonparam'],
- output_dim=args['output_dim'],
- n_zernikes=args['gt_n_zernikes'],
- # d_max_GT may differ from the current d_max of the parametric model
- # d_max=args['d_max'],
- d_max=d_max_gt,
- x_lims=args['x_lims'],
- y_lims=args['y_lims']
- )
- # For the Ground truth model
- GT_tf_semiparam_field.tf_poly_Z_field.assign_coeff_matrix(train_C_poly)
- GT_tf_semiparam_field.set_zero_nonparam()
-
- ## Metric evaluation on the test dataset
- print('\n***\nMetric evaluation on the test dataset\n***\n')
-
- if 'n_bins_gt' not in args:
- args['n_bins_gt'] = args['n_bins_lda']
-
- # Polychromatic star reconstructions
- print('Computing polychromatic metrics at low resolution.')
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf_metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- tf_SEDs=test_SEDs,
- n_bins_lda=args['n_bins_lda'],
- n_bins_gt=args['n_bins_gt'],
- batch_size=args['eval_batch_size'],
- dataset_dict=test_dataset,
- )
-
- poly_metric = {
- 'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
-        if 'eval_mono_metric_rmse' not in args or args['eval_mono_metric_rmse'] is True:
- print('Computing monochromatic metrics.')
- lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf_metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_test_pos,
- lambda_list=lambda_list
- )
-
- mono_metric = {
- 'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
- else:
- mono_metric = None
-
- # OPD metrics
-        if 'eval_opd_metric_rmse' not in args or args['eval_opd_metric_rmse'] is True:
- print('Computing OPD metrics.')
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf_metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_test_pos,
- batch_size=args['eval_batch_size']
- )
-
- opd_metric = {
- 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
- else:
- opd_metric = None
-
-
- # Check if all stars SR pixel RMSE are needed
- if 'opt_stars_rel_pix_rmse' not in args:
- args['opt_stars_rel_pix_rmse'] = False
-
- # Shape metrics
- print('Computing polychromatic high-resolution metrics and shape metrics.')
- shape_results_dict = wf_metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=test_SEDs,
- tf_pos=tf_test_pos,
- n_bins_lda=args['n_bins_lda'],
- n_bins_gt=args['n_bins_gt'],
- output_Q=1,
- output_dim=64,
- batch_size=args['eval_batch_size'],
- opt_stars_rel_pix_rmse=args['opt_stars_rel_pix_rmse'],
- dataset_dict=test_dataset,
- )
-
- # Save metrics
- test_metrics = {
- 'poly_metric': poly_metric,
- 'mono_metric': mono_metric,
- 'opd_metric': opd_metric,
- 'shape_results_dict': shape_results_dict
- }
-
- ## Metric evaluation on the train dataset
- print('\n***\nMetric evaluation on the train dataset\n***\n')
-
- # Polychromatic star reconstructions
- print('Computing polychromatic metrics at low resolution.')
- rmse, rel_rmse, std_rmse, std_rel_rmse = wf_metrics.compute_poly_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- tf_SEDs=train_SEDs,
- n_bins_lda=args['n_bins_lda'],
- n_bins_gt=args['n_bins_gt'],
- batch_size=args['eval_batch_size'],
- dataset_dict=train_dataset,
- )
-
- train_poly_metric = {
- 'rmse': rmse,
- 'rel_rmse': rel_rmse,
- 'std_rmse': std_rmse,
- 'std_rel_rmse': std_rel_rmse
- }
-
- # Monochromatic star reconstructions
-        if 'eval_mono_metric_rmse' not in args or args['eval_mono_metric_rmse'] is True:
- print('Computing monochromatic metrics.')
- lambda_list = np.arange(0.55, 0.9, 0.01) # 10nm separation
- rmse_lda, rel_rmse_lda, std_rmse_lda, std_rel_rmse_lda = wf_metrics.compute_mono_metric(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- tf_pos=tf_train_pos,
- lambda_list=lambda_list
- )
-
- train_mono_metric = {
- 'rmse_lda': rmse_lda,
- 'rel_rmse_lda': rel_rmse_lda,
- 'std_rmse_lda': std_rmse_lda,
- 'std_rel_rmse_lda': std_rel_rmse_lda
- }
- else:
- train_mono_metric = None
-
-
- # OPD metrics
-        if 'eval_opd_metric_rmse' not in args or args['eval_opd_metric_rmse'] is True:
- print('Computing OPD metrics.')
- rmse_opd, rel_rmse_opd, rmse_std_opd, rel_rmse_std_opd = wf_metrics.compute_opd_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- pos=tf_train_pos,
- batch_size=args['eval_batch_size']
- )
-
- train_opd_metric = {
- 'rmse_opd': rmse_opd,
- 'rel_rmse_opd': rel_rmse_opd,
- 'rmse_std_opd': rmse_std_opd,
- 'rel_rmse_std_opd': rel_rmse_std_opd
- }
- else:
- train_opd_metric = None
-
-
- # Shape metrics
-        if 'eval_train_shape_sr_metric_rmse' not in args or args['eval_train_shape_sr_metric_rmse'] is True:
- print('Computing polychromatic high-resolution metrics and shape metrics.')
- train_shape_results_dict = wf_metrics.compute_shape_metrics(
- tf_semiparam_field=tf_semiparam_field,
- GT_tf_semiparam_field=GT_tf_semiparam_field,
- simPSF_np=simPSF_np,
- SEDs=train_SEDs,
- tf_pos=tf_train_pos,
- n_bins_lda=args['n_bins_lda'],
- n_bins_gt=args['n_bins_gt'],
- output_Q=1,
- output_dim=64,
- batch_size=args['eval_batch_size'],
- dataset_dict=train_dataset,
- )
-
- # Save metrics into dictionary
- train_metrics = {
- 'poly_metric': train_poly_metric,
- 'mono_metric': train_mono_metric,
- 'opd_metric': train_opd_metric,
- 'shape_results_dict': train_shape_results_dict
- }
- else:
- train_metrics = None
-
- ## Save results
- metrics = {'test_metrics': test_metrics, 'train_metrics': train_metrics}
- output_path = args['metric_base_path'] + 'metrics-' + run_id_name
- np.save(output_path, metrics, allow_pickle=True)
-
- ## Print final time
- final_time = time.time()
- print('\nTotal elapsed time: %f' % (final_time - starting_time))
-
- ## Close log file
- print('\n Good bye..')
- sys.stdout = old_stdout
- log_file.close()
-
- except Exception as e:
- print('Error: %s' % e)
- sys.stdout = old_stdout
- log_file.close()
- raise e
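
The metrics file written by evaluate_model can be read back as a nested dictionary, exactly as plot_metrics does below; a small sketch with an illustrative path:

    import numpy as np

    metrics = np.load('/path/to/metrics-poly_sample_run.npy', allow_pickle=True)[()]
    print(metrics['test_metrics']['poly_metric']['rmse'])
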
-
-
-def plot_metrics(**args):
- r""" Plot model results.
- """
- define_plot_style()
-
- plot_saving_path = args['base_path'] + args['plots_folder']
-
-    # Define common data
- lambda_list = np.arange(0.55, 0.9, 0.01)
- star_list = np.array(args['star_numbers'])
- e1_req_euclid = 2e-04
- e2_req_euclid = 2e-04
- R2_req_euclid = 1e-03
-
- # Define the number of datasets to test
- if not isinstance(args['suffix_id_name'], str):
- n_datasets = len(args['suffix_id_name'])
- else:
- n_datasets = 1
-
- # Run id without suffix
- run_id_no_suff = args['model'] + args['base_id_name']
-
- # Define the metric data paths
- if not isinstance(args['suffix_id_name'], str):
- model_paths = [
- args['metric_base_path'] + 'metrics-' + run_id_no_suff + _suff + '.npy'
- for _suff in args['suffix_id_name']
- ]
- else:
- model_paths = [
- args['metric_base_path'] + 'metrics-' + run_id_no_suff + args['suffix_id_name'] + '.npy'
- ]
-
- print('Model paths for performance plots: ', model_paths)
-
- # Load metrics
- try:
- metrics = [np.load(_path, allow_pickle=True)[()] for _path in model_paths]
- except FileNotFoundError:
- print('The required file for the plots was not found.')
-        print('Most likely this is not the last job, which is responsible for plotting the performance metrics.')
- return 0
-
- for plot_dataset in ['test', 'train']:
-
- try:
- ## Polychromatic results
- res = extract_poly_results(metrics, test_train=plot_dataset)
- model_polyc_rmse = res[0]
- model_polyc_std_rmse = res[1]
- model_polyc_rel_rmse = res[2]
- model_polyc_std_rel_rmse = res[3]
-
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- ax1.errorbar(
- x=star_list,
- y=model_polyc_rmse,
- yerr=model_polyc_std_rmse,
- label=run_id_no_suff,
- alpha=0.75
- )
- plt.minorticks_on()
- ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
- ax1.legend()
- ax1.set_title(
- 'Stars ' + plot_dataset + '\n' + run_id_no_suff +
- '.\nPolychromatic pixel RMSE @ Euclid resolution'
- )
- ax1.set_xlabel('Number of stars')
- ax1.set_ylabel('Absolute error')
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=4, marker='^', alpha=0.5)
- ax2.plot(star_list, model_polyc_rel_rmse, **kwargs)
- ax2.set_ylabel('Relative error [%]')
- ax2.grid(False)
- plt.savefig(
- plot_saving_path + plot_dataset + '-metrics-' + run_id_no_suff +
- '_polyc_pixel_RMSE.png'
- )
- plt.show()
- except Exception:
- print('Problem with the performance metrics plot of pixel polychromatic errors.')
-
- ## Monochromatic
-        if 'eval_mono_metric_rmse' not in args or args['eval_mono_metric_rmse'] is True:
- try:
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- for it in range(n_datasets):
- ax1.errorbar(
- x=lambda_list,
- y=metrics[it]['test_metrics']['mono_metric']['rmse_lda'],
- yerr=metrics[it]['test_metrics']['mono_metric']['std_rmse_lda'],
- label=args['model'] + args['suffix_id_name'][it],
- alpha=0.75
- )
- plt.minorticks_on()
- ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
- ax1.legend()
- ax1.set_title(
- 'Stars ' + plot_dataset + '\n' + run_id_no_suff +
- '.\nMonochromatic pixel RMSE @ Euclid resolution'
- )
- ax1.set_xlabel('Wavelength [um]')
- ax1.set_ylabel('Absolute error')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=8, marker='^', alpha=0.5)
- for it in range(n_datasets):
- ax2.plot(
- lambda_list, metrics[it]['test_metrics']['mono_metric']['rel_rmse_lda'],
- **kwargs
- )
- ax2.set_ylabel('Relative error [%]')
- ax2.grid(False)
- plt.savefig(
- plot_saving_path + plot_dataset + '-metrics-' + run_id_no_suff +
- '_monochrom_pixel_RMSE.png'
- )
- plt.show()
- except Exception:
- print('Problem with the performance metrics plot of pixel monochromatic errors.')
-
- ## OPD results
-        if 'eval_opd_metric_rmse' not in args or args['eval_opd_metric_rmse'] is True:
- try:
- res = extract_opd_results(metrics, test_train=plot_dataset)
- model_opd_rmse = res[0]
- model_opd_std_rmse = res[1]
- model_opd_rel_rmse = res[2]
- model_opd_std_rel_rmse = res[3]
-
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- ax1.errorbar(
- x=star_list,
- y=model_opd_rmse,
- yerr=model_opd_std_rmse,
- label=run_id_no_suff,
- alpha=0.75
- )
- plt.minorticks_on()
- ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
- ax1.legend()
- ax1.set_title('Stars ' + plot_dataset + '\n' + run_id_no_suff + '.\nOPD RMSE')
- ax1.set_xlabel('Number of stars')
- ax1.set_ylabel('Absolute error')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=8, marker='^', alpha=0.5)
- ax2.plot(star_list, model_opd_rel_rmse, **kwargs)
- ax2.set_ylabel('Relative error [%]')
- ax2.grid(False)
- plt.savefig(
- plot_saving_path + plot_dataset + '-metrics-' + run_id_no_suff + '_OPD_RMSE.png'
- )
- plt.show()
- except Exception:
- print('Problem with the performance metrics plot of OPD errors.')
-
- ## Shape results
-        if plot_dataset == 'test' or 'eval_train_shape_sr_metric_rmse' not in args or args['eval_train_shape_sr_metric_rmse'] is True:
- model_e1, model_e2, model_R2 = extract_shape_results(metrics, test_train=plot_dataset)
- model_e1_rmse = model_e1[0]
- model_e1_std_rmse = model_e1[1]
- model_e1_rel_rmse = model_e1[2]
- model_e1_std_rel_rmse = model_e1[3]
- model_e2_rmse = model_e2[0]
- model_e2_std_rmse = model_e2[1]
- model_e2_rel_rmse = model_e2[2]
- model_e2_std_rel_rmse = model_e2[3]
- model_rmse_R2_meanR2 = model_R2[0]
- model_std_rmse_R2_meanR2 = model_R2[1]
-
- # Compute Euclid relative error values
- model_e1_rel_euclid = model_e1_rmse / e1_req_euclid
- model_e2_rel_euclid = model_e2_rmse / e2_req_euclid
- model_R2_rel_euclid = model_rmse_R2_meanR2 / R2_req_euclid
-
-
- # Plot e1 and e2
- try:
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- ax1.errorbar(
- x=star_list,
- y=model_e1_rmse,
- yerr=model_e1_std_rmse,
- label='e1 ' + run_id_no_suff,
- alpha=0.75
- )
- ax1.errorbar(
- x=star_list,
- y=model_e2_rmse,
- yerr=model_e2_std_rmse,
- label='e2 ' + run_id_no_suff,
- alpha=0.75
- )
- plt.minorticks_on()
- ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
- ax1.legend()
- ax1.set_title(
- 'Stars ' + plot_dataset + '\n' + run_id_no_suff +
- '\ne1, e2 RMSE @ 3x Euclid resolution'
- )
- ax1.set_xlabel('Number of stars')
- ax1.set_ylabel('Absolute error')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=8, marker='^', alpha=0.5)
- ax2.plot(star_list, model_e1_rel_euclid, **kwargs)
- ax2.plot(star_list, model_e2_rel_euclid, **kwargs)
- ax2.set_ylabel('Times over Euclid req.')
- ax2.grid(False)
- plt.savefig(
- plot_saving_path + plot_dataset + '-metrics-' + run_id_no_suff +
- '_shape_e1_e2_RMSE.png'
- )
- plt.show()
- except Exception:
- print('Problem with the performance metrics plot of e1/e2 errors.')
-
- # Plot R2
- try:
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- ax1.errorbar(
- x=star_list,
- y=model_rmse_R2_meanR2,
- yerr=model_std_rmse_R2_meanR2,
- label='R2 ' + run_id_no_suff,
- alpha=0.75
- )
- plt.minorticks_on()
- ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
- ax1.legend()
- ax1.set_title(
- 'Stars ' + plot_dataset + '\n' + run_id_no_suff +
- '\nR2 RMSE @ 3x Euclid resolution'
- )
- ax1.set_xlabel('Number of stars')
- ax1.set_ylabel('Absolute error')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=8, marker='^', alpha=0.5)
- ax2.plot(star_list, model_R2_rel_euclid, **kwargs)
- ax2.set_ylabel('Times over Euclid req.')
- ax2.grid(False)
- plt.savefig(
- plot_saving_path + plot_dataset + '-metrics-' + run_id_no_suff +
- '_shape_R2_RMSE.png'
- )
- plt.show()
- except Exception:
- print('Problem with the performance metrics plot of R2 errors.')
-
- ## Polychromatic pixel residual at shape measurement resolution
- try:
- res = extract_shape_pix_results(metrics, test_train=plot_dataset)
- model_polyc_shpix_rmse = res[0]
- model_polyc_shpix_std_rmse = res[1]
- model_polyc_shpix_rel_rmse = res[2]
- model_polyc_shpix_std_rel_rmse = res[3]
-
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- ax1.errorbar(
- x=star_list,
- y=model_polyc_shpix_rmse,
- yerr=model_polyc_shpix_std_rmse,
- label=run_id_no_suff,
- alpha=0.75
- )
- plt.minorticks_on()
- ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
- ax1.legend()
- ax1.set_title(
- 'Stars ' + plot_dataset + '\n' + run_id_no_suff +
- '\nPixel RMSE @ 3x Euclid resolution'
- )
- ax1.set_xlabel('Number of stars')
- ax1.set_ylabel('Absolute error')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=8, marker='^', alpha=0.5)
- ax2.plot(star_list, model_polyc_shpix_rel_rmse, **kwargs)
- ax2.set_ylabel('Relative error [%]')
- ax2.grid(False)
- plt.savefig(
- plot_saving_path + plot_dataset + '-metrics-' + run_id_no_suff +
- '_poly_pixel_3xResolution_RMSE.png'
- )
- plt.show()
- except Exception:
- print(
- 'Problem with the performance metrics plot of super resolution pixel polychromatic errors.'
- )
-
-
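For reference, the shape panels above report errors as multiples of the Euclid requirements by dividing each RMSE by the corresponding requirement value (`e1_req_euclid`, `e2_req_euclid`, `R2_req_euclid`, defined elsewhere in this module). A minimal sketch of that normalisation, with hypothetical requirement and RMSE values:

```python
import numpy as np

# Hypothetical requirement value; the real e1/e2/R2 requirements are defined
# elsewhere in this module.
e1_req_euclid = 2e-4
model_e1_rmse = np.array([8e-4, 4e-4, 2e-4])  # hypothetical RMSE per training run

# "Times over Euclid req.": 1.0 means the requirement is exactly met.
model_e1_rel_euclid = model_e1_rmse / e1_req_euclid
print(model_e1_rel_euclid)  # [4. 2. 1.]
```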
-def plot_optimisation_metrics(**args):
- r""" Plot optimisation results.
- """
- define_plot_style()
-
- # Define saving path
- plot_saving_path = args['base_path'] + args['plots_folder']
-
- # Define the number of datasets to test
- if not isinstance(args['suffix_id_name'], str):
- n_datasets = len(args['suffix_id_name'])
- else:
- n_datasets = 1
-
- optim_hist_file = args['base_path'] + args['optim_hist_folder']
- run_id_no_suff = args['model'] + args['base_id_name']
-
- # Define the metric data paths
- if not isinstance(args['suffix_id_name'], str):
- model_paths = [
- optim_hist_file + 'optim_hist_' + run_id_no_suff + _suff + '.npy'
- for _suff in args['suffix_id_name']
- ]
- else:
- model_paths = [
- optim_hist_file + 'optim_hist_' + run_id_no_suff + str(args['suffix_id_name']) + '.npy'
- ]
-
- print('Model paths for optimisation plots: ', model_paths)
-
- try:
- # Load metrics
- metrics = [np.load(_path, allow_pickle=True)[()] for _path in model_paths]
- except FileNotFoundError:
- print('The required file for the plots was not found.')
- print('This is probably not the last job, so the optimisation metrics cannot be plotted yet.')
- raise
-
- ## Plot the first parametric cycle
- cycle_str = 'param_cycle1'
- metric_str = 'mean_squared_error'
- val_metric_str = 'val_mean_squared_error'
-
- try:
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- for it in range(n_datasets):
- try:
- ax1.plot(
- metrics[it][cycle_str][metric_str],
- label=args['model'] + args['suffix_id_name'][it],
- alpha=0.75
- )
- except KeyError as KE:
- print('Error with Key: ', KE)
- plt.yscale('log')
- plt.minorticks_on()
- ax1.legend()
- ax1.set_title('Parametric cycle 1.\n' + run_id_no_suff + '_' + cycle_str)
- ax1.set_xlabel('Number of epoch')
- ax1.set_ylabel('Training MSE')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=2, marker='^', alpha=0.5)
- for it in range(n_datasets):
- try:
- ax2.plot(metrics[it][cycle_str][val_metric_str], **kwargs)
- except KeyError as KE:
- print('Error with Key: ', KE)
- ax2.set_ylabel('Validation MSE')
- ax2.grid(False)
- plt.savefig(plot_saving_path + 'optim_' + run_id_no_suff + '_' + cycle_str + '.png')
- plt.show()
- except Exception:
- print('Problem with the plot of the optimisation metrics of the first parametric cycle.')
-
- # Plot the first non-parametric cycle
- if args['model'] != 'param':
- try:
- cycle_str = 'nonparam_cycle1'
- metric_str = 'mean_squared_error'
- val_metric_str = 'val_mean_squared_error'
-
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- for it in range(n_datasets):
- try:
- ax1.plot(
- metrics[it][cycle_str][metric_str],
- label=args['model'] + args['suffix_id_name'][it],
- alpha=0.75
- )
- except KeyError as KE:
- print('Error with Key: ', KE)
- plt.yscale('log')
- plt.minorticks_on()
- ax1.legend()
- ax1.set_title('Non-parametric cycle 1.\n' + run_id_no_suff + '_' + cycle_str)
- ax1.set_xlabel('Number of epoch')
- ax1.set_ylabel('Training MSE')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=2, marker='^', alpha=0.5)
- for it in range(n_datasets):
- try:
- ax2.plot(metrics[it][cycle_str][val_metric_str], **kwargs)
- except KeyError as KE:
- print('Error with Key: ', KE)
- ax2.set_ylabel('Validation MSE')
- ax2.grid(False)
- plt.savefig(plot_saving_path + 'optim_' + run_id_no_suff + '_' + cycle_str + '.png')
- plt.show()
- except Exception:
- print(
- 'Problem with the plot of the optimisation metrics of the first non-parametric cycle.'
- )
-
- ## Plot the second parametric cycle
- cycle_str = 'param_cycle2'
- if cycle_str in metrics[0]:
- metric_str = 'mean_squared_error'
- val_metric_str = 'val_mean_squared_error'
-
- try:
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- for it in range(n_datasets):
- try:
- ax1.plot(
- metrics[it][cycle_str][metric_str],
- label=args['model'] + args['suffix_id_name'][it],
- alpha=0.75
- )
- except KeyError as KE:
- print('Error with Key: ', KE)
- plt.yscale('log')
- plt.minorticks_on()
- ax1.legend()
- ax1.set_title('Parametric cycle 2.\n' + run_id_no_suff + '_' + cycle_str)
- ax1.set_xlabel('Number of epoch')
- ax1.set_ylabel('Training MSE')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=2, marker='^', alpha=0.5)
- for it in range(n_datasets):
- try:
- ax2.plot(metrics[it][cycle_str][val_metric_str], **kwargs)
- except KeyError as KE:
- print('Error with Key: ', KE)
- ax2.set_ylabel('Validation MSE')
- ax2.grid(False)
- plt.savefig(plot_saving_path + 'optim_' + run_id_no_suff + '_' + cycle_str + '.png')
- plt.show()
- except Exception:
- print(
- 'Problem with the plot of the optimisation metrics of the second parametric cycle.'
- )
-
- ## Plot the second non-parametric cycle
- cycle_str = 'nonparam_cycle2'
- if cycle_str in metrics[0]:
- metric_str = 'mean_squared_error'
- val_metric_str = 'val_mean_squared_error'
-
- try:
- fig = plt.figure(figsize=(12, 8))
- ax1 = fig.add_subplot(111)
- for it in range(n_datasets):
- try:
- ax1.plot(
- metrics[it][cycle_str][metric_str],
- label=args['model'] + args['suffix_id_name'][it],
- alpha=0.75
- )
- except KeyError as KE:
- print('Error with Key: ', KE)
- plt.yscale('log')
- plt.minorticks_on()
- ax1.legend()
- ax1.set_title('Non-parametric cycle 2.\n' + run_id_no_suff + '_' + cycle_str)
- ax1.set_xlabel('Number of epoch')
- ax1.set_ylabel('Training MSE')
-
- ax2 = ax1.twinx()
- kwargs = dict(linewidth=2, linestyle='dashed', markersize=2, marker='^', alpha=0.5)
- for it in range(n_datasets):
- try:
- ax2.plot(metrics[it][cycle_str][val_metric_str], **kwargs)
- except KeyError as KE:
- print('Error with Key: ', KE)
- ax2.set_ylabel('Validation MSE')
- ax2.grid(False)
- plt.savefig(plot_saving_path + 'optim_' + run_id_no_suff + '_' + cycle_str + '.png')
- plt.show()
- except Exception:
- print(
- 'Problem with the plot of the optimisation metrics of the second non-parametric cycle.'
- )
-
-
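`plot_optimisation_metrics` above expects each `optim_hist_*.npy` file to hold a dictionary of per-cycle Keras training histories, keyed as in the code. A minimal sketch, with made-up values and a hypothetical file name, of a file this function could load:

```python
import numpy as np

# Hypothetical optimisation history matching the keys read above.
optim_hist = {
    'param_cycle1': {
        'mean_squared_error': [1e-3, 5e-4, 2e-4],      # training MSE per epoch
        'val_mean_squared_error': [2e-3, 8e-4, 4e-4],  # validation MSE per epoch
    },
    'nonparam_cycle1': {
        'mean_squared_error': [1.5e-4, 9e-5],
        'val_mean_squared_error': [3e-4, 2e-4],
    },
}
# Hypothetical file name following the 'optim_hist_' + run id pattern above.
np.save('optim_hist_poly_sample_w_bis1_2k.npy', optim_hist)
# plot_optimisation_metrics reloads it with np.load(..., allow_pickle=True)[()]
```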
-def define_plot_style():
- """ Define the common matplotlib plot style."""
- # Define plot parameters
- plot_style = {
- 'figure.figsize': (12, 8),
- 'figure.dpi': 200,
- 'figure.autolayout': True,
- 'lines.linewidth': 2,
- 'lines.linestyle': '-',
- 'lines.marker': 'o',
- 'lines.markersize': 10,
- 'legend.fontsize': 20,
- 'legend.loc': 'best',
- 'axes.titlesize': 24,
- 'font.size': 16
- }
- mpl.rcParams.update(plot_style)
- # Use seaborn style
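- # Note: sns.set() resets many rcParams, so it may override some of the settings above.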
- sns.set()
-
-
-def extract_poly_results(metrics_dicts, test_train='test'):
- """ Extract the polychromatic pixel RMSE metrics from a list of metric dictionaries."""
-
- if test_train == 'test':
- first_key = 'test_metrics'
- elif test_train == 'train':
- first_key = 'train_metrics'
- else:
- raise ValueError("test_train must be either 'test' or 'train'.")
-
- n_dicts = len(metrics_dicts)
-
- polyc_rmse = np.zeros(n_dicts)
- polyc_std_rmse = np.zeros(n_dicts)
- polyc_rel_rmse = np.zeros(n_dicts)
- polyc_std_rel_rmse = np.zeros(n_dicts)
-
- for it in range(n_dicts):
- polyc_rmse[it] = metrics_dicts[it][first_key]['poly_metric']['rmse']
- polyc_std_rmse[it] = metrics_dicts[it][first_key]['poly_metric']['std_rmse']
- polyc_rel_rmse[it] = metrics_dicts[it][first_key]['poly_metric']['rel_rmse']
- polyc_std_rel_rmse[it] = metrics_dicts[it][first_key]['poly_metric']['std_rel_rmse']
-
- return polyc_rmse, polyc_std_rmse, polyc_rel_rmse, polyc_std_rel_rmse
-
-
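`extract_poly_results` above reads a fixed nested key structure from each metrics dictionary. A minimal usage sketch with dummy values, assuming the function is in scope:

```python
# Hypothetical metrics dictionaries with the nested keys read above.
metrics_dicts = [
    {'test_metrics': {'poly_metric': {
        'rmse': 1.0e-3, 'std_rmse': 1.0e-4, 'rel_rmse': 1.2, 'std_rel_rmse': 0.1}}},
    {'test_metrics': {'poly_metric': {
        'rmse': 8.0e-4, 'std_rmse': 9.0e-5, 'rel_rmse': 1.0, 'std_rel_rmse': 0.1}}},
]
rmse, std_rmse, rel_rmse, std_rel_rmse = extract_poly_results(metrics_dicts, test_train='test')
# Each output is a numpy array with one entry per input dictionary.
```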
-def extract_opd_results(metrics_dicts, test_train='test'):
- """ Extract the OPD RMSE metrics from a list of metric dictionaries."""
-
- if test_train == 'test':
- first_key = 'test_metrics'
- elif test_train == 'train':
- first_key = 'train_metrics'
- else:
- raise ValueError("test_train must be either 'test' or 'train'.")
-
- n_dicts = len(metrics_dicts)
-
- opd_rmse = np.zeros(n_dicts)
- opd_std_rmse = np.zeros(n_dicts)
- opd_rel_rmse = np.zeros(n_dicts)
- opd_std_rel_rmse = np.zeros(n_dicts)
-
- for it in range(n_dicts):
- opd_rmse[it] = metrics_dicts[it][first_key]['opd_metric']['rmse_opd']
- opd_std_rmse[it] = metrics_dicts[it][first_key]['opd_metric']['rmse_std_opd']
- opd_rel_rmse[it] = metrics_dicts[it][first_key]['opd_metric']['rel_rmse_opd']
- opd_std_rel_rmse[it] = metrics_dicts[it][first_key]['opd_metric']['rel_rmse_std_opd']
-
- return opd_rmse, opd_std_rmse, opd_rel_rmse, opd_std_rel_rmse
-
-
-def extract_shape_results(metrics_dicts, test_train='test'):
- """ Extract the e1/e2/R2 shape metrics from a list of metric dictionaries."""
-
- if test_train == 'test':
- first_key = 'test_metrics'
- elif test_train == 'train':
- first_key = 'train_metrics'
- else:
- raise ValueError("test_train must be either 'test' or 'train'.")
-
- n_dicts = len(metrics_dicts)
-
- e1_rmse = np.zeros(n_dicts)
- e1_std_rmse = np.zeros(n_dicts)
- e1_rel_rmse = np.zeros(n_dicts)
- e1_std_rel_rmse = np.zeros(n_dicts)
-
- e2_rmse = np.zeros(n_dicts)
- e2_std_rmse = np.zeros(n_dicts)
- e2_rel_rmse = np.zeros(n_dicts)
- e2_std_rel_rmse = np.zeros(n_dicts)
-
- rmse_R2_meanR2 = np.zeros(n_dicts)
- std_rmse_R2_meanR2 = np.zeros(n_dicts)
-
- for it in range(n_dicts):
- e1_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['rmse_e1']
- e1_std_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['std_rmse_e1']
- e1_rel_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['rel_rmse_e1']
- e1_std_rel_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['std_rel_rmse_e1']
-
- e2_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['rmse_e2']
- e2_std_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['std_rmse_e2']
- e2_rel_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['rel_rmse_e2']
- e2_std_rel_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['std_rel_rmse_e2']
-
- rmse_R2_meanR2[it] = metrics_dicts[it][first_key]['shape_results_dict']['rmse_R2_meanR2']
- std_rmse_R2_meanR2[it] = metrics_dicts[it][first_key]['shape_results_dict'][
- 'std_rmse_R2_meanR2']
-
- e1 = [e1_rmse, e1_std_rmse, e1_rel_rmse, e1_std_rel_rmse]
- e2 = [e2_rmse, e2_std_rmse, e2_rel_rmse, e2_std_rel_rmse]
- R2 = [rmse_R2_meanR2, std_rmse_R2_meanR2]
-
- return e1, e2, R2
-
-
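`extract_shape_results` above packs its outputs as three lists rather than a flat tuple. A short usage sketch, assuming metric dictionaries that contain the `shape_results_dict` entries read above:

```python
# e1 and e2 are [rmse, std_rmse, rel_rmse, std_rel_rmse];
# R2 is [rmse_R2_meanR2, std_rmse_R2_meanR2].
e1, e2, R2 = extract_shape_results(metrics_dicts, test_train='test')
model_e1_rmse, model_e1_std_rmse = e1[0], e1[1]
model_rmse_R2_meanR2, model_std_rmse_R2_meanR2 = R2[0], R2[1]
```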
-def extract_shape_pix_results(metrics_dicts, test_train='test'):
- """ Extract the pixel RMSE metrics at shape-measurement resolution from a list of metric dictionaries."""
-
- if test_train == 'test':
- first_key = 'test_metrics'
- elif test_train == 'train':
- first_key = 'train_metrics'
- else:
- raise ValueError("test_train must be either 'test' or 'train'.")
-
- n_dicts = len(metrics_dicts)
-
- polyc_rmse = np.zeros(n_dicts)
- polyc_std_rmse = np.zeros(n_dicts)
- polyc_rel_rmse = np.zeros(n_dicts)
- polyc_std_rel_rmse = np.zeros(n_dicts)
-
- for it in range(n_dicts):
- polyc_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['pix_rmse']
- polyc_std_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['pix_rmse_std']
- polyc_rel_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict']['rel_pix_rmse']
- polyc_std_rel_rmse[it] = metrics_dicts[it][first_key]['shape_results_dict'][
- 'rel_pix_rmse_std']
-
- return polyc_rmse, polyc_std_rmse, polyc_rel_rmse, polyc_std_rel_rmse
diff --git a/wf_psf/tf_alt_psf_models.py b/wf_psf/tf_alt_psf_models.py
deleted file mode 100644
index 56e3a52e..00000000
--- a/wf_psf/tf_alt_psf_models.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import numpy as np
-import tensorflow as tf
-from wf_psf.tf_layers import TF_poly_Z_field, TF_zernike_OPD, TF_batch_poly_PSF
-from wf_psf.tf_layers import TF_NP_poly_OPD, TF_batch_mono_PSF
-
-
-class TF_SemiParam_field_l2_OPD(tf.keras.Model):
- """ PSF field forward model!
-
- Semi-parametric model based on the Zernike polynomial basis, with an l2 loss on the OPD.
-
- Parameters
- ----------
- zernike_maps: Tensor(n_batch, opd_dim, opd_dim)
- Zernike polynomial maps.
- obscurations: Tensor(opd_dim, opd_dim)
- Predefined obscurations of the phase.
- batch_size: int
- Batch size
- output_Q: float
- Oversampling used. This should match the oversampling Q used to generate
- the diffraction zero padding that is found in the input `packed_SEDs`.
- We call this other Q the `input_Q`.
- In that case, we replicate the original sampling of the model used to
- calculate the input `packed_SEDs`.
- The final oversampling of the generated PSFs with respect to the
- original instrument sampling depends on the ratio `input_Q/output_Q`.
- It is not recommended to use `output_Q < 1`.
- Although float values work, integer values are preferred.
- d_max_nonparam: int
- Maximum degree of the polynomial for the non-parametric variations.
- l2_param: float
- Parameter going with the l2 loss on the opd.
- output_dim: int
- Output dimension of the PSF stamps.
- n_zernikes: int
- Order of the Zernike polynomial for the parametric model.
- d_max: int
- Maximum degree of the polynomial for the Zernike coefficient variations.
- x_lims: [float, float]
- Limits for the x coordinate of the PSF field.
- y_lims: [float, float]
- Limits for the y coordinate of the PSF field.
- coeff_mat: Tensor or None
- Initialization of the coefficient matrix defining the parametric psf
- field model.
-
- """
-
- def __init__(
- self,
- zernike_maps,
- obscurations,
- batch_size,
- output_Q,
- d_max_nonparam=3,
- l2_param=1e-11,
- output_dim=64,
- n_zernikes=45,
- d_max=2,
- x_lims=[0, 1e3],
- y_lims=[0, 1e3],
- coeff_mat=None,
- name='TF_SemiParam_field_l2_OPD'
- ):
- super(TF_SemiParam_field_l2_OPD, self).__init__(name=name)
-
- # Inputs: oversampling used
- self.output_Q = output_Q
-
- # Inputs: TF_poly_Z_field
- self.n_zernikes = n_zernikes
- self.d_max = d_max
- self.x_lims = x_lims
- self.y_lims = y_lims
-
- # Inputs: TF_NP_poly_OPD
- self.d_max_nonparam = d_max_nonparam
- self.opd_dim = tf.shape(zernike_maps)[1].numpy()
-
- # Inputs: TF_zernike_OPD
- # They are not stored as they are memory-heavy
- # zernike_maps =[]
-
- # Inputs: TF_batch_poly_PSF
- self.batch_size = batch_size
- self.obscurations = obscurations
- self.output_dim = output_dim
-
- # Inputs: Loss
- self.l2_param = l2_param
-
- # Initialize the first layer
- self.tf_poly_Z_field = TF_poly_Z_field(
- x_lims=self.x_lims, y_lims=self.y_lims, n_zernikes=self.n_zernikes, d_max=self.d_max
- )
-
- # Initialize the zernike to OPD layer
- self.tf_zernike_OPD = TF_zernike_OPD(zernike_maps=zernike_maps)
-
- # Initialize the non-parametric layer
- self.tf_np_poly_opd = TF_NP_poly_OPD(
- x_lims=self.x_lims, y_lims=self.y_lims, d_max=self.d_max_nonparam, opd_dim=self.opd_dim
- )
-
- # Initialize the batch opd to batch polychromatic PSF layer
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- # Initialize the model parameters with non-default value
- if coeff_mat is not None:
- self.assign_coeff_matrix(coeff_mat)
-
- def get_coeff_matrix(self):
- """ Get coefficient matrix."""
- return self.tf_poly_Z_field.get_coeff_matrix()
-
- def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
- self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
-
- def set_zero_nonparam(self):
- """ Set to zero the non-parametric part."""
- self.tf_np_poly_opd.set_alpha_zero()
-
- def set_nonzero_nonparam(self):
- """ Set to non-zero the non-parametric part."""
- self.tf_np_poly_opd.set_alpha_identity()
-
- def set_trainable_layers(self, param_bool=True, nonparam_bool=True):
- """ Set the layers to be trainable or not."""
- self.tf_np_poly_opd.trainable = nonparam_bool
- self.tf_poly_Z_field.trainable = param_bool
-
- def set_output_Q(self, output_Q, output_dim=None):
- """ Set the value of the output_Q parameter.
- Useful for generating/predicting PSFs at a different sampling wrt the
- observation sampling.
- """
- self.output_Q = output_Q
- if output_dim is not None:
- self.output_dim = output_dim
-
- # Reinitialize the PSF batch poly generator
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
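`set_output_Q` above rebuilds the batched PSF generator, which is how a trained model is switched to a different output sampling. A hedged usage sketch, where `model`, `positions` and `packed_SEDs` are hypothetical, already-built objects:

```python
# Hypothetical usage: `model` is an already-built TF_SemiParam_field_l2_OPD
# instance trained at the observation sampling.
model.set_output_Q(output_Q=1, output_dim=64)  # super-resolved PSFs on larger stamps
super_res_psfs = model.predict([positions, packed_SEDs], batch_size=16)
```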
- def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- """ Predict a set of monochromatic PSF at desired positions.
-
- input_positions: Tensor(batch_dim x 2)
-
- lambda_obs: float
- Observed wavelength in um.
-
- phase_N: int
- Required wavefront dimension. Should be calculated as:
- ``simPSF_np = wf.SimPSFToolkit(...)``
- ``phase_N = simPSF_np.feasible_N(lambda_obs)``
- """
-
- # Initialise the monochromatic PSF batch calculator
- tf_batch_mono_psf = TF_batch_mono_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
- # Set the lambda_obs and the phase_N parameters
- tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
-
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- # Compute the monochromatic PSFs
- mono_psf_batch = tf_batch_mono_psf(opd_maps)
-
- return mono_psf_batch
-
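A hedged sketch of the monochromatic prediction recipe from the docstring above; `model` and `positions` are hypothetical, already-built objects, and the `SimPSFToolkit` arguments stay elided as in the docstring:

```python
import wf_psf as wf

# `model` and `positions` are hypothetical, already-built objects.
simPSF_np = wf.SimPSFToolkit(...)  # arguments elided, as in the docstring
lambda_obs = 0.7  # observed wavelength in um
phase_N = simPSF_np.feasible_N(lambda_obs)
mono_psfs = model.predict_mono_psfs(positions, lambda_obs, phase_N)
```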
- def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
-
- Parameters
- ----------
- input_positions: Tensor(batch_dim x 2)
- Positions to predict the OPD.
-
- Returns
- -------
- opd_maps : Tensor [batch x opd_dim x opd_dim]
- OPD at requested positions.
-
- """
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- return opd_maps
-
- def call(self, inputs):
- """Define the PSF field forward model.
-
- [1] From positions to Zernike coefficients
- [2] From Zernike coefficients to OPD maps
- [3] From OPD maps and SED info to polychromatic PSFs
-
- OPD: Optical Path Differences
- """
- # Unpack inputs
- input_positions = inputs[0]
- packed_SEDs = inputs[1]
-
- # Forward model
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_np_poly_opd(input_positions)
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- # Add l2 loss on the OPD
- self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(opd_maps)))
-
- # Compute the polychromatic PSFs
- poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
-
- return poly_psfs
-
-
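The `call` above injects the OPD regularisation through Keras' `add_loss`, so the penalty is added to the compiled pixel loss at each training step. A standalone sketch of just that term, with a hypothetical batch of OPD maps:

```python
import tensorflow as tf

# The penalty added in call(): l2_param times the sum of squared OPD values.
l2_param = 1e-11
opd_maps = tf.random.normal([4, 256, 256])  # hypothetical batch of OPD maps
l2_penalty = l2_param * tf.math.reduce_sum(tf.math.square(opd_maps))
# Keras adds this scalar to the main pixel loss at every training step.
```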
-class TF_PSF_field_model_l2_OPD(tf.keras.Model):
- """ Parametric PSF field model!
-
- Fully parametric model based on the Zernike polynomial basis.
-
- Parameters
- ----------
- zernike_maps: Tensor(n_batch, opd_dim, opd_dim)
- Zernike polynomial maps.
- obscurations: Tensor(opd_dim, opd_dim)
- Predefined obscurations of the phase.
- batch_size: int
- Batch size
- l2_param: float
- Parameter going with the l2 loss on the opd.
- output_dim: int
- Output dimension of the PSF stamps.
- n_zernikes: int
- Order of the Zernike polynomial for the parametric model.
- d_max: int
- Maximum degree of the polynomial for the Zernike coefficient variations.
- x_lims: [float, float]
- Limits for the x coordinate of the PSF field.
- y_lims: [float, float]
- Limits for the y coordinate of the PSF field.
- coeff_mat: Tensor or None
- Initialization of the coefficient matrix defining the parametric psf
- field model.
-
- """
-
- def __init__(
- self,
- zernike_maps,
- obscurations,
- batch_size,
- output_Q,
- l2_param=1e-11,
- output_dim=64,
- n_zernikes=45,
- d_max=2,
- x_lims=[0, 1e3],
- y_lims=[0, 1e3],
- coeff_mat=None,
- name='TF_PSF_field_model_l2_OPD'
- ):
- super(TF_PSF_field_model_l2_OPD, self).__init__(name=name)
-
- self.output_Q = output_Q
-
- # Inputs: TF_poly_Z_field
- self.n_zernikes = n_zernikes
- self.d_max = d_max
- self.x_lims = x_lims
- self.y_lims = y_lims
-
- # Inputs: TF_zernike_OPD
- # They are not stored as they are memory-heavy
- # zernike_maps =[]
-
- # Inputs: TF_batch_poly_PSF
- self.batch_size = batch_size
- self.obscurations = obscurations
- self.output_dim = output_dim
-
- # Inputs: Loss
- self.l2_param = l2_param
-
- # Initialize the first layer
- self.tf_poly_Z_field = TF_poly_Z_field(
- x_lims=self.x_lims, y_lims=self.y_lims, n_zernikes=self.n_zernikes, d_max=self.d_max
- )
-
- # Initialize the zernike to OPD layer
- self.tf_zernike_OPD = TF_zernike_OPD(zernike_maps=zernike_maps)
-
- # Initialize the batch opd to batch polychromatic PSF layer
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- # Initialize the model parameters with non-default value
- if coeff_mat is not None:
- self.assign_coeff_matrix(coeff_mat)
-
- def get_coeff_matrix(self):
- """ Get coefficient matrix."""
- return self.tf_poly_Z_field.get_coeff_matrix()
-
- def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
- self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
-
- def set_output_Q(self, output_Q, output_dim=None):
- """ Set the value of the output_Q parameter.
- Useful for generating/predicting PSFs at a different sampling wrt the
- observation sampling.
- """
- self.output_Q = output_Q
- if output_dim is not None:
- self.output_dim = output_dim
- # Reinitialize the PSF batch poly generator
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- """ Predict a set of monochromatic PSF at desired positions.
-
- input_positions: Tensor(batch_dim x 2)
-
- lambda_obs: float
- Observed wavelength in um.
-
- phase_N: int
- Required wavefront dimension. Should be calculated as:
- ``simPSF_np = wf.SimPSFToolkit(...)``
- ``phase_N = simPSF_np.feasible_N(lambda_obs)``
- """
-
- # Initialise the monochromatic PSF batch calculator
- tf_batch_mono_psf = TF_batch_mono_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
- # Set the lambda_obs and the phase_N parameters
- tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
-
- # Compute the OPD maps
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Compute the monochromatic PSFs
- mono_psf_batch = tf_batch_mono_psf(opd_maps)
-
- return mono_psf_batch
-
- def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
-
- Parameters
- ----------
- input_positions: Tensor(batch_dim x 2)
- Positions to predict the OPD.
-
- Returns
- -------
- opd_maps : Tensor [batch x opd_dim x opd_dim]
- OPD at requested positions.
-
- """
- # Compute the OPD maps
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- return opd_maps
-
- def call(self, inputs):
- """Define the PSF field forward model.
-
- [1] From positions to Zernike coefficients
- [2] From Zernike coefficients to OPD maps
- [3] From OPD maps and SED info to polychromatic PSFs
-
- OPD: Optical Path Differences
- """
- # Unpack inputs
- input_positions = inputs[0]
- packed_SEDs = inputs[1]
-
- # Forward model: compute the OPD maps
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Add l2 loss on the OPD
- self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(opd_maps)))
-
- poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
-
- return poly_psfs
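Both deleted classes in this file share the same three-stage forward model (positions to Zernike coefficients, coefficients to OPD maps, OPD maps plus SEDs to PSFs). A hedged sketch of querying the wavefront alone, where `model` is a hypothetical built instance of either class:

```python
import tensorflow as tf

# `model` is a hypothetical built instance of either class above.
positions = tf.random.uniform([10, 2], maxval=1e3)  # 10 field positions in [0, 1e3]^2
opd_maps = model.predict_opd(positions)             # Tensor [10, opd_dim, opd_dim]
```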
diff --git a/wf_psf/tf_mccd_psf_field.py b/wf_psf/tf_mccd_psf_field.py
deleted file mode 100644
index 275f8fea..00000000
--- a/wf_psf/tf_mccd_psf_field.py
+++ /dev/null
@@ -1,674 +0,0 @@
-import numpy as np
-import tensorflow as tf
-from tensorflow.python.keras.engine import data_adapter
-from wf_psf.tf_layers import TF_poly_Z_field, TF_zernike_OPD, TF_batch_poly_PSF
-from wf_psf.tf_layers import TF_NP_MCCD_OPD_v2, TF_NP_GRAPH_OPD
-from wf_psf.tf_layers import TF_batch_mono_PSF
-from wf_psf.graph_utils import GraphBuilder
-from wf_psf.utils import calc_poly_position_mat
-
-
-class TF_SP_MCCD_field(tf.keras.Model):
- r""" Semi-parametric MCCD PSF field model!
-
- Semi parametric model based on the hybrid-MCCD matrix factorization scheme.
-
- The forward model is different for the training procedure and for the
- inference procedure. This makes things more complicated and requires several
- custom functions.
-
- The prediction step is the forward model for inference, while
- ``call(inputs, training=True)`` is the forward model for the training
- procedure. Calling ``call(inputs, training=False)`` falls
- back to the predict function for the inference forward model. This is needed
- in order to handle the calculation of validation metrics on the validation
- dataset.
-
-
- Parameters
- ----------
- zernike_maps: Tensor(n_batch, opd_dim, opd_dim)
- Zernike polynomial maps.
- obscurations: Tensor(opd_dim, opd_dim)
- Predefined obscurations of the phase.
- batch_size: int
- Batch size
- obs_pos: Tensor(n_stars, 2)
- The positions of all the stars
- spatial_dic: Tensor
- Spatial-constraint dictionary, e.g. as built by ``build_mccd_spatial_dic``.
- output_Q: int
- Downsampling rate to match the specified telescope's sampling. The value
- of `output_Q` should be equal to `oversampling_rate` in order to have
- the right pixel sampling corresponding to the telescope characteristics
- `pix_sampling`, `tel_diameter`, `tel_focal_length`. The final
- oversampling obtained is `oversampling_rate/output_Q`.
- Default is `1`, so the output psf will be super-resolved by a factor of
- `oversampling_rate`.
- l2_param: float
- Parameter going with the l2 loss on the opd. If it is `0.` the loss
- is not added. Default is `0.`.
- d_max_nonparam: int
- Maximum degree of the polynomial for the non-parametric variations.
- output_dim: int
- Output dimension of the PSF stamps.
- n_zernikes: int
- Order of the Zernike polynomial for the parametric model.
- d_max: int
- Maximum degree of the polynomial for the Zernike coefficient variations.
- x_lims: [float, float]
- Limits for the x coordinate of the PSF field.
- y_lims: [float, float]
- Limits for the y coordinate of the PSF field.
- coeff_mat: Tensor or None
- Initialization of the coefficient matrix defining the parametric psf
- field model.
-
- """
-
- def __init__(
- self,
- zernike_maps,
- obscurations,
- batch_size,
- obs_pos,
- spatial_dic,
- output_Q,
- l2_param=0.,
- d_max_nonparam=3,
- graph_features=6,
- l1_rate=1e-3,
- output_dim=64,
- n_zernikes=45,
- d_max=2,
- x_lims=[0, 1e3],
- y_lims=[0, 1e3],
- coeff_mat=None,
- name='TF_SP_MCCD_field'
- ):
- super(TF_SP_MCCD_field, self).__init__(name=name)
-
- # Inputs: oversampling used
- self.output_Q = output_Q
-
- # Inputs: TF_poly_Z_field
- self.n_zernikes = n_zernikes
- self.d_max = d_max
- self.x_lims = x_lims
- self.y_lims = y_lims
-
- # Inputs: TF_NP_MCCD_OPD
- self.d_max_nonparam = d_max_nonparam
- self.opd_dim = tf.shape(zernike_maps)[1].numpy()
- self.graph_features = graph_features
- self.l1_rate = l1_rate
-
- # Inputs: TF_zernike_OPD
- # They are not stored as they are memory-heavy
- # zernike_maps =[]
-
- # Inputs: TF_batch_poly_PSF
- self.batch_size = batch_size
- self.obscurations = obscurations
- self.output_dim = output_dim
-
- # Inputs: Loss
- self.l2_param = l2_param
-
- # Initialize the first layer
- self.tf_poly_Z_field = TF_poly_Z_field(
- x_lims=self.x_lims, y_lims=self.y_lims, n_zernikes=self.n_zernikes, d_max=self.d_max
- )
-
- # Initialize the zernike to OPD layer
- self.tf_zernike_OPD = TF_zernike_OPD(zernike_maps=zernike_maps)
-
- # Initialize the non-parametric layer
- self.tf_NP_mccd_OPD = TF_NP_MCCD_OPD_v2(
- obs_pos=obs_pos,
- spatial_dic=spatial_dic,
- x_lims=self.x_lims,
- y_lims=self.y_lims,
- d_max=self.d_max_nonparam,
- graph_features=self.graph_features,
- l1_rate=self.l1_rate,
- opd_dim=self.opd_dim
- )
-
- # Initialize the batch opd to batch polychromatic PSF layer
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- # Initialize the model parameters with non-default value
- if coeff_mat is not None:
- self.assign_coeff_matrix(coeff_mat)
-
- def set_zero_nonparam(self):
- r""" Set to zero the non-parametric part."""
- self.tf_NP_mccd_OPD.set_alpha_zero()
-
- def set_output_Q(self, output_Q, output_dim=None):
- r""" Set the value of the output_Q parameter.
- Useful for generating/predicting PSFs at a different sampling wrt the
- observation sampling.
- """
- self.output_Q = output_Q
- if output_dim is not None:
- self.output_dim = output_dim
- # Reinitialize the PSF batch poly generator
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- def set_l1_rate(self, new_l1_rate):
- r""" Set l1 rate the non-parametric part."""
- self.l1_rate = new_l1_rate
- self.tf_NP_mccd_OPD.l1_rate = new_l1_rate
-
- def set_nonzero_nonparam(self):
- r""" Set to non-zero the non-parametric part."""
- self.tf_NP_mccd_OPD.set_alpha_identity()
-
- def set_trainable_layers(self, param_bool=True, nonparam_bool=True):
- r""" Set the layers to be trainable or not."""
- self.tf_NP_mccd_OPD.trainable = nonparam_bool
- self.tf_poly_Z_field.trainable = param_bool
-
- def get_coeff_matrix(self):
- """ Get coefficient matrix."""
- return self.tf_poly_Z_field.get_coeff_matrix()
-
- def assign_coeff_matrix(self, coeff_mat):
- r""" Assign coefficient matrix."""
- self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
-
- def predict_step(self, data, evaluate_step=False):
- r""" Custom predict (inference) step.
-
- It is needed as the non-parametric MCCD part requires a special
- interpolation (different from training).
-
- """
- if evaluate_step:
- input_data = data
- else:
- # Format input data
- data = data_adapter.expand_1d(data)
- input_data, _, _ = data_adapter.unpack_x_y_sample_weight(data)
-
- # Unpack inputs
- input_positions = input_data[0]
- packed_SEDs = input_data[1]
-
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_mccd_OPD.predict(input_positions)
-
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
- # Compute the polychromatic PSFs
- poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
-
- return poly_psfs
-
- def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- r""" Predict a set of monochromatic PSF at desired positions.
-
- Parameters
- ----------
- input_positions: Tensor(batch x 2)
- Positions to predict the monochromatic PSFs.
- lambda_obs: float
- Observed wavelength in um.
- phase_N: int
- Required wavefront dimension. Should be calculated as:
- ``simPSF_np = wf.SimPSFToolkit(...)``
- ``phase_N = simPSF_np.feasible_N(lambda_obs)``
-
- Returns
- -------
- mono_psf_batch: Tensor [batch x output_dim x output_dim]
- Batch of monochromatic PSFs at requested positions and
- wavelength.
- """
- # Initialise the monochromatic PSF batch calculator
- tf_batch_mono_psf = TF_batch_mono_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
- # Set the lambda_obs and the phase_N parameters
- tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
-
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_mccd_OPD.predict(input_positions)
-
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- # Compute the monochromatic PSFs
- mono_psf_batch = tf_batch_mono_psf(opd_maps)
-
- return mono_psf_batch
-
- def predict_opd(self, input_positions):
- r""" Predict the OPD at some positions.
-
- Parameters
- ----------
- input_positions: Tensor(batch_dim x 2)
- Positions to predict the OPD.
-
- Returns
- -------
- opd_maps : Tensor [batch x opd_dim x opd_dim]
- OPD at requested positions.
-
- """
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_mccd_OPD.predict(input_positions)
-
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- return opd_maps
-
- def call(self, inputs, training=True):
- r"""Define the PSF field forward model.
-
- [1] From positions to Zernike coefficients
- [2] From Zernike coefficients to OPD maps
- [3] From OPD maps and SED info to polychromatic PSFs
-
- OPD: Optical Path Differences
- """
- # Unpack inputs
- input_positions = inputs[0]
- packed_SEDs = inputs[1]
-
- # Forward model
- # For the training
- if training:
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_mccd_OPD(input_positions)
- # Add l2 loss on the non-parametric OPD
- self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(nonparam_opd_maps)))
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
- # Compute the polychromatic PSFs
- poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
-
- # For the inference
- # This is absolutely needed to compute the metrics on the
- # validation data.
- else:
- # Compute predictions
- poly_psfs = self.predict_step(inputs, evaluate_step=True)
-
- return poly_psfs
-
-
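The training/inference split in `call` above matters because the non-parametric MCCD part interpolates differently at prediction time. A hedged sketch of reaching the two paths, with hypothetical `mccd_model`, `positions` and `packed_SEDs`:

```python
# Hypothetical usage: `mccd_model` is a built TF_SP_MCCD_field instance.
# Training path: the non-parametric alphas are evaluated at the training positions.
train_psfs = mccd_model([positions, packed_SEDs], training=True)

# Inference path: falls back to predict_step, which interpolates the
# non-parametric contribution to unseen positions.
test_psfs = mccd_model([positions, packed_SEDs], training=False)
```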
-def build_mccd_spatial_dic(
- obs_stars, obs_pos, x_lims, y_lims, d_max=2, graph_features=6, verbose=0
-):
- """Build the spatial-constraint dictionary.
-
- Based on the hybrid approach from the MCCD model.
- """
- # The obs_data needs to be in RCA format (with the batch dim at the end)
-
- # Graph parameters
- graph_kwargs = {
- 'obs_data': obs_stars.swapaxes(0, 1).swapaxes(1, 2),
- 'obs_pos': obs_pos,
- 'obs_weights': np.ones_like(obs_stars),
- 'n_comp': graph_features,
- 'n_eigenvects': 5,
- 'n_iter': 3,
- 'ea_gridsize': 10,
- 'distances': None,
- 'auto_run': True,
- 'verbose': verbose
- }
-
- # Compute graph-spatial constraint matrix
- VT = GraphBuilder(**graph_kwargs).VT
-
- # Compute polynomial-spatial constraint matrix
- tf_Pi = calc_poly_position_mat(pos=obs_pos, x_lims=x_lims, y_lims=y_lims, d_max=d_max)
-
- # Transpose so that the batch dimension comes first
- spatial_dic = np.concatenate((tf_Pi.numpy(), VT), axis=0).T
-
- # Return the tf spatial dictionary
- return tf.convert_to_tensor(spatial_dic, dtype=tf.float32)
-
-
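`build_mccd_spatial_dic` above concatenates the polynomial and graph constraint matrices and transposes the result so stars index the rows. A hedged usage sketch with made-up dimensions:

```python
import numpy as np

# Hypothetical inputs: 200 observed stars of 32x32 pixels and their positions.
obs_stars = np.random.rand(200, 32, 32)
obs_pos = np.random.rand(200, 2) * 1e3

spatial_dic = build_mccd_spatial_dic(
    obs_stars, obs_pos, x_lims=[0, 1e3], y_lims=[0, 1e3], d_max=2, graph_features=6
)
# One row per star; polynomial features first, then graph features.
```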
-def build_mccd_spatial_dic_v2(
- obs_stars, obs_pos, x_lims, y_lims, d_max=2, graph_features=6, verbose=0
-):
- """Build the spatial-constraint dictionaries.
-
- Based on the hybrid approach from the MCCD model.
- Returns the polynomial dict and the graph dict.
- """
- # The obs_data needs to be in RCA format (with the batch dim at the end)
-
- # Graph parameters
- graph_kwargs = {
- 'obs_data': obs_stars.swapaxes(0, 1).swapaxes(1, 2),
- 'obs_pos': obs_pos,
- 'obs_weights': np.ones_like(obs_stars),
- 'n_comp': graph_features,
- 'n_eigenvects': 5,
- 'n_iter': 3,
- 'ea_gridsize': 10,
- 'distances': None,
- 'auto_run': True,
- 'verbose': verbose
- }
-
- # Compute graph-spatial constraint matrix
- VT = GraphBuilder(**graph_kwargs).VT
-
- # Compute polynomial-spatial constraint matrix
- tf_Pi = calc_poly_position_mat(pos=obs_pos, x_lims=x_lims, y_lims=y_lims, d_max=d_max)
-
- # Return the poly dictionary and the graph dictionary
- return tf.transpose(tf_Pi, perm=[1, 0]), tf.convert_to_tensor(VT.T, dtype=tf.float32)
-
-
-class TF_SP_graph_field(tf.keras.Model):
- r""" Semi-parametric graph-constraint-only PSF field model!
-
- Semi parametric model based on the graph-constraint-only matrix factorization scheme.
-
- Parameters
- ----------
- zernike_maps: Tensor(n_batch, opd_dim, opd_dim)
- Zernike polynomial maps.
- obscurations: Tensor(opd_dim, opd_dim)
- Predefined obscurations of the phase.
- batch_size: int
- Batch size
- d_max_nonparam: int
- Maximum degree of the polynomial for the non-parametric variations.
- output_dim: int
- Output dimension of the PSF stamps.
- n_zernikes: int
- Order of the Zernike polynomial for the parametric model.
- d_max: int
- Maximum degree of the polynomial for the Zernike coefficient variations.
- x_lims: [float, float]
- Limits for the x coordinate of the PSF field.
- y_lims: [float, float]
- Limits for the y coordinate of the PSF field.
- coeff_mat: Tensor or None
- Initialization of the coefficient matrix defining the parametric psf
- field model.
-
- """
-
- def __init__(
- self,
- zernike_maps,
- obscurations,
- batch_size,
- obs_pos,
- spatial_dic,
- output_Q,
- l2_param=0.,
- graph_features=6,
- l1_rate=1e-3,
- output_dim=64,
- n_zernikes=45,
- d_max=2,
- x_lims=[0, 1e3],
- y_lims=[0, 1e3],
- coeff_mat=None,
- name='TF_SP_graph_field'
- ):
- super(TF_SP_graph_field, self).__init__(name=name)
-
- # Inputs: oversampling used
- self.output_Q = output_Q
-
- # Inputs: TF_poly_Z_field
- self.n_zernikes = n_zernikes
- self.d_max = d_max
- self.x_lims = x_lims
- self.y_lims = y_lims
-
- # Inputs: TF_NP_GRAPH_OPD
- self.opd_dim = tf.shape(zernike_maps)[1].numpy()
- self.graph_features = graph_features
- self.l1_rate = l1_rate
-
- # Inputs: TF_zernike_OPD
- # They are not stored as they are memory-heavy
- # zernike_maps =[]
-
- # Inputs: TF_batch_poly_PSF
- self.batch_size = batch_size
- self.obscurations = obscurations
- self.output_dim = output_dim
-
- # Inputs: Loss
- self.l2_param = l2_param
-
- # Initialize the first layer
- self.tf_poly_Z_field = TF_poly_Z_field(
- x_lims=self.x_lims, y_lims=self.y_lims, n_zernikes=self.n_zernikes, d_max=self.d_max
- )
-
- # Initialize the zernike to OPD layer
- self.tf_zernike_OPD = TF_zernike_OPD(zernike_maps=zernike_maps)
-
- # Initialize the non-parametric layer
- self.tf_NP_graph_OPD = TF_NP_GRAPH_OPD(
- obs_pos=obs_pos,
- spatial_dic=spatial_dic,
- x_lims=self.x_lims,
- y_lims=self.y_lims,
- graph_features=self.graph_features,
- l1_rate=self.l1_rate,
- opd_dim=self.opd_dim
- )
-
- # Initialize the batch opd to batch polychromatic PSF layer
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- # Initialize the model parameters with non-default value
- if coeff_mat is not None:
- self.assign_coeff_matrix(coeff_mat)
-
- def set_zero_nonparam(self):
- """ Set to zero the non-parametric part."""
- self.tf_NP_graph_OPD.set_alpha_zero()
-
- def set_output_Q(self, output_Q, output_dim=None):
- r""" Set the value of the output_Q parameter.
- Useful for generating/predicting PSFs at a different sampling wrt the
- observation sampling.
- """
- self.output_Q = output_Q
- if output_dim is not None:
- self.output_dim = output_dim
- # Reinitialize the PSF batch poly generator
- self.tf_batch_poly_PSF = TF_batch_poly_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
-
- def set_l1_rate(self, new_l1_rate):
- """ Set l1 rate the non-parametric part."""
- self.l1_rate = new_l1_rate
- self.tf_NP_graph_OPD.l1_rate = new_l1_rate
-
- def set_nonzero_nonparam(self):
- """ Set to non-zero the non-parametric part."""
- self.tf_NP_graph_OPD.set_alpha_identity()
-
- def set_trainable_layers(self, param_bool=True, nonparam_bool=True):
- """ Set the layers to be trainable or not."""
- self.tf_NP_graph_OPD.trainable = nonparam_bool
- self.tf_poly_Z_field.trainable = param_bool
-
- def get_coeff_matrix(self):
- """ Get coefficient matrix."""
- return self.tf_poly_Z_field.get_coeff_matrix()
-
- def assign_coeff_matrix(self, coeff_mat):
- """ Assign coefficient matrix."""
- self.tf_poly_Z_field.assign_coeff_matrix(coeff_mat)
-
- def predict_step(self, data, evaluate_step=False):
- """ Custom predict (inference) step.
-
- It is needed as the non-parametric graph part requires a special
- interpolation (different from training).
-
- """
- if evaluate_step:
- input_data = data
- else:
- # Format input data
- data = data_adapter.expand_1d(data)
- input_data, _, _ = data_adapter.unpack_x_y_sample_weight(data)
-
- # Unpack inputs
- input_positions = input_data[0]
- packed_SEDs = input_data[1]
-
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_graph_OPD.predict(input_positions)
-
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
- # Compute the polychromatic PSFs
- poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
-
- return poly_psfs
-
- def predict_mono_psfs(self, input_positions, lambda_obs, phase_N):
- r""" Predict a set of monochromatic PSF at desired positions.
-
- Parameters
- ----------
- input_positions: Tensor(batch x 2)
- Positions to predict the monochromatic PSFs.
- lambda_obs: float
- Observed wavelength in um.
- phase_N: int
- Required wavefront dimension. Should be calculated as:
- ``simPSF_np = wf.SimPSFToolkit(...)``
- ``phase_N = simPSF_np.feasible_N(lambda_obs)``
-
- Returns
- -------
- mono_psf_batch: Tensor [batch x output_dim x output_dim]
- Batch of monochromatic PSFs at requested positions and
- wavelength.
- """
- # Initialise the monochromatic PSF batch calculator
- tf_batch_mono_psf = TF_batch_mono_PSF(
- obscurations=self.obscurations, output_Q=self.output_Q, output_dim=self.output_dim
- )
- # Set the lambda_obs and the phase_N parameters
- tf_batch_mono_psf.set_lambda_phaseN(phase_N, lambda_obs)
-
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_graph_OPD.predict(input_positions)
-
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- # Compute the monochromatic PSFs
- mono_psf_batch = tf_batch_mono_psf(opd_maps)
-
- return mono_psf_batch
-
- def predict_opd(self, input_positions):
- """ Predict the OPD at some positions.
-
- Parameters
- ----------
- input_positions: Tensor(batch_dim x 2)
- Positions to predict the OPD.
-
- Returns
- -------
- opd_maps : Tensor [batch x opd_dim x opd_dim]
- OPD at requested positions.
-
- """
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
-
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_graph_OPD.predict(input_positions)
-
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
-
- return opd_maps
-
- def call(self, inputs, training=True):
- r"""Define the PSF field forward model.
-
- [1] From positions to Zernike coefficients
- [2] From Zernike coefficients to OPD maps
- [3] From OPD maps and SED info to polychromatic PSFs
-
- OPD: Optical Path Differences
- """
- # Unpack inputs
- input_positions = inputs[0]
- packed_SEDs = inputs[1]
-
- # Forward model
- # For the training
- if training:
- # Calculate parametric part
- zernike_coeffs = self.tf_poly_Z_field(input_positions)
- param_opd_maps = self.tf_zernike_OPD(zernike_coeffs)
- # Calculate the non parametric part
- nonparam_opd_maps = self.tf_NP_graph_OPD(input_positions)
- # Add l2 loss on the non-parametric OPD
- self.add_loss(self.l2_param * tf.math.reduce_sum(tf.math.square(nonparam_opd_maps)))
- # Add the estimations
- opd_maps = tf.math.add(param_opd_maps, nonparam_opd_maps)
- # Compute the polychromatic PSFs
- poly_psfs = self.tf_batch_poly_PSF([opd_maps, packed_SEDs])
-
- # For the inference
- # This is absolutely needed to compute the metrics on the
- # validation data.
- else:
- # Compute predictions
- poly_psfs = self.predict_step(inputs, evaluate_step=True)
-
- return poly_psfs