From f4d7c7e35110c0063cea269b3d6c2596b42bd999 Mon Sep 17 00:00:00 2001 From: Duo <50307526+iProzd@users.noreply.github.com> Date: Sat, 27 Jan 2024 11:40:50 +0800 Subject: [PATCH] Merge deepmd-pytorch into main repo (#3180) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Merge deepmd-pytorch into the main repo 🎉 Add the following directories: - deepmd/pt: main implementation of deepmd-pytorch - source/tests/pt: unit tests for deepmd-pytorch TODO list: - [x] examples added for water/se_e2_a, water/se_atten, water/dpa2 - [x] README updated (needs further revision) - [x] Paths in each file have been adapted. - [x] pyproject.toml merged --------- Signed-off-by: Jinzhe Zeng Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Jinzhe Zeng --- .github/workflows/test_cuda.yml | 2 +- .github/workflows/test_python.yml | 13 +- .gitignore | 1 + README.md | 1 - backend/dynamic_metadata.py | 4 + deepmd/pt/__init__.py | 1 + deepmd/pt/entrypoints/__init__.py | 1 + deepmd/pt/entrypoints/main.py | 396 ++++ deepmd/pt/infer/__init__.py | 1 + deepmd/pt/infer/deep_eval.py | 412 ++++ deepmd/pt/infer/inference.py | 417 ++++ deepmd/pt/loss/__init__.py | 16 + deepmd/pt/loss/denoise.py | 109 + deepmd/pt/loss/ener.py | 155 ++ deepmd/pt/loss/loss.py | 12 + deepmd/pt/model/__init__.py | 1 + deepmd/pt/model/backbone/__init__.py | 12 + deepmd/pt/model/backbone/backbone.py | 12 + deepmd/pt/model/backbone/evoformer2b.py | 103 + deepmd/pt/model/descriptor/__init__.py | 46 + deepmd/pt/model/descriptor/descriptor.py | 272 +++ deepmd/pt/model/descriptor/dpa1.py | 152 ++ deepmd/pt/model/descriptor/dpa2.py | 375 ++++ deepmd/pt/model/descriptor/env_mat.py | 57 + deepmd/pt/model/descriptor/gaussian_lcc.py | 315 +++ deepmd/pt/model/descriptor/hybrid.py | 257 +++ deepmd/pt/model/descriptor/repformer_layer.py | 749 +++++++ deepmd/pt/model/descriptor/repformers.py | 348 +++ deepmd/pt/model/descriptor/se_a.py | 478 +++++ deepmd/pt/model/descriptor/se_atten.py | 392 ++++ deepmd/pt/model/model/__init__.py | 27 + deepmd/pt/model/model/atomic_model.py | 77 + deepmd/pt/model/model/dp_atomic_model.py | 214 ++ deepmd/pt/model/model/ener.py | 151 ++ deepmd/pt/model/model/make_model.py | 136 ++ deepmd/pt/model/model/model.py | 150 ++ deepmd/pt/model/model/transform_output.py | 214 ++ deepmd/pt/model/network/__init__.py | 1 + deepmd/pt/model/network/mlp.py | 217 ++ deepmd/pt/model/network/network.py | 1897 +++++++++++++++++ deepmd/pt/model/task/__init__.py | 34 + deepmd/pt/model/task/atten_lcc.py | 55 + deepmd/pt/model/task/denoise.py | 129 ++ deepmd/pt/model/task/dipole.py | 65 + deepmd/pt/model/task/ener.py | 241 +++ deepmd/pt/model/task/fitting.py | 223 ++ deepmd/pt/model/task/task.py | 12 + deepmd/pt/model/task/type_predict.py | 47 + deepmd/pt/optimizer/KFWrapper.py | 145 ++ deepmd/pt/optimizer/LKF.py | 221 ++ deepmd/pt/optimizer/__init__.py | 9 + deepmd/pt/train/__init__.py | 1 + deepmd/pt/train/training.py | 849 ++++++++ deepmd/pt/train/wrapper.py | 192 ++ deepmd/pt/utils/__init__.py | 1 + deepmd/pt/utils/ase_calc.py | 65 + deepmd/pt/utils/auto_batch_size.py | 26 + deepmd/pt/utils/cache.py | 31 + deepmd/pt/utils/dataloader.py | 319 +++ deepmd/pt/utils/dataset.py | 918 ++++++++ deepmd/pt/utils/dp_random.py | 14 + deepmd/pt/utils/env.py | 45 + deepmd/pt/utils/finetune.py | 98 + deepmd/pt/utils/learning_rate.py | 35 + deepmd/pt/utils/multi_task.py | 129 ++ deepmd/pt/utils/nlist.py | 431 ++++ deepmd/pt/utils/plugin.py | 15 +
deepmd/pt/utils/preprocess.py | 318 +++ deepmd/pt/utils/region.py | 116 + deepmd/pt/utils/stat.py | 112 + deepmd/pt/utils/utils.py | 43 + examples/water/dpa2/input_torch.json | 102 + examples/water/se_atten/input_torch.json | 91 + examples/water/se_e2_a/input_torch.json | 79 + source/install/docker/Dockerfile | 2 +- source/tests/pt/__init__.py | 5 + source/tests/pt/models/dpa1.json | 39 + source/tests/pt/models/dpa1.pth | Bin 0 -> 15469 bytes source/tests/pt/models/dpa2.json | 48 + source/tests/pt/models/dpa2.pth | Bin 0 -> 179745 bytes source/tests/pt/models/dpa2_hyb.json | 69 + source/tests/pt/models/dpa2_tebd.pth | Bin 0 -> 1085 bytes source/tests/pt/requirements.txt | 6 + source/tests/pt/test_LKF.py | 35 + source/tests/pt/test_autodiff.py | 190 ++ source/tests/pt/test_calculator.py | 95 + source/tests/pt/test_deeppot.py | 81 + source/tests/pt/test_descriptor.py | 166 ++ source/tests/pt/test_descriptor_dpa1.py | 367 ++++ source/tests/pt/test_descriptor_dpa2.py | 264 +++ source/tests/pt/test_dp_test.py | 71 + source/tests/pt/test_embedding_net.py | 176 ++ source/tests/pt/test_env_mat.py | 84 + source/tests/pt/test_fitting_net.py | 139 ++ source/tests/pt/test_force_grad.py | 123 ++ source/tests/pt/test_jit.py | 140 ++ source/tests/pt/test_loss.py | 189 ++ source/tests/pt/test_lr.py | 59 + source/tests/pt/test_mlp.py | 321 +++ source/tests/pt/test_model.py | 415 ++++ source/tests/pt/test_nlist.py | 212 ++ source/tests/pt/test_permutation.py | 322 +++ source/tests/pt/test_permutation_denoise.py | 102 + source/tests/pt/test_region.py | 78 + source/tests/pt/test_rot.py | 181 ++ source/tests/pt/test_rot_denoise.py | 133 ++ source/tests/pt/test_rotation.py | 133 ++ source/tests/pt/test_sampler.py | 115 + source/tests/pt/test_saveload_dpa1.py | 151 ++ source/tests/pt/test_saveload_se_e2_a.py | 145 ++ source/tests/pt/test_se_e2_a.py | 199 ++ source/tests/pt/test_smooth.py | 230 ++ source/tests/pt/test_smooth_denoise.py | 151 ++ source/tests/pt/test_stat.py | 194 ++ source/tests/pt/test_training.py | 116 + source/tests/pt/test_trans.py | 137 ++ source/tests/pt/test_trans_denoise.py | 92 + source/tests/pt/test_unused_params.py | 98 + .../pt/water/data/data_0/set.000/box.npy | Bin 0 -> 3008 bytes .../pt/water/data/data_0/set.000/coord.npy | Bin 0 -> 184448 bytes .../pt/water/data/data_0/set.000/energy.npy | Bin 0 -> 448 bytes .../pt/water/data/data_0/set.000/force.npy | Bin 0 -> 184448 bytes source/tests/pt/water/data/data_0/type.raw | 192 ++ .../tests/pt/water/data/data_0/type_map.raw | 2 + .../pt/water/data/single/set.000/box.npy | Bin 0 -> 164 bytes .../pt/water/data/single/set.000/coord.npy | Bin 0 -> 2432 bytes .../pt/water/data/single/set.000/energy.npy | Bin 0 -> 132 bytes .../pt/water/data/single/set.000/force.npy | Bin 0 -> 2432 bytes source/tests/pt/water/data/single/type.raw | 192 ++ .../tests/pt/water/data/single/type_map.raw | 2 + source/tests/pt/water/lkf.json | 79 + source/tests/pt/water/se_atten.json | 84 + source/tests/pt/water/se_e2_a.json | 77 + source/tests/test_adjust_sel.py | 4 +- source/tests/test_finetune_se_atten.py | 150 +- source/tests/test_init_frz_model_multi.py | 43 +- source/tests/test_init_frz_model_se_a.py | 42 +- source/tests/test_init_frz_model_se_a_tebd.py | 43 +- source/tests/test_init_frz_model_se_a_type.py | 42 +- source/tests/test_init_frz_model_se_atten.py | 88 +- source/tests/test_init_frz_model_se_r.py | 43 +- source/tests/test_init_frz_model_spin.py | 43 +- ...odel_compression_se_a_ebd_type_one_side.py | 16 +- ...ession_se_a_type_one_side_exclude_types.py 
| 5 +- 144 files changed, 20162 insertions(+), 263 deletions(-) create mode 100644 deepmd/pt/__init__.py create mode 100644 deepmd/pt/entrypoints/__init__.py create mode 100644 deepmd/pt/entrypoints/main.py create mode 100644 deepmd/pt/infer/__init__.py create mode 100644 deepmd/pt/infer/deep_eval.py create mode 100644 deepmd/pt/infer/inference.py create mode 100644 deepmd/pt/loss/__init__.py create mode 100644 deepmd/pt/loss/denoise.py create mode 100644 deepmd/pt/loss/ener.py create mode 100644 deepmd/pt/loss/loss.py create mode 100644 deepmd/pt/model/__init__.py create mode 100644 deepmd/pt/model/backbone/__init__.py create mode 100644 deepmd/pt/model/backbone/backbone.py create mode 100644 deepmd/pt/model/backbone/evoformer2b.py create mode 100644 deepmd/pt/model/descriptor/__init__.py create mode 100644 deepmd/pt/model/descriptor/descriptor.py create mode 100644 deepmd/pt/model/descriptor/dpa1.py create mode 100644 deepmd/pt/model/descriptor/dpa2.py create mode 100644 deepmd/pt/model/descriptor/env_mat.py create mode 100644 deepmd/pt/model/descriptor/gaussian_lcc.py create mode 100644 deepmd/pt/model/descriptor/hybrid.py create mode 100644 deepmd/pt/model/descriptor/repformer_layer.py create mode 100644 deepmd/pt/model/descriptor/repformers.py create mode 100644 deepmd/pt/model/descriptor/se_a.py create mode 100644 deepmd/pt/model/descriptor/se_atten.py create mode 100644 deepmd/pt/model/model/__init__.py create mode 100644 deepmd/pt/model/model/atomic_model.py create mode 100644 deepmd/pt/model/model/dp_atomic_model.py create mode 100644 deepmd/pt/model/model/ener.py create mode 100644 deepmd/pt/model/model/make_model.py create mode 100644 deepmd/pt/model/model/model.py create mode 100644 deepmd/pt/model/model/transform_output.py create mode 100644 deepmd/pt/model/network/__init__.py create mode 100644 deepmd/pt/model/network/mlp.py create mode 100644 deepmd/pt/model/network/network.py create mode 100644 deepmd/pt/model/task/__init__.py create mode 100644 deepmd/pt/model/task/atten_lcc.py create mode 100644 deepmd/pt/model/task/denoise.py create mode 100644 deepmd/pt/model/task/dipole.py create mode 100644 deepmd/pt/model/task/ener.py create mode 100644 deepmd/pt/model/task/fitting.py create mode 100644 deepmd/pt/model/task/task.py create mode 100644 deepmd/pt/model/task/type_predict.py create mode 100644 deepmd/pt/optimizer/KFWrapper.py create mode 100644 deepmd/pt/optimizer/LKF.py create mode 100644 deepmd/pt/optimizer/__init__.py create mode 100644 deepmd/pt/train/__init__.py create mode 100644 deepmd/pt/train/training.py create mode 100644 deepmd/pt/train/wrapper.py create mode 100644 deepmd/pt/utils/__init__.py create mode 100644 deepmd/pt/utils/ase_calc.py create mode 100644 deepmd/pt/utils/auto_batch_size.py create mode 100644 deepmd/pt/utils/cache.py create mode 100644 deepmd/pt/utils/dataloader.py create mode 100644 deepmd/pt/utils/dataset.py create mode 100644 deepmd/pt/utils/dp_random.py create mode 100644 deepmd/pt/utils/env.py create mode 100644 deepmd/pt/utils/finetune.py create mode 100644 deepmd/pt/utils/learning_rate.py create mode 100644 deepmd/pt/utils/multi_task.py create mode 100644 deepmd/pt/utils/nlist.py create mode 100644 deepmd/pt/utils/plugin.py create mode 100644 deepmd/pt/utils/preprocess.py create mode 100644 deepmd/pt/utils/region.py create mode 100644 deepmd/pt/utils/stat.py create mode 100644 deepmd/pt/utils/utils.py create mode 100644 examples/water/dpa2/input_torch.json create mode 100644 examples/water/se_atten/input_torch.json create mode 100644 
examples/water/se_e2_a/input_torch.json create mode 100644 source/tests/pt/__init__.py create mode 100644 source/tests/pt/models/dpa1.json create mode 100644 source/tests/pt/models/dpa1.pth create mode 100644 source/tests/pt/models/dpa2.json create mode 100644 source/tests/pt/models/dpa2.pth create mode 100644 source/tests/pt/models/dpa2_hyb.json create mode 100644 source/tests/pt/models/dpa2_tebd.pth create mode 100644 source/tests/pt/requirements.txt create mode 100644 source/tests/pt/test_LKF.py create mode 100644 source/tests/pt/test_autodiff.py create mode 100644 source/tests/pt/test_calculator.py create mode 100644 source/tests/pt/test_deeppot.py create mode 100644 source/tests/pt/test_descriptor.py create mode 100644 source/tests/pt/test_descriptor_dpa1.py create mode 100644 source/tests/pt/test_descriptor_dpa2.py create mode 100644 source/tests/pt/test_dp_test.py create mode 100644 source/tests/pt/test_embedding_net.py create mode 100644 source/tests/pt/test_env_mat.py create mode 100644 source/tests/pt/test_fitting_net.py create mode 100644 source/tests/pt/test_force_grad.py create mode 100644 source/tests/pt/test_jit.py create mode 100644 source/tests/pt/test_loss.py create mode 100644 source/tests/pt/test_lr.py create mode 100644 source/tests/pt/test_mlp.py create mode 100644 source/tests/pt/test_model.py create mode 100644 source/tests/pt/test_nlist.py create mode 100644 source/tests/pt/test_permutation.py create mode 100644 source/tests/pt/test_permutation_denoise.py create mode 100644 source/tests/pt/test_region.py create mode 100644 source/tests/pt/test_rot.py create mode 100644 source/tests/pt/test_rot_denoise.py create mode 100644 source/tests/pt/test_rotation.py create mode 100644 source/tests/pt/test_sampler.py create mode 100644 source/tests/pt/test_saveload_dpa1.py create mode 100644 source/tests/pt/test_saveload_se_e2_a.py create mode 100644 source/tests/pt/test_se_e2_a.py create mode 100644 source/tests/pt/test_smooth.py create mode 100644 source/tests/pt/test_smooth_denoise.py create mode 100644 source/tests/pt/test_stat.py create mode 100644 source/tests/pt/test_training.py create mode 100644 source/tests/pt/test_trans.py create mode 100644 source/tests/pt/test_trans_denoise.py create mode 100644 source/tests/pt/test_unused_params.py create mode 100644 source/tests/pt/water/data/data_0/set.000/box.npy create mode 100644 source/tests/pt/water/data/data_0/set.000/coord.npy create mode 100644 source/tests/pt/water/data/data_0/set.000/energy.npy create mode 100644 source/tests/pt/water/data/data_0/set.000/force.npy create mode 100644 source/tests/pt/water/data/data_0/type.raw create mode 100644 source/tests/pt/water/data/data_0/type_map.raw create mode 100644 source/tests/pt/water/data/single/set.000/box.npy create mode 100644 source/tests/pt/water/data/single/set.000/coord.npy create mode 100644 source/tests/pt/water/data/single/set.000/energy.npy create mode 100644 source/tests/pt/water/data/single/set.000/force.npy create mode 100644 source/tests/pt/water/data/single/type.raw create mode 100644 source/tests/pt/water/data/single/type_map.raw create mode 100644 source/tests/pt/water/lkf.json create mode 100644 source/tests/pt/water/se_atten.json create mode 100644 source/tests/pt/water/se_e2_a.json diff --git a/.github/workflows/test_cuda.yml b/.github/workflows/test_cuda.yml index 049fb95e3a..f164758304 100644 --- a/.github/workflows/test_cuda.yml +++ b/.github/workflows/test_cuda.yml @@ -37,7 +37,7 @@ jobs: run: python -m pip config --user set global.index-url 
https://mirrors.aliyun.com/pypi/simple/ - run: python -m pip install -U "pip>=21.3.1,!=23.0.0" - run: python -m pip install "tensorflow>=2.15.0rc0" - - run: python -m pip install -v -e .[gpu,test,lmp,cu12] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" + - run: python -m pip install -v -e .[gpu,test,lmp,cu12,torch] "ase @ https://gitlab.com/ase/ase/-/archive/8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f/ase-8c5aa5fd6448c5cfb517a014dccf2b214a9dfa8f.tar.gz" env: DP_BUILD_TESTING: 1 DP_VARIANT: cuda diff --git a/.github/workflows/test_python.yml b/.github/workflows/test_python.yml index 55ef041532..091a2a61f8 100644 --- a/.github/workflows/test_python.yml +++ b/.github/workflows/test_python.yml @@ -9,12 +9,12 @@ jobs: strategy: matrix: include: - - python: 3.7 - tf: 1.14 - python: 3.8 tf: + torch: - python: "3.11" tf: + torch: steps: - uses: actions/checkout@v4 @@ -23,22 +23,25 @@ jobs: python-version: ${{ matrix.python }} cache: 'pip' - uses: mpi4py/setup-mpi@v1 - if: ${{ matrix.tf == '' }} with: mpi: openmpi # https://github.com/pypa/pip/issues/11770 - run: python -m pip install -U "pip>=21.3.1,!=23.0.0" - - run: pip install -e .[cpu,test] + - run: python -m pip install -U "torch==${{ matrix.torch }}" "numpy<1.20" + if: matrix.torch != '' + - run: pip install -e .[cpu,test,torch] env: TENSORFLOW_VERSION: ${{ matrix.tf }} DP_BUILD_TESTING: 1 - run: pip install horovod mpi4py - if: ${{ matrix.tf == '' }} env: HOROVOD_WITH_TENSORFLOW: 1 + HOROVOD_WITHOUT_PYTORCH: 1 HOROVOD_WITHOUT_GLOO: 1 - run: dp --version - run: pytest --cov=deepmd source/tests --durations=0 + env: + NUM_WORKERS: 0 - uses: codecov/codecov-action@v3 with: gcov: true diff --git a/.gitignore b/.gitignore index 82d3e4a7da..5e30cf3167 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ *.bz2 *.pyc *.pb +*.DS_Store tmp* CMakeCache.txt CMakeFiles diff --git a/README.md b/README.md index e61c18dbcb..2076e11f1b 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,6 @@ The code is organized as follows: See [DeePMD-kit Contributing Guide](CONTRIBUTING.md) to become a contributor! 
🤓 - [1]: https://arxiv.org/abs/1707.01478 [2]: https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.120.143001 [3]: https://arxiv.org/abs/1805.09003 diff --git a/backend/dynamic_metadata.py b/backend/dynamic_metadata.py index 72dfcaef45..e30c97bd98 100644 --- a/backend/dynamic_metadata.py +++ b/backend/dynamic_metadata.py @@ -88,4 +88,8 @@ def dynamic_metadata( "nvidia-cudnn-cu12", "nvidia-cuda-nvcc-cu12", ], + "torch": [ + "torch>=2a", + "tqdm", + ], } diff --git a/deepmd/pt/__init__.py b/deepmd/pt/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pt/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pt/entrypoints/__init__.py b/deepmd/pt/entrypoints/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pt/entrypoints/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pt/entrypoints/main.py b/deepmd/pt/entrypoints/main.py new file mode 100644 index 0000000000..f1cd7ae210 --- /dev/null +++ b/deepmd/pt/entrypoints/main.py @@ -0,0 +1,396 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import argparse +import json +import logging +import os + +import torch +import torch.distributed as dist +from torch.distributed.elastic.multiprocessing.errors import ( + record, +) + +from deepmd import ( + __version__, +) +from deepmd.pt.infer import ( + inference, +) +from deepmd.pt.model.descriptor import ( + Descriptor, +) +from deepmd.pt.train import ( + training, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.finetune import ( + change_finetune_model_params, +) +from deepmd.pt.utils.multi_task import ( + preprocess_shared_params, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) + + +def get_trainer( + config, + init_model=None, + restart_model=None, + finetune_model=None, + model_branch="", + force_load=False, +): + # Initialize DDP + local_rank = os.environ.get("LOCAL_RANK") + if local_rank is not None: + local_rank = int(local_rank) + assert dist.is_nccl_available() + dist.init_process_group(backend="nccl") + + multi_task = "model_dict" in config["model"] + ckpt = init_model if init_model is not None else restart_model + config["model"] = change_finetune_model_params( + ckpt, + finetune_model, + config["model"], + multi_task=multi_task, + model_branch=model_branch, + ) + config["model"]["resuming"] = (finetune_model is not None) or (ckpt is not None) + shared_links = None + if multi_task: + config["model"], shared_links = preprocess_shared_params(config["model"]) + + def prepare_trainer_input_single( + model_params_single, data_dict_single, loss_dict_single, suffix="" + ): + training_dataset_params = data_dict_single["training_data"] + type_split = False + if model_params_single["descriptor"]["type"] in ["se_e2_a"]: + type_split = True + validation_dataset_params = data_dict_single["validation_data"] + training_systems = training_dataset_params["systems"] + validation_systems = validation_dataset_params["systems"] + + # noise params + noise_settings = None + if loss_dict_single.get("type", "ener") == "denoise": + noise_settings = { + "noise_type": loss_dict_single.pop("noise_type", "uniform"), + "noise": loss_dict_single.pop("noise", 1.0), + "noise_mode": loss_dict_single.pop("noise_mode", "fix_num"), + "mask_num": loss_dict_single.pop("mask_num", 8), + "mask_prob": loss_dict_single.pop("mask_prob", 0.15), + "same_mask": 
loss_dict_single.pop("same_mask", False), + "mask_coord": loss_dict_single.pop("mask_coord", False), + "mask_type": loss_dict_single.pop("mask_type", False), + "max_fail_num": loss_dict_single.pop("max_fail_num", 10), + "mask_type_idx": len(model_params_single["type_map"]) - 1, + } + # noise_settings = None + + # stat files + hybrid_descrpt = model_params_single["descriptor"]["type"] == "hybrid" + has_stat_file_path = True + if not hybrid_descrpt: + ### this design requires "rcut", "rcut_smth" and "sel" in the descriptor + ### VERY BAD DESIGN!!!! + ### not all descriptors provides these parameter in their constructor + default_stat_file_name = Descriptor.get_stat_name( + model_params_single["descriptor"] + ) + model_params_single["stat_file_dir"] = data_dict_single.get( + "stat_file_dir", f"stat_files{suffix}" + ) + model_params_single["stat_file"] = data_dict_single.get( + "stat_file", default_stat_file_name + ) + model_params_single["stat_file_path"] = os.path.join( + model_params_single["stat_file_dir"], model_params_single["stat_file"] + ) + if not os.path.exists(model_params_single["stat_file_path"]): + has_stat_file_path = False + else: ### need to remove this + default_stat_file_name = [] + for descrpt in model_params_single["descriptor"]["list"]: + default_stat_file_name.append( + f'stat_file_rcut{descrpt["rcut"]:.2f}_' + f'smth{descrpt["rcut_smth"]:.2f}_' + f'sel{descrpt["sel"]}_{descrpt["type"]}.npz' + ) + model_params_single["stat_file_dir"] = data_dict_single.get( + "stat_file_dir", f"stat_files{suffix}" + ) + model_params_single["stat_file"] = data_dict_single.get( + "stat_file", default_stat_file_name + ) + assert isinstance( + model_params_single["stat_file"], list + ), "Stat file of hybrid descriptor must be a list!" + stat_file_path = [] + for stat_file_path_item in model_params_single["stat_file"]: + single_file_path = os.path.join( + model_params_single["stat_file_dir"], stat_file_path_item + ) + stat_file_path.append(single_file_path) + if not os.path.exists(single_file_path): + has_stat_file_path = False + model_params_single["stat_file_path"] = stat_file_path + + # validation and training data + validation_data_single = DpLoaderSet( + validation_systems, + validation_dataset_params["batch_size"], + model_params_single, + type_split=type_split, + noise_settings=noise_settings, + ) + if ckpt or finetune_model or has_stat_file_path: + train_data_single = DpLoaderSet( + training_systems, + training_dataset_params["batch_size"], + model_params_single, + type_split=type_split, + noise_settings=noise_settings, + ) + sampled_single = None + else: + train_data_single = DpLoaderSet( + training_systems, + training_dataset_params["batch_size"], + model_params_single, + type_split=type_split, + ) + data_stat_nbatch = model_params_single.get("data_stat_nbatch", 10) + sampled_single = make_stat_input( + train_data_single.systems, + train_data_single.dataloaders, + data_stat_nbatch, + ) + if noise_settings is not None: + train_data_single = DpLoaderSet( + training_systems, + training_dataset_params["batch_size"], + model_params_single, + type_split=type_split, + noise_settings=noise_settings, + ) + return train_data_single, validation_data_single, sampled_single + + if not multi_task: + train_data, validation_data, sampled = prepare_trainer_input_single( + config["model"], config["training"], config["loss"] + ) + else: + train_data, validation_data, sampled = {}, {}, {} + for model_key in config["model"]["model_dict"]: + ( + train_data[model_key], + validation_data[model_key], + 
sampled[model_key], + ) = prepare_trainer_input_single( + config["model"]["model_dict"][model_key], + config["training"]["data_dict"][model_key], + config["loss_dict"][model_key], + suffix=f"_{model_key}", + ) + + trainer = training.Trainer( + config, + train_data, + sampled, + validation_data=validation_data, + init_model=init_model, + restart_model=restart_model, + finetune_model=finetune_model, + force_load=force_load, + shared_links=shared_links, + ) + return trainer + + +def train(FLAGS): + logging.info("Configuration path: %s", FLAGS.INPUT) + with open(FLAGS.INPUT) as fin: + config = json.load(fin) + trainer = get_trainer( + config, + FLAGS.init_model, + FLAGS.restart, + FLAGS.finetune, + FLAGS.model_branch, + FLAGS.force_load, + ) + trainer.run() + + +def test(FLAGS): + trainer = inference.Tester( + FLAGS.model, + input_script=FLAGS.input_script, + system=FLAGS.system, + datafile=FLAGS.datafile, + numb_test=FLAGS.numb_test, + detail_file=FLAGS.detail_file, + shuffle_test=FLAGS.shuffle_test, + head=FLAGS.head, + ) + trainer.run() + + +def freeze(FLAGS): + model = torch.jit.script( + inference.Tester(FLAGS.model, numb_test=1, head=FLAGS.head).model + ) + torch.jit.save( + model, + FLAGS.output, + { + # TODO: _extra_files + }, + ) + + +# avoid logger conflicts of tf version +def clean_loggers(): + logger = logging.getLogger() + while logger.hasHandlers(): + logger.removeHandler(logger.handlers[0]) + + +@record +def main(args=None): + clean_loggers() + logging.basicConfig( + level=logging.WARNING if env.LOCAL_RANK else logging.INFO, + format=f"%(asctime)-15s {os.environ.get('RANK') or ''} [%(filename)s:%(lineno)d] %(levelname)s %(message)s", + ) + logging.info("DeepMD version: %s", __version__) + parser = argparse.ArgumentParser( + description="A tool to manage deep models of the potential energy surface." + ) + subparsers = parser.add_subparsers(dest="command") + train_parser = subparsers.add_parser("train", help="Train a model.") + train_parser.add_argument("INPUT", help="A JSON-format configuration file.") + parser_train_subgroup = train_parser.add_mutually_exclusive_group() + parser_train_subgroup.add_argument( + "-i", + "--init-model", + type=str, + default=None, + help="Initialize the model by the provided checkpoint.", + ) + parser_train_subgroup.add_argument( + "-r", + "--restart", + type=str, + default=None, + help="Restart the training from the provided checkpoint.", + ) + parser_train_subgroup.add_argument( + "-t", + "--finetune", + type=str, + default=None, + help="Finetune the frozen pretrained model.", + ) + train_parser.add_argument( + "-m", + "--model-branch", + type=str, + default="", + help="Model branch chosen for fine-tuning if multi-task. If not specified, it will re-init the fitting net.", + ) + train_parser.add_argument( + "--force-load", + action="store_true", + help="Force load from checkpoint; missing tensors will be initialized from scratch.", + ) + + test_parser = subparsers.add_parser("test", help="Test a model.") + test_parser_subgroup = test_parser.add_mutually_exclusive_group() + test_parser_subgroup.add_argument( + "-s", + "--system", + default=None, + type=str, + help="The system dir.
Recursively detect systems in this directory", + ) + test_parser_subgroup.add_argument( + "-f", + "--datafile", + default=None, + type=str, + help="The path to a file listing the test systems.", + ) + test_parser_subgroup.add_argument( + "-i", + "--input-script", + default=None, + type=str, + help="The path to the input script; the validation systems will be tested.", + ) + test_parser.add_argument( + "-m", + "--model", + default="model.pt", + type=str, + help="Model checkpoint to import", + ) + test_parser.add_argument( + "--head", + default=None, + type=str, + help="Task head to test if in multi-task mode.", + ) + test_parser.add_argument( + "-n", "--numb-test", default=100, type=int, help="The number of data items to test" + ) + test_parser.add_argument( + "-d", + "--detail-file", + type=str, + default=None, + help="The prefix to files where details of energy, force and virial accuracy/accuracy per atom will be written", + ) + test_parser.add_argument( + "--shuffle-test", action="store_true", default=False, help="Shuffle test data" + ) + + freeze_parser = subparsers.add_parser("freeze", help="Freeze a model.") + freeze_parser.add_argument("model", help="The model checkpoint to freeze.") + freeze_parser.add_argument( + "-o", + "--output", + type=str, + default="frozen_model.pth", + help="The frozen model path", + ) + freeze_parser.add_argument( + "--head", + default=None, + type=str, + help="Task head to freeze if in multi-task mode.", + ) + + FLAGS = parser.parse_args(args) + if FLAGS.command == "train": + train(FLAGS) + elif FLAGS.command == "test": + test(FLAGS) + elif FLAGS.command == "freeze": + freeze(FLAGS) + else: + logging.error("Invalid command!") + parser.print_help() + + +if __name__ == "__main__": + main() diff --git a/deepmd/pt/infer/__init__.py b/deepmd/pt/infer/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pt/infer/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pt/infer/deep_eval.py b/deepmd/pt/infer/deep_eval.py new file mode 100644 index 0000000000..79772b47ae --- /dev/null +++ b/deepmd/pt/infer/deep_eval.py @@ -0,0 +1,412 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from pathlib import ( + Path, +) +from typing import ( + Callable, + List, + Optional, + Tuple, + Union, +) + +import numpy as np +import torch + +from deepmd.infer.deep_pot import DeepPot as DeepPotBase +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.train.wrapper import ( + ModelWrapper, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.auto_batch_size import ( + AutoBatchSize, +) +from deepmd.pt.utils.env import ( + DEVICE, + GLOBAL_PT_FLOAT_PRECISION, +) + + +class DeepEval: + def __init__( + self, + model_file: "Path", + auto_batch_size: Union[bool, int, AutoBatchSize] = True, + ): + self.model_path = model_file + state_dict = torch.load(model_file, map_location=env.DEVICE) + if "model" in state_dict: + state_dict = state_dict["model"] + self.input_param = state_dict["_extra_state"]["model_params"] + self.input_param["resuming"] = True + self.multi_task = "model_dict" in self.input_param + assert not self.multi_task, "multitask mode currently not supported!"
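+ # Rebuild the model from the hyper-parameters stored in the checkpoint's
+ # extra state and load the trained weights; the descriptor's cutoff radius
+ # and the cumulative sum of sel are recorded on the evaluator for later use.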
+ self.type_split = self.input_param["descriptor"]["type"] in ["se_e2_a"] + self.type_map = self.input_param["type_map"] + self.dp = ModelWrapper(get_model(self.input_param, None).to(DEVICE)) + self.dp.load_state_dict(state_dict) + self.rcut = self.dp.model["Default"].descriptor.get_rcut() + self.sec = np.cumsum(self.dp.model["Default"].descriptor.get_sel()) + if isinstance(auto_batch_size, bool): + if auto_batch_size: + self.auto_batch_size = AutoBatchSize() + else: + self.auto_batch_size = None + elif isinstance(auto_batch_size, int): + self.auto_batch_size = AutoBatchSize(auto_batch_size) + elif isinstance(auto_batch_size, AutoBatchSize): + self.auto_batch_size = auto_batch_size + else: + raise TypeError("auto_batch_size should be bool, int, or AutoBatchSize") + + def eval( + self, + coords: Union[np.ndarray, torch.Tensor], + cells: Optional[Union[np.ndarray, torch.Tensor]], + atom_types: Union[np.ndarray, torch.Tensor, List[int]], + atomic: bool = False, + ): + raise NotImplementedError + + +class DeepPot(DeepEval, DeepPotBase): + def __init__( + self, + model_file: "Path", + auto_batch_size: Union[bool, int, AutoBatchSize] = True, + neighbor_list=None, + ): + if neighbor_list is not None: + raise NotImplementedError + super().__init__( + model_file, + auto_batch_size=auto_batch_size, + ) + + def eval( + self, + coords: np.ndarray, + cells: np.ndarray, + atom_types: List[int], + atomic: bool = False, + fparam: Optional[np.ndarray] = None, + aparam: Optional[np.ndarray] = None, + efield: Optional[np.ndarray] = None, + mixed_type: bool = False, + ): + if fparam is not None or aparam is not None or efield is not None: + raise NotImplementedError + # convert all of the input to numpy array + atom_types = np.array(atom_types, dtype=np.int32) + coords = np.array(coords) + if cells is not None: + cells = np.array(cells) + natoms, numb_test = self._get_natoms_and_nframes( + coords, atom_types, len(atom_types.shape) > 1 + ) + return self._eval_func(self._eval_model, numb_test, natoms)( + coords, cells, atom_types, atomic + ) + + def _eval_func(self, inner_func: Callable, numb_test: int, natoms: int) -> Callable: + """Wrapper method with auto batch size. 
+ + Parameters + ---------- + inner_func : Callable + the method to be wrapped + numb_test : int + number of tests + natoms : int + number of atoms + + Returns + ------- + Callable + the wrapper + """ + if self.auto_batch_size is not None: + + def eval_func(*args, **kwargs): + return self.auto_batch_size.execute_all( + inner_func, numb_test, natoms, *args, **kwargs + ) + + else: + eval_func = inner_func + return eval_func + + def _get_natoms_and_nframes( + self, + coords: np.ndarray, + atom_types: Union[List[int], np.ndarray], + mixed_type: bool = False, + ) -> Tuple[int, int]: + if mixed_type: + natoms = len(atom_types[0]) + else: + natoms = len(atom_types) + if natoms == 0: + assert coords.size == 0 + else: + coords = np.reshape(np.array(coords), [-1, natoms * 3]) + nframes = coords.shape[0] + return natoms, nframes + + def _eval_model( + self, + coords: np.ndarray, + cells: Optional[np.ndarray], + atom_types: np.ndarray, + atomic: bool = False, + ): + model = self.dp.to(DEVICE) + energy_out = None + atomic_energy_out = None + force_out = None + virial_out = None + atomic_virial_out = None + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) + else: + natoms = len(atom_types[0]) + + coord_input = torch.tensor( + coords.reshape([-1, natoms, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION + ).to(DEVICE) + type_input = torch.tensor(atom_types, dtype=torch.long).to(DEVICE) + if cells is not None: + box_input = torch.tensor( + cells.reshape([-1, 3, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION + ).to(DEVICE) + else: + box_input = None + + batch_output = model( + coord_input, type_input, box=box_input, do_atomic_virial=atomic + ) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + energy_out = batch_output["energy"].detach().cpu().numpy() + if "atom_energy" in batch_output: + atomic_energy_out = batch_output["atom_energy"].detach().cpu().numpy() + force_out = batch_output["force"].detach().cpu().numpy() + virial_out = batch_output["virial"].detach().cpu().numpy() + if "atomic_virial" in batch_output: + atomic_virial_out = batch_output["atomic_virial"].detach().cpu().numpy() + + if not atomic: + return energy_out, force_out, virial_out + else: + return ( + energy_out, + force_out, + virial_out, + atomic_energy_out, + atomic_virial_out, + ) + + def get_ntypes(self) -> int: + """Get the number of atom types of this model.""" + return len(self.type_map) + + def get_type_map(self) -> List[str]: + """Get the type map (element name of the atom types) of this model.""" + return self.type_map + + def get_dim_fparam(self) -> int: + """Get the number (dimension) of frame parameters of this DP.""" + return 0 + + def get_dim_aparam(self) -> int: + """Get the number (dimension) of atomic parameters of this DP.""" + return 0 + + +# For tests only +def eval_model( + model, + coords: Union[np.ndarray, torch.Tensor], + cells: Optional[Union[np.ndarray, torch.Tensor]], + atom_types: Union[np.ndarray, torch.Tensor, List[int]], + atomic: bool = False, + infer_batch_size: int = 2, + denoise: bool = False, +): + model = model.to(DEVICE) + energy_out = [] + atomic_energy_out = [] + force_out = [] + virial_out = [] + atomic_virial_out = [] + updated_coord_out = [] + logits_out = [] + err_msg = ( + f"All inputs should be the same format, " + f"but found {type(coords)}, {type(cells)}, {type(atom_types)} instead! 
" + ) + return_tensor = True + if isinstance(coords, torch.Tensor): + if cells is not None: + assert isinstance(cells, torch.Tensor), err_msg + assert isinstance(atom_types, torch.Tensor) or isinstance(atom_types, list) + atom_types = torch.tensor(atom_types, dtype=torch.long).to(DEVICE) + elif isinstance(coords, np.ndarray): + if cells is not None: + assert isinstance(cells, np.ndarray), err_msg + assert isinstance(atom_types, np.ndarray) or isinstance(atom_types, list) + atom_types = np.array(atom_types, dtype=np.int32) + return_tensor = False + + nframes = coords.shape[0] + if len(atom_types.shape) == 1: + natoms = len(atom_types) + if isinstance(atom_types, torch.Tensor): + atom_types = torch.tile(atom_types.unsqueeze(0), [nframes, 1]).reshape( + nframes, -1 + ) + else: + atom_types = np.tile(atom_types, nframes).reshape(nframes, -1) + else: + natoms = len(atom_types[0]) + + coord_input = torch.tensor( + coords.reshape([-1, natoms, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION + ).to(DEVICE) + type_input = torch.tensor(atom_types, dtype=torch.long).to(DEVICE) + box_input = None + if cells is None: + pbc = False + else: + pbc = True + box_input = torch.tensor( + cells.reshape([-1, 3, 3]), dtype=GLOBAL_PT_FLOAT_PRECISION + ).to(DEVICE) + num_iter = int((nframes + infer_batch_size - 1) / infer_batch_size) + + for ii in range(num_iter): + batch_coord = coord_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_atype = type_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_box = None + if pbc: + batch_box = box_input[ii * infer_batch_size : (ii + 1) * infer_batch_size] + batch_output = model(batch_coord, batch_atype, box=batch_box) + if isinstance(batch_output, tuple): + batch_output = batch_output[0] + if not return_tensor: + if "energy" in batch_output: + energy_out.append(batch_output["energy"].detach().cpu().numpy()) + if "atom_energy" in batch_output: + atomic_energy_out.append( + batch_output["atom_energy"].detach().cpu().numpy() + ) + if "force" in batch_output: + force_out.append(batch_output["force"].detach().cpu().numpy()) + if "virial" in batch_output: + virial_out.append(batch_output["virial"].detach().cpu().numpy()) + if "atomic_virial" in batch_output: + atomic_virial_out.append( + batch_output["atomic_virial"].detach().cpu().numpy() + ) + if "updated_coord" in batch_output: + updated_coord_out.append( + batch_output["updated_coord"].detach().cpu().numpy() + ) + if "logits" in batch_output: + logits_out.append(batch_output["logits"].detach().cpu().numpy()) + else: + if "energy" in batch_output: + energy_out.append(batch_output["energy"]) + if "atom_energy" in batch_output: + atomic_energy_out.append(batch_output["atom_energy"]) + if "force" in batch_output: + force_out.append(batch_output["force"]) + if "virial" in batch_output: + virial_out.append(batch_output["virial"]) + if "atomic_virial" in batch_output: + atomic_virial_out.append(batch_output["atomic_virial"]) + if "updated_coord" in batch_output: + updated_coord_out.append(batch_output["updated_coord"]) + if "logits" in batch_output: + logits_out.append(batch_output["logits"]) + if not return_tensor: + energy_out = ( + np.concatenate(energy_out) if energy_out else np.zeros([nframes, 1]) + ) + atomic_energy_out = ( + np.concatenate(atomic_energy_out) + if atomic_energy_out + else np.zeros([nframes, natoms, 1]) + ) + force_out = ( + np.concatenate(force_out) if force_out else np.zeros([nframes, natoms, 3]) + ) + virial_out = ( + np.concatenate(virial_out) if virial_out else np.zeros([nframes, 3, 3]) 
+ ) + atomic_virial_out = ( + np.concatenate(atomic_virial_out) + if atomic_virial_out + else np.zeros([nframes, natoms, 3, 3]) + ) + updated_coord_out = ( + np.concatenate(updated_coord_out) if updated_coord_out else None + ) + logits_out = np.concatenate(logits_out) if logits_out else None + else: + energy_out = ( + torch.cat(energy_out) + if energy_out + else torch.zeros([nframes, 1], dtype=GLOBAL_PT_FLOAT_PRECISION).to(DEVICE) + ) + atomic_energy_out = ( + torch.cat(atomic_energy_out) + if atomic_energy_out + else torch.zeros([nframes, natoms, 1], dtype=GLOBAL_PT_FLOAT_PRECISION).to( + DEVICE + ) + ) + force_out = ( + torch.cat(force_out) + if force_out + else torch.zeros([nframes, natoms, 3], dtype=GLOBAL_PT_FLOAT_PRECISION).to( + DEVICE + ) + ) + virial_out = ( + torch.cat(virial_out) + if virial_out + else torch.zeros([nframes, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION).to( + DEVICE + ) + ) + atomic_virial_out = ( + torch.cat(atomic_virial_out) + if atomic_virial_out + else torch.zeros( + [nframes, natoms, 3, 3], dtype=GLOBAL_PT_FLOAT_PRECISION + ).to(DEVICE) + ) + updated_coord_out = torch.cat(updated_coord_out) if updated_coord_out else None + logits_out = torch.cat(logits_out) if logits_out else None + if denoise: + return updated_coord_out, logits_out + else: + if not atomic: + return energy_out, force_out, virial_out + else: + return ( + energy_out, + force_out, + virial_out, + atomic_energy_out, + atomic_virial_out, + ) diff --git a/deepmd/pt/infer/inference.py b/deepmd/pt/infer/inference.py new file mode 100644 index 0000000000..4906bb7a46 --- /dev/null +++ b/deepmd/pt/infer/inference.py @@ -0,0 +1,417 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import logging +import math +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np +import torch +from torch.utils.data import ( + DataLoader, + RandomSampler, +) + +from deepmd.common import ( + expand_sys_str, +) +from deepmd.pt.loss import ( + DenoiseLoss, + EnergyStdLoss, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.train.wrapper import ( + ModelWrapper, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.env import ( + DEVICE, + JIT, + NUM_WORKERS, +) + +if torch.__version__.startswith("2"): + import torch._dynamo + + +class Tester: + def __init__( + self, + model_ckpt, + input_script=None, + system=None, + datafile=None, + numb_test=100, + detail_file=None, + shuffle_test=False, + head=None, + ): + """Construct a DeePMD tester. + + Args: + - model_ckpt: The path to the model checkpoint to be tested. + """ + self.numb_test = numb_test + self.detail_file = detail_file + self.shuffle_test = shuffle_test + # Model + state_dict = torch.load(model_ckpt, map_location=DEVICE) + if "model" in state_dict: + state_dict = state_dict["model"] + model_params = state_dict["_extra_state"]["model_params"] + self.multi_task = "model_dict" in model_params + if self.multi_task: + assert head is not None, "Head must be specified in multitask mode!" + self.head = head + assert head in model_params["model_dict"], ( + f"Specified head {head} not found in model {model_ckpt}! " + f"Available ones are {list(model_params['model_dict'].keys())}." + ) + model_params = model_params["model_dict"][head] + state_dict_head = {"_extra_state": state_dict["_extra_state"]} + for item in state_dict: + if f"model.{head}."
in item: + state_dict_head[ + item.replace(f"model.{head}.", "model.Default.") + ] = state_dict[item].clone() + state_dict = state_dict_head + + # Data + if input_script is not None: + with open(input_script) as fin: + self.input_script = json.load(fin) + training_params = self.input_script["training"] + if not self.multi_task: + assert ( + "validation_data" in training_params + ), f"Validation systems not found in {input_script}!" + self.systems = training_params["validation_data"]["systems"] + self.batchsize = training_params["validation_data"]["batch_size"] + logging.info( + f"Testing validation systems in input script: {input_script}" + ) + else: + assert ( + "data_dict" in training_params + ), f"Input script {input_script} is not in multi-task mode!" + assert head in training_params["data_dict"], ( + f"Specified head {head} not found in input script {input_script}! " + f"Available ones are {list(training_params['data_dict'].keys())}." + ) + assert ( + "validation_data" in training_params["data_dict"][head] + ), f"Validation systems not found in head {head} of {input_script}!" + self.systems = training_params["data_dict"][head]["validation_data"][ + "systems" + ] + self.batchsize = training_params["data_dict"][head]["validation_data"][ + "batch_size" + ] + logging.info( + f"Testing validation systems in head {head} of input script: {input_script}" + ) + elif system is not None: + self.systems = expand_sys_str(system) + self.batchsize = "auto" + logging.info("Testing systems in path: %s", system) + elif datafile is not None: + with open(datafile) as fin: + self.systems = fin.read().splitlines() + self.batchsize = "auto" + logging.info("Testing systems in file: %s", datafile) + else: + self.systems = None + self.batchsize = None + + self.type_split = False + if model_params["descriptor"]["type"] in ["se_e2_a"]: + self.type_split = True + self.model_params = deepcopy(model_params) + model_params["resuming"] = True + self.model = get_model(model_params).to(DEVICE) + + # Model Wrapper + self.wrapper = ModelWrapper(self.model) # inference only + if JIT: + self.wrapper = torch.jit.script(self.wrapper) + self.wrapper.load_state_dict(state_dict) + + # Loss + if "fitting_net" not in model_params: + assert ( + input_script is not None + ), "Denoise model must use --input-script mode!" + loss_params = self.input_script["loss"] + loss_type = loss_params.pop("type", "ener") + assert ( + loss_type == "denoise" + ), "Models without fitting_net only support denoise test!" 
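+ # A denoise model carries no fitting net, so the noise settings are
+ # rebuilt from the loss section of the input script; they are passed to
+ # the test data loader below so that test frames are perturbed the same
+ # way as during training.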
+ self.noise_settings = { + "noise_type": loss_params.pop("noise_type", "uniform"), + "noise": loss_params.pop("noise", 1.0), + "noise_mode": loss_params.pop("noise_mode", "fix_num"), + "mask_num": loss_params.pop("mask_num", 8), + "same_mask": loss_params.pop("same_mask", False), + "mask_coord": loss_params.pop("mask_coord", False), + "mask_type": loss_params.pop("mask_type", False), + "mask_type_idx": len(model_params["type_map"]) - 1, + } + loss_params["ntypes"] = len(model_params["type_map"]) + self.loss = DenoiseLoss(**loss_params) + else: + self.noise_settings = None + self.loss = EnergyStdLoss(inference=True) + + @staticmethod + def get_data(data): + batch_data = next(iter(data)) + for key in batch_data.keys(): + if key == "sid" or key == "fid": + continue + elif not isinstance(batch_data[key], list): + if batch_data[key] is not None: + batch_data[key] = batch_data[key].to(DEVICE) + else: + batch_data[key] = [item.to(DEVICE) for item in batch_data[key]] + input_dict = {} + for item in [ + "coord", + "atype", + "box", + ]: + if item in batch_data: + input_dict[item] = batch_data[item] + else: + input_dict[item] = None + label_dict = {} + for item in [ + "energy", + "force", + "virial", + "clean_coord", + "clean_type", + "coord_mask", + "type_mask", + ]: + if item in batch_data: + label_dict[item] = batch_data[item] + return input_dict, label_dict + + def run(self): + systems = self.systems + system_results = {} + global_sum_natoms = 0 + for cc, system in enumerate(systems): + logging.info("# ---------------output of dp test--------------- ") + logging.info(f"# testing system : {system}") + system_pred = [] + system_label = [] + dataset = DpLoaderSet( + [system], + self.batchsize, + self.model_params, + type_split=self.type_split, + noise_settings=self.noise_settings, + shuffle=self.shuffle_test, + ) + sampler = RandomSampler( + dataset, replacement=True, num_samples=dataset.total_batch + ) + if sampler is None: + logging.warning( + "Sampler not specified!" + ) # A None sampler leads to a premature StopIteration; the sampler must draw with replacement to produce the expected number of items per iteration. + dataloader = DataLoader( + dataset, + sampler=sampler, + batch_size=None, + num_workers=min( + NUM_WORKERS, 1 + ), # num_workers=0 changes the behavior of the iterator; it should be >= 1 + drop_last=False, + ) + data = iter(dataloader) + + single_results = {} + sum_natoms = 0 + sys_natoms = None + for ii in range(self.numb_test): + try: + input_dict, label_dict = self.get_data(data) + except StopIteration: + if ( + ii < dataset.total_batch + ): # Unexpected StopIteration (test step < total batch) + raise StopIteration + else: + break + model_pred, _, _ = self.wrapper(**input_dict) + system_pred.append( + { + item: model_pred[item].detach().cpu().numpy() + for item in model_pred + } + ) + system_label.append( + { + item: label_dict[item].detach().cpu().numpy() + for item in label_dict + } + ) + natoms = int(input_dict["atype"].shape[-1]) + _, more_loss = self.loss( + model_pred, label_dict, natoms, 1.0, mae=True + ) # TODO: lr here is useless + if sys_natoms is None: + sys_natoms = natoms + else: + assert ( + sys_natoms == natoms + ), "All frames in one system must have the same number of atoms!"
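+ # Accumulate per-system errors weighted by atom count: MAE-style terms
+ # add linearly, while squared-error terms are summed so that the RMSE
+ # can be recovered by a square root after the normalization below.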
+ sum_natoms += natoms + for k, v in more_loss.items(): + if "mae" in k: + single_results[k] = single_results.get(k, 0.0) + v * natoms + else: + single_results[k] = single_results.get(k, 0.0) + v**2 * natoms + if self.detail_file is not None: + save_detail_file( + Path(self.detail_file), + system_pred, + system_label, + sys_natoms, + system_name=system, + append=(cc != 0), + ) + results = { + k: v / sum_natoms if "mae" in k else math.sqrt(v / sum_natoms) + for k, v in single_results.items() + } + for item in sorted(results.keys()): + logging.info(f"{item}: {results[item]:.4f}") + logging.info("# ----------------------------------------------- ") + for k, v in single_results.items(): + system_results[k] = system_results.get(k, 0.0) + v + global_sum_natoms += sum_natoms + + global_results = { + k: v / global_sum_natoms if "mae" in k else math.sqrt(v / global_sum_natoms) + for k, v in system_results.items() + } + logging.info("# ----------weighted average of errors----------- ") + if not self.multi_task: + logging.info(f"# number of systems : {len(systems)}") + else: + logging.info(f"# number of systems for {self.head}: {len(systems)}") + for item in sorted(global_results.keys()): + logging.info(f"{item}: {global_results[item]:.4f}") + logging.info("# ----------------------------------------------- ") + return global_results + + +def save_txt_file( + fname: Path, data: np.ndarray, header: str = "", append: bool = False +): + """Save numpy array to text file. + + Parameters + ---------- + fname : Path + filename + data : np.ndarray + data to save to disk + header : str, optional + header string to use in file, by default "" + append : bool, optional + if True, the file will be appended to instead of overwritten, by default False + """ + flags = "ab" if append else "w" + with fname.open(flags) as fp: + np.savetxt(fp, data, header=header) + + +def save_detail_file( + detail_path, system_pred, system_label, natoms, system_name, append=False +): + ntest = len(system_pred) + data_e = np.concatenate([item["energy"] for item in system_label]).reshape([-1, 1]) + pred_e = np.concatenate([item["energy"] for item in system_pred]).reshape([-1, 1]) + pe = np.concatenate( + ( + data_e, + pred_e, + ), + axis=1, + ) + save_txt_file( + detail_path.with_suffix(".e.out"), + pe, + header="%s: data_e pred_e" % system_name, + append=append, + ) + pe_atom = pe / natoms + save_txt_file( + detail_path.with_suffix(".e_peratom.out"), + pe_atom, + header="%s: data_e pred_e" % system_name, + append=append, + ) + if "force" in system_pred[0]: + data_f = np.concatenate([item["force"] for item in system_label]).reshape( + [-1, 3] + ) + pred_f = np.concatenate([item["force"] for item in system_pred]).reshape( + [-1, 3] + ) + pf = np.concatenate( + ( + data_f, + pred_f, + ), + axis=1, + ) + save_txt_file( + detail_path.with_suffix(".f.out"), + pf, + header="%s: data_fx data_fy data_fz pred_fx pred_fy pred_fz" % system_name, + append=append, + ) + if "virial" in system_pred[0]: + data_v = np.concatenate([item["virial"] for item in system_label]).reshape( + [-1, 9] + ) + pred_v = np.concatenate([item["virial"] for item in system_pred]).reshape( + [-1, 9] + ) + pv = np.concatenate( + ( + data_v, + pred_v, + ), + axis=1, + ) + save_txt_file( + detail_path.with_suffix(".v.out"), + pv, + header=f"{system_name}: data_vxx data_vxy data_vxz data_vyx data_vyy " + "data_vyz data_vzx data_vzy data_vzz pred_vxx pred_vxy pred_vxz pred_vyx " + "pred_vyy pred_vyz pred_vzx pred_vzy pred_vzz", + append=append, + ) + pv_atom = pv / natoms + save_txt_file(
+ detail_path.with_suffix(".v_peratom.out"), + pv_atom, + header=f"{system_name}: data_vxx data_vxy data_vxz data_vyx data_vyy " + "data_vyz data_vzx data_vzy data_vzz pred_vxx pred_vxy pred_vxz pred_vyx " + "pred_vyy pred_vyz pred_vzx pred_vzy pred_vzz", + append=append, + ) diff --git a/deepmd/pt/loss/__init__.py b/deepmd/pt/loss/__init__.py new file mode 100644 index 0000000000..d3a095ce13 --- /dev/null +++ b/deepmd/pt/loss/__init__.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .denoise import ( + DenoiseLoss, +) +from .ener import ( + EnergyStdLoss, +) +from .loss import ( + TaskLoss, +) + +__all__ = [ + "DenoiseLoss", + "EnergyStdLoss", + "TaskLoss", +] diff --git a/deepmd/pt/loss/denoise.py b/deepmd/pt/loss/denoise.py new file mode 100644 index 0000000000..cd12e70bb1 --- /dev/null +++ b/deepmd/pt/loss/denoise.py @@ -0,0 +1,109 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch +import torch.nn.functional as F + +from deepmd.pt.loss.loss import ( + TaskLoss, +) +from deepmd.pt.utils import ( + env, +) + + +class DenoiseLoss(TaskLoss): + def __init__( + self, + ntypes, + masked_token_loss=1.0, + masked_coord_loss=1.0, + norm_loss=0.01, + use_l1=True, + beta=1.00, + mask_loss_coord=True, + mask_loss_token=True, + **kwargs, + ): + """Construct a layer to compute loss on coord, and type reconstruction.""" + super().__init__() + self.ntypes = ntypes + self.masked_token_loss = masked_token_loss + self.masked_coord_loss = masked_coord_loss + self.norm_loss = norm_loss + self.has_coord = self.masked_coord_loss > 0.0 + self.has_token = self.masked_token_loss > 0.0 + self.has_norm = self.norm_loss > 0.0 + self.use_l1 = use_l1 + self.beta = beta + self.frac_beta = 1.00 / self.beta + self.mask_loss_coord = mask_loss_coord + self.mask_loss_token = mask_loss_token + + def forward(self, model_pred, label, natoms, learning_rate, mae=False): + """Return loss on coord and type denoise. + + Returns + ------- + - loss: Loss to minimize. 
+ """ + updated_coord = model_pred["updated_coord"] + logits = model_pred["logits"] + clean_coord = label["clean_coord"] + clean_type = label["clean_type"] + coord_mask = label["coord_mask"] + type_mask = label["type_mask"] + + loss = torch.tensor(0.0, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + more_loss = {} + if self.has_coord: + if self.mask_loss_coord: + masked_updated_coord = updated_coord[coord_mask] + masked_clean_coord = clean_coord[coord_mask] + if masked_updated_coord.size(0) > 0: + coord_loss = F.smooth_l1_loss( + masked_updated_coord.view(-1, 3), + masked_clean_coord.view(-1, 3), + reduction="mean", + beta=self.beta, + ) + else: + coord_loss = torch.tensor( + 0.0, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + else: + coord_loss = F.smooth_l1_loss( + updated_coord.view(-1, 3), + clean_coord.view(-1, 3), + reduction="mean", + beta=self.beta, + ) + loss += self.masked_coord_loss * coord_loss + more_loss["coord_l1_error"] = coord_loss.detach() + if self.has_token: + if self.mask_loss_token: + masked_logits = logits[type_mask] + masked_target = clean_type[type_mask] + if masked_logits.size(0) > 0: + token_loss = F.nll_loss( + F.log_softmax(masked_logits, dim=-1), + masked_target, + reduction="mean", + ) + else: + token_loss = torch.tensor( + 0.0, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + else: + token_loss = F.nll_loss( + F.log_softmax(logits.view(-1, self.ntypes - 1), dim=-1), + clean_type.view(-1), + reduction="mean", + ) + loss += self.masked_token_loss * token_loss + more_loss["token_error"] = token_loss.detach() + if self.has_norm: + norm_x = model_pred["norm_x"] + norm_delta_pair_rep = model_pred["norm_delta_pair_rep"] + loss += self.norm_loss * (norm_x + norm_delta_pair_rep) + more_loss["norm_loss"] = norm_x.detach() + norm_delta_pair_rep.detach() + + return loss, more_loss diff --git a/deepmd/pt/loss/ener.py b/deepmd/pt/loss/ener.py new file mode 100644 index 0000000000..4ed765cf69 --- /dev/null +++ b/deepmd/pt/loss/ener.py @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch +import torch.nn.functional as F + +from deepmd.pt.loss.loss import ( + TaskLoss, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.env import ( + GLOBAL_PT_FLOAT_PRECISION, +) + + +class EnergyStdLoss(TaskLoss): + def __init__( + self, + starter_learning_rate=1.0, + start_pref_e=0.0, + limit_pref_e=0.0, + start_pref_f=0.0, + limit_pref_f=0.0, + start_pref_v=0.0, + limit_pref_v=0.0, + use_l1_all: bool = False, + inference=False, + **kwargs, + ): + """Construct a layer to compute loss on energy, force and virial.""" + super().__init__() + self.starter_learning_rate = starter_learning_rate + self.has_e = (start_pref_e != 0.0 and limit_pref_e != 0.0) or inference + self.has_f = (start_pref_f != 0.0 and limit_pref_f != 0.0) or inference + self.has_v = (start_pref_v != 0.0 and limit_pref_v != 0.0) or inference + self.start_pref_e = start_pref_e + self.limit_pref_e = limit_pref_e + self.start_pref_f = start_pref_f + self.limit_pref_f = limit_pref_f + self.start_pref_v = start_pref_v + self.limit_pref_v = limit_pref_v + self.use_l1_all = use_l1_all + self.inference = inference + + def forward(self, model_pred, label, natoms, learning_rate, mae=False): + """Return loss on loss and force. + + Args: + - natoms: Tell atom count. + - p_energy: Predicted energy of all atoms. + - p_force: Predicted force per atom. + - l_energy: Actual energy of all atoms. + - l_force: Actual force per atom. 
+ + Returns + ------- + - loss: Loss to minimize. + """ + coef = learning_rate / self.starter_learning_rate + pref_e = self.limit_pref_e + (self.start_pref_e - self.limit_pref_e) * coef + pref_f = self.limit_pref_f + (self.start_pref_f - self.limit_pref_f) * coef + pref_v = self.limit_pref_v + (self.start_pref_v - self.limit_pref_v) * coef + loss = torch.tensor(0.0, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE) + more_loss = {} + # more_loss['log_keys'] = [] # showed when validation on the fly + # more_loss['test_keys'] = [] # showed when doing dp test + atom_norm = 1.0 / natoms + if self.has_e and "energy" in model_pred and "energy" in label: + if not self.use_l1_all: + l2_ener_loss = torch.mean( + torch.square(model_pred["energy"] - label["energy"]) + ) + if not self.inference: + more_loss["l2_ener_loss"] = l2_ener_loss.detach() + loss += atom_norm * (pref_e * l2_ener_loss) + rmse_e = l2_ener_loss.sqrt() * atom_norm + more_loss["rmse_e"] = rmse_e.detach() + # more_loss['log_keys'].append('rmse_e') + else: # use l1 and for all atoms + l1_ener_loss = F.l1_loss( + model_pred["energy"].reshape(-1), + label["energy"].reshape(-1), + reduction="sum", + ) + loss += pref_e * l1_ener_loss + more_loss["mae_e"] = F.l1_loss( + model_pred["energy"].reshape(-1), + label["energy"].reshape(-1), + reduction="mean", + ).detach() + # more_loss['log_keys'].append('rmse_e') + if mae: + mae_e = ( + torch.mean(torch.abs(model_pred["energy"] - label["energy"])) + * atom_norm + ) + more_loss["mae_e"] = mae_e.detach() + mae_e_all = torch.mean( + torch.abs(model_pred["energy"] - label["energy"]) + ) + more_loss["mae_e_all"] = mae_e_all.detach() + + if self.has_f and "force" in model_pred and "force" in label: + if "force_target_mask" in model_pred: + force_target_mask = model_pred["force_target_mask"] + else: + force_target_mask = None + if not self.use_l1_all: + if force_target_mask is not None: + diff_f = (label["force"] - model_pred["force"]) * force_target_mask + force_cnt = force_target_mask.squeeze(-1).sum(-1) + l2_force_loss = torch.mean( + torch.square(diff_f).mean(-1).sum(-1) / force_cnt + ) + else: + diff_f = label["force"] - model_pred["force"] + l2_force_loss = torch.mean(torch.square(diff_f)) + if not self.inference: + more_loss["l2_force_loss"] = l2_force_loss.detach() + loss += (pref_f * l2_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) + rmse_f = l2_force_loss.sqrt() + more_loss["rmse_f"] = rmse_f.detach() + else: + l1_force_loss = F.l1_loss( + label["force"], model_pred["force"], reduction="none" + ) + if force_target_mask is not None: + l1_force_loss *= force_target_mask + force_cnt = force_target_mask.squeeze(-1).sum(-1) + more_loss["mae_f"] = ( + l1_force_loss.mean(-1).sum(-1) / force_cnt + ).mean() + l1_force_loss = (l1_force_loss.sum(-1).sum(-1) / force_cnt).sum() + else: + more_loss["mae_f"] = l1_force_loss.mean().detach() + l1_force_loss = l1_force_loss.sum(-1).mean(-1).sum() + loss += (pref_f * l1_force_loss).to(GLOBAL_PT_FLOAT_PRECISION) + if mae: + mae_f = torch.mean(torch.abs(diff_f)) + more_loss["mae_f"] = mae_f.detach() + + if self.has_v and "virial" in model_pred and "virial" in label: + diff_v = label["virial"] - model_pred["virial"].reshape(-1, 9) + l2_virial_loss = torch.mean(torch.square(diff_v)) + if not self.inference: + more_loss["l2_virial_loss"] = l2_virial_loss.detach() + loss += atom_norm * (pref_v * l2_virial_loss) + rmse_v = l2_virial_loss.sqrt() * atom_norm + more_loss["rmse_v"] = rmse_v.detach() + if mae: + mae_v = torch.mean(torch.abs(diff_v)) * atom_norm + 
more_loss["mae_v"] = mae_v.detach() + if not self.inference: + more_loss["rmse"] = torch.sqrt(loss.detach()) + return loss, more_loss diff --git a/deepmd/pt/loss/loss.py b/deepmd/pt/loss/loss.py new file mode 100644 index 0000000000..9f2c3a7ed7 --- /dev/null +++ b/deepmd/pt/loss/loss.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch + + +class TaskLoss(torch.nn.Module): + def __init__(self, **kwargs): + """Construct loss.""" + super().__init__() + + def forward(self, model_pred, label, natoms, learning_rate): + """Return loss .""" + raise NotImplementedError diff --git a/deepmd/pt/model/__init__.py b/deepmd/pt/model/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pt/model/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pt/model/backbone/__init__.py b/deepmd/pt/model/backbone/__init__.py new file mode 100644 index 0000000000..a76bdb2a2d --- /dev/null +++ b/deepmd/pt/model/backbone/__init__.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .backbone import ( + BackBone, +) +from .evoformer2b import ( + Evoformer2bBackBone, +) + +__all__ = [ + "BackBone", + "Evoformer2bBackBone", +] diff --git a/deepmd/pt/model/backbone/backbone.py b/deepmd/pt/model/backbone/backbone.py new file mode 100644 index 0000000000..ddeedfeff5 --- /dev/null +++ b/deepmd/pt/model/backbone/backbone.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch + + +class BackBone(torch.nn.Module): + def __init__(self, **kwargs): + """BackBone base method.""" + super().__init__() + + def forward(self, **kwargs): + """Calculate backBone.""" + raise NotImplementedError diff --git a/deepmd/pt/model/backbone/evoformer2b.py b/deepmd/pt/model/backbone/evoformer2b.py new file mode 100644 index 0000000000..1146b3a298 --- /dev/null +++ b/deepmd/pt/model/backbone/evoformer2b.py @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.pt.model.backbone import ( + BackBone, +) +from deepmd.pt.model.network.network import ( + Evoformer2bEncoder, +) + + +class Evoformer2bBackBone(BackBone): + def __init__( + self, + nnei, + layer_num=6, + attn_head=8, + atomic_dim=1024, + pair_dim=100, + feature_dim=1024, + ffn_dim=2048, + post_ln=False, + final_layer_norm=True, + final_head_layer_norm=False, + emb_layer_norm=False, + atomic_residual=False, + evo_residual=False, + residual_factor=1.0, + activation_function="gelu", + **kwargs, + ): + """Construct an evoformer backBone.""" + super().__init__() + self.nnei = nnei + self.layer_num = layer_num + self.attn_head = attn_head + self.atomic_dim = atomic_dim + self.pair_dim = pair_dim + self.feature_dim = feature_dim + self.head_dim = feature_dim // attn_head + assert ( + feature_dim % attn_head == 0 + ), f"feature_dim {feature_dim} must be divided by attn_head {attn_head}!" 
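+        # self.head_dim above is the per-head width, so feature_dim must
+        # split evenly across the attn_head attention heads.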
+ self.ffn_dim = ffn_dim + self.post_ln = post_ln + self.final_layer_norm = final_layer_norm + self.final_head_layer_norm = final_head_layer_norm + self.emb_layer_norm = emb_layer_norm + self.activation_function = activation_function + self.atomic_residual = atomic_residual + self.evo_residual = evo_residual + self.residual_factor = float(residual_factor) + self.encoder = Evoformer2bEncoder( + nnei=self.nnei, + layer_num=self.layer_num, + attn_head=self.attn_head, + atomic_dim=self.atomic_dim, + pair_dim=self.pair_dim, + feature_dim=self.feature_dim, + ffn_dim=self.ffn_dim, + post_ln=self.post_ln, + final_layer_norm=self.final_layer_norm, + final_head_layer_norm=self.final_head_layer_norm, + emb_layer_norm=self.emb_layer_norm, + atomic_residual=self.atomic_residual, + evo_residual=self.evo_residual, + residual_factor=self.residual_factor, + activation_function=self.activation_function, + ) + + def forward(self, atomic_rep, pair_rep, nlist, nlist_type, nlist_mask): + """Encoder the atomic and pair representations. + + Args: + - atomic_rep: Atomic representation with shape [nframes, nloc, atomic_dim]. + - pair_rep: Pair representation with shape [nframes, nloc, nnei, pair_dim]. + - nlist: Neighbor list with shape [nframes, nloc, nnei]. + - nlist_type: Neighbor types with shape [nframes, nloc, nnei]. + - nlist_mask: Neighbor mask with shape [nframes, nloc, nnei], `False` if blank. + + Returns + ------- + - atomic_rep: Atomic representation after encoder with shape [nframes, nloc, feature_dim]. + - transformed_atomic_rep: Transformed atomic representation after encoder with shape [nframes, nloc, atomic_dim]. + - pair_rep: Pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. + - delta_pair_rep: Delta pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. + - norm_x: Normalization loss of atomic_rep. + - norm_delta_pair_rep: Normalization loss of delta_pair_rep. 
+ """ + ( + atomic_rep, + transformed_atomic_rep, + pair_rep, + delta_pair_rep, + norm_x, + norm_delta_pair_rep, + ) = self.encoder(atomic_rep, pair_rep, nlist, nlist_type, nlist_mask) + return ( + atomic_rep, + transformed_atomic_rep, + pair_rep, + delta_pair_rep, + norm_x, + norm_delta_pair_rep, + ) diff --git a/deepmd/pt/model/descriptor/__init__.py b/deepmd/pt/model/descriptor/__init__.py new file mode 100644 index 0000000000..4252e34905 --- /dev/null +++ b/deepmd/pt/model/descriptor/__init__.py @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .descriptor import ( + Descriptor, + DescriptorBlock, + compute_std, + make_default_type_embedding, +) +from .dpa1 import ( + DescrptBlockSeAtten, + DescrptDPA1, +) +from .dpa2 import ( + DescrptDPA2, +) +from .env_mat import ( + prod_env_mat_se_a, +) +from .gaussian_lcc import ( + DescrptGaussianLcc, +) +from .hybrid import ( + DescrptBlockHybrid, +) +from .repformers import ( + DescrptBlockRepformers, +) +from .se_a import ( + DescrptBlockSeA, + DescrptSeA, +) + +__all__ = [ + "Descriptor", + "DescriptorBlock", + "compute_std", + "make_default_type_embedding", + "DescrptBlockSeA", + "DescrptBlockSeAtten", + "DescrptSeA", + "DescrptDPA1", + "DescrptDPA2", + "prod_env_mat_se_a", + "DescrptGaussianLcc", + "DescrptBlockHybrid", + "DescrptBlockRepformers", +] diff --git a/deepmd/pt/model/descriptor/descriptor.py b/deepmd/pt/model/descriptor/descriptor.py new file mode 100644 index 0000000000..bb98e8dc15 --- /dev/null +++ b/deepmd/pt/model/descriptor/descriptor.py @@ -0,0 +1,272 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + Callable, + List, + Optional, +) + +import numpy as np +import torch + +from deepmd.pt.model.network.network import ( + TypeEmbedNet, +) +from deepmd.pt.utils.plugin import ( + Plugin, +) + + +class Descriptor(torch.nn.Module, ABC): + """The descriptor. + Given the atomic coordinates, atomic types and neighbor list, + calculate the descriptor. + """ + + __plugins = Plugin() + local_cluster = False + + @abstractmethod + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + raise NotImplementedError + + @abstractmethod + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + raise NotImplementedError + + @abstractmethod + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + raise NotImplementedError + + @abstractmethod + def get_ntype(self) -> int: + """Returns the number of element types.""" + raise NotImplementedError + + @abstractmethod + def get_dim_out(self) -> int: + """Returns the output dimension.""" + raise NotImplementedError + + @abstractmethod + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + raise NotImplementedError + + @abstractmethod + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + """Initialize the model bias by the statistics.""" + raise NotImplementedError + + @abstractmethod + def forward( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[torch.Tensor] = None, + ): + """Calculate descriptor.""" + raise NotImplementedError + + @staticmethod + def register(key: str) -> Callable: + """Register a descriptor plugin. 
+ + Parameters + ---------- + key : str + the key of a descriptor + + Returns + ------- + Descriptor + the registered descriptor + + Examples + -------- + >>> @Descriptor.register("some_descrpt") + class SomeDescript(Descriptor): + pass + """ + return Descriptor.__plugins.register(key) + + @classmethod + def get_stat_name(cls, config): + descrpt_type = config["type"] + return Descriptor.__plugins.plugins[descrpt_type].get_stat_name(config) + + @classmethod + def get_data_process_key(cls, config): + descrpt_type = config["type"] + return Descriptor.__plugins.plugins[descrpt_type].get_data_process_key(config) + + def __new__(cls, *args, **kwargs): + if cls is Descriptor: + try: + descrpt_type = kwargs["type"] + except KeyError: + raise KeyError("the type of descriptor should be set by `type`") + if descrpt_type in Descriptor.__plugins.plugins: + cls = Descriptor.__plugins.plugins[descrpt_type] + else: + raise RuntimeError("Unknown descriptor type: " + descrpt_type) + return super().__new__(cls) + + +class DescriptorBlock(torch.nn.Module, ABC): + """The building block of descriptor. + Given the input descriptor, provide with the atomic coordinates, + atomic types and neighbor list, calculate the new descriptor. + """ + + __plugins = Plugin() + local_cluster = False + + @staticmethod + def register(key: str) -> Callable: + """Register a DescriptorBlock plugin. + + Parameters + ---------- + key : str + the key of a DescriptorBlock + + Returns + ------- + DescriptorBlock + the registered DescriptorBlock + + Examples + -------- + >>> @DescriptorBlock.register("some_descrpt") + class SomeDescript(DescriptorBlock): + pass + """ + return DescriptorBlock.__plugins.register(key) + + def __new__(cls, *args, **kwargs): + if cls is DescriptorBlock: + try: + descrpt_type = kwargs["type"] + except KeyError: + raise KeyError("the type of DescriptorBlock should be set by `type`") + if descrpt_type in DescriptorBlock.__plugins.plugins: + cls = DescriptorBlock.__plugins.plugins[descrpt_type] + else: + raise RuntimeError("Unknown DescriptorBlock type: " + descrpt_type) + return super().__new__(cls) + + @abstractmethod + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + raise NotImplementedError + + @abstractmethod + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + raise NotImplementedError + + @abstractmethod + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + raise NotImplementedError + + @abstractmethod + def get_ntype(self) -> int: + """Returns the number of element types.""" + raise NotImplementedError + + @abstractmethod + def get_dim_out(self) -> int: + """Returns the output dimension.""" + raise NotImplementedError + + @abstractmethod + def get_dim_in(self) -> int: + """Returns the output dimension.""" + raise NotImplementedError + + @abstractmethod + def compute_input_stats(self, merged): + """Update mean and stddev for DescriptorBlock elements.""" + raise NotImplementedError + + @abstractmethod + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + """Initialize the model bias by the statistics.""" + raise NotImplementedError + + def share_params(self, base_class, shared_level, resume=False): + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" 
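+        # Only shared_level == 0 is implemented: merge the accumulated
+        # statistics of both instances, then link every submodule in-place.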
+ if shared_level == 0: + # link buffers + if hasattr(self, "mean") and not resume: + # in case of change params during resume + sumr_base, suma_base, sumn_base, sumr2_base, suma2_base = ( + base_class.sumr, + base_class.suma, + base_class.sumn, + base_class.sumr2, + base_class.suma2, + ) + sumr, suma, sumn, sumr2, suma2 = ( + self.sumr, + self.suma, + self.sumn, + self.sumr2, + self.suma2, + ) + base_class.init_desc_stat( + sumr_base + sumr, + suma_base + suma, + sumn_base + sumn, + sumr2_base + sumr2, + suma2_base + suma2, + ) + self.mean = base_class.mean + self.stddev = base_class.stddev + # self.load_state_dict(base_class.state_dict()) # this does not work, because it only inits the model + # the following will successfully link all the params except buffers + for item in self._modules: + self._modules[item] = base_class._modules[item] + else: + raise NotImplementedError + + @abstractmethod + def forward( + self, + nlist: torch.Tensor, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + extended_atype_embd: Optional[torch.Tensor] = None, + mapping: Optional[torch.Tensor] = None, + ): + """Calculate DescriptorBlock.""" + raise NotImplementedError + + +def compute_std(sumv2, sumv, sumn, rcut_r): + """Compute standard deviation.""" + if sumn == 0: + return 1.0 / rcut_r + val = np.sqrt(sumv2 / sumn - np.multiply(sumv / sumn, sumv / sumn)) + if np.abs(val) < 1e-2: + val = 1e-2 + return val + + +def make_default_type_embedding( + ntypes, +): + aux = {} + aux["tebd_dim"] = 8 + return TypeEmbedNet(ntypes, aux["tebd_dim"]), aux diff --git a/deepmd/pt/model/descriptor/dpa1.py b/deepmd/pt/model/descriptor/dpa1.py new file mode 100644 index 0000000000..dd34b815c9 --- /dev/null +++ b/deepmd/pt/model/descriptor/dpa1.py @@ -0,0 +1,152 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + List, + Optional, +) + +import torch + +from deepmd.pt.model.descriptor import ( + Descriptor, +) +from deepmd.pt.model.network.network import ( + TypeEmbedNet, +) + +from .se_atten import ( + DescrptBlockSeAtten, +) + + +@Descriptor.register("dpa1") +@Descriptor.register("se_atten") +class DescrptDPA1(Descriptor): + def __init__( + self, + rcut, + rcut_smth, + sel, + ntypes: int, + neuron: list = [25, 50, 100], + axis_neuron: int = 16, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + # set_davg_zero: bool = False, + set_davg_zero: bool = True, # TODO + attn: int = 128, + attn_layer: int = 2, + attn_dotr: bool = True, + attn_mask: bool = False, + post_ln=True, + ffn=False, + ffn_embed_dim=1024, + activation="tanh", + scaling_factor=1.0, + head_num=1, + normalize=True, + temperature=None, + return_rot=False, + concat_output_tebd: bool = True, + type: Optional[str] = None, + ): + super().__init__() + del type + self.se_atten = DescrptBlockSeAtten( + rcut, + rcut_smth, + sel, + ntypes, + neuron=neuron, + axis_neuron=axis_neuron, + tebd_dim=tebd_dim, + tebd_input_mode=tebd_input_mode, + set_davg_zero=set_davg_zero, + attn=attn, + attn_layer=attn_layer, + attn_dotr=attn_dotr, + attn_mask=attn_mask, + post_ln=post_ln, + ffn=ffn, + ffn_embed_dim=ffn_embed_dim, + activation=activation, + scaling_factor=scaling_factor, + head_num=head_num, + normalize=normalize, + temperature=temperature, + return_rot=return_rot, + ) + self.type_embedding = TypeEmbedNet(ntypes, tebd_dim) + self.tebd_dim = tebd_dim + self.concat_output_tebd = concat_output_tebd + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.se_atten.get_rcut() + + def get_nsel(self) -> int: + 
"""Returns the number of selected atoms in the cut-off radius.""" + return self.se_atten.get_nsel() + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.se_atten.get_sel() + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.se_atten.get_ntype() + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + ret = self.se_atten.get_dim_out() + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + return self.se_atten.dim_emb + + def compute_input_stats(self, merged): + return self.se_atten.compute_input_stats(merged) + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + self.se_atten.init_desc_stat(sumr, suma, sumn, sumr2, suma2) + + @classmethod + def get_stat_name(cls, config): + descrpt_type = config["type"] + assert descrpt_type in ["dpa1", "se_atten"] + return f'stat_file_dpa1_rcut{config["rcut"]:.2f}_smth{config["rcut_smth"]:.2f}_sel{config["sel"]}.npz' + + @classmethod + def get_data_process_key(cls, config): + descrpt_type = config["type"] + assert descrpt_type in ["dpa1", "se_atten"] + return {"sel": config["sel"], "rcut": config["rcut"]} + + def forward( + self, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + nlist: torch.Tensor, + mapping: Optional[torch.Tensor] = None, + ): + del mapping + nframes, nloc, nnei = nlist.shape + nall = extended_coord.view(nframes, -1).shape[1] // 3 + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + g1, env_mat, diff, rot_mat, sw = self.se_atten( + nlist, + extended_coord, + extended_atype, + g1_ext, + mapping=None, + ) + if self.concat_output_tebd: + g1 = torch.cat([g1, g1_inp], dim=-1) + return g1, env_mat, diff, rot_mat, sw diff --git a/deepmd/pt/model/descriptor/dpa2.py b/deepmd/pt/model/descriptor/dpa2.py new file mode 100644 index 0000000000..fbdbc91dd9 --- /dev/null +++ b/deepmd/pt/model/descriptor/dpa2.py @@ -0,0 +1,375 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + List, + Optional, +) + +import torch + +from deepmd.pt.model.descriptor import ( + Descriptor, +) +from deepmd.pt.model.network.network import ( + Identity, + Linear, + TypeEmbedNet, +) +from deepmd.pt.utils.nlist import ( + build_multiple_neighbor_list, + get_multiple_nlist_key, +) + +from .repformers import ( + DescrptBlockRepformers, +) +from .se_atten import ( + DescrptBlockSeAtten, +) + + +@Descriptor.register("dpa2") +class DescrptDPA2(Descriptor): + def __init__( + self, + ntypes: int, + repinit_rcut: float, + repinit_rcut_smth: float, + repinit_nsel: int, + repformer_rcut: float, + repformer_rcut_smth: float, + repformer_nsel: int, + # kwargs + tebd_dim: int = 8, + concat_output_tebd: bool = True, + repinit_neuron: List[int] = [25, 50, 100], + repinit_axis_neuron: int = 16, + repinit_set_davg_zero: bool = True, # TODO + repinit_activation="tanh", + # repinit still unclear: + # ffn, ffn_embed_dim, scaling_factor, normalize, + repformer_nlayers: int = 3, + repformer_g1_dim: int = 128, + repformer_g2_dim: int = 16, + repformer_axis_dim: int = 4, + repformer_do_bn_mode: str = "no", + repformer_bn_momentum: float = 0.1, + repformer_update_g1_has_conv: bool = True, + repformer_update_g1_has_drrd: bool = True, + repformer_update_g1_has_grrg: bool = True, + repformer_update_g1_has_attn: bool = True, + repformer_update_g2_has_g1g1: bool = True, + repformer_update_g2_has_attn: bool = True, 
+ repformer_update_h2: bool = False, + repformer_attn1_hidden: int = 64, + repformer_attn1_nhead: int = 4, + repformer_attn2_hidden: int = 16, + repformer_attn2_nhead: int = 4, + repformer_attn2_has_gate: bool = False, + repformer_activation: str = "tanh", + repformer_update_style: str = "res_avg", + repformer_set_davg_zero: bool = True, # TODO + repformer_add_type_ebd_to_seq: bool = False, + type: Optional[ + str + ] = None, # work around the bad design in get_trainer and DpLoaderSet! + rcut: Optional[ + float + ] = None, # work around the bad design in get_trainer and DpLoaderSet! + rcut_smth: Optional[ + float + ] = None, # work around the bad design in get_trainer and DpLoaderSet! + sel: Optional[ + int + ] = None, # work around the bad design in get_trainer and DpLoaderSet! + ): + r"""The DPA-2 descriptor. see https://arxiv.org/abs/2312.15492. + + Parameters + ---------- + ntypes : int + Number of atom types + repinit_rcut : float + The cut-off radius of the repinit block + repinit_rcut_smth : float + From this position the inverse distance smoothly decays + to 0 at the cut-off. Use in the repinit block. + repinit_nsel : int + Maximally possible number of neighbors for repinit block. + repformer_rcut : float + The cut-off radius of the repformer block + repformer_rcut_smth : float + From this position the inverse distance smoothly decays + to 0 at the cut-off. Use in the repformer block. + repformer_nsel : int + Maximally possible number of neighbors for repformer block. + tebd_dim : int + The dimension of atom type embedding + concat_output_tebd : bool + Whether to concat type embedding at the output of the descriptor. + repinit_neuron : List[int] + repinit block: the number of neurons in the embedding net. + repinit_axis_neuron : int + repinit block: the number of dimension of split in the + symmetrization op. + repinit_activation : str + repinit block: the activation function in the embedding net + repformer_nlayers : int + repformers block: the number of repformer layers + repformer_g1_dim : int + repformers block: the dimension of single-atom rep + repformer_g2_dim : int + repformers block: the dimension of invariant pair-atom rep + repformer_axis_dim : int + repformers block: the number of dimension of split in the + symmetrization ops. 
+ repformer_do_bn_mode : bool + repformers block: do batch norm in the repformer layers + repformer_bn_momentum : float + repformers block: moment in the batch normalization + repformer_update_g1_has_conv : bool + repformers block: update the g1 rep with convolution term + repformer_update_g1_has_drrd : bool + repformers block: update the g1 rep with the drrd term + repformer_update_g1_has_grrg : bool + repformers block: update the g1 rep with the grrg term + repformer_update_g1_has_attn : bool + repformers block: update the g1 rep with the localized + self-attention + repformer_update_g2_has_g1g1 : bool + repformers block: update the g2 rep with the g1xg1 term + repformer_update_g2_has_attn : bool + repformers block: update the g2 rep with the gated self-attention + repformer_update_h2 : bool + repformers block: update the h2 rep + repformer_attn1_hidden : int + repformers block: the hidden dimension of localized self-attention + repformer_attn1_nhead : int + repformers block: the number of heads in localized self-attention + repformer_attn2_hidden : int + repformers block: the hidden dimension of gated self-attention + repformer_attn2_nhead : int + repformers block: the number of heads in gated self-attention + repformer_attn2_has_gate : bool + repformers block: has gate in the gated self-attention + repformer_activation : str + repformers block: the activation function in the MLPs. + repformer_update_style : str + repformers block: style of update a rep. + can be res_avg or res_incr. + res_avg updates a rep `u` with: + u = 1/\sqrt{n+1} (u + u_1 + u_2 + ... + u_n) + res_incr updates a rep `u` with: + u = u + 1/\sqrt{n} (u_1 + u_2 + ... + u_n) + repformer_set_davg_zero : bool + repformers block: set the avg to zero in statistics + repformer_add_type_ebd_to_seq : bool + repformers block: concatenate the type embedding at the output. + + Returns + ------- + descriptor: torch.Tensor + the descriptor of shape nb x nloc x g1_dim. + invariant single-atom representation. + g2: torch.Tensor + invariant pair-atom representation. + h2: torch.Tensor + equivariant pair-atom representation. + rot_mat: torch.Tensor + rotation matrix for equivariant fittings + sw: torch.Tensor + The switch function for decaying inverse distance. 
+ + """ + super().__init__() + del type, rcut, rcut_smth, sel + self.repinit = DescrptBlockSeAtten( + repinit_rcut, + repinit_rcut_smth, + repinit_nsel, + ntypes, + attn_layer=0, + neuron=repinit_neuron, + axis_neuron=repinit_axis_neuron, + tebd_dim=tebd_dim, + tebd_input_mode="concat", + # tebd_input_mode='dot_residual_s', + set_davg_zero=repinit_set_davg_zero, + activation=repinit_activation, + ) + self.repformers = DescrptBlockRepformers( + repformer_rcut, + repformer_rcut_smth, + repformer_nsel, + ntypes, + nlayers=repformer_nlayers, + g1_dim=repformer_g1_dim, + g2_dim=repformer_g2_dim, + axis_dim=repformer_axis_dim, + direct_dist=False, + do_bn_mode=repformer_do_bn_mode, + bn_momentum=repformer_bn_momentum, + update_g1_has_conv=repformer_update_g1_has_conv, + update_g1_has_drrd=repformer_update_g1_has_drrd, + update_g1_has_grrg=repformer_update_g1_has_grrg, + update_g1_has_attn=repformer_update_g1_has_attn, + update_g2_has_g1g1=repformer_update_g2_has_g1g1, + update_g2_has_attn=repformer_update_g2_has_attn, + update_h2=repformer_update_h2, + attn1_hidden=repformer_attn1_hidden, + attn1_nhead=repformer_attn1_nhead, + attn2_hidden=repformer_attn2_hidden, + attn2_nhead=repformer_attn2_nhead, + attn2_has_gate=repformer_attn2_has_gate, + activation=repformer_activation, + update_style=repformer_update_style, + set_davg_zero=repformer_set_davg_zero, + smooth=True, + add_type_ebd_to_seq=repformer_add_type_ebd_to_seq, + ) + self.type_embedding = TypeEmbedNet(ntypes, tebd_dim) + if self.repinit.dim_out == self.repformers.dim_in: + self.g1_shape_tranform = Identity() + else: + self.g1_shape_tranform = Linear( + self.repinit.dim_out, + self.repformers.dim_in, + bias=False, + init="glorot", + ) + assert self.repinit.rcut > self.repformers.rcut + assert self.repinit.sel[0] > self.repformers.sel[0] + self.concat_output_tebd = concat_output_tebd + self.tebd_dim = tebd_dim + self.rcut = self.repinit.get_rcut() + self.ntypes = ntypes + self.sel = self.repinit.sel + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_out(self) -> int: + """Returns the output dimension of this descriptor.""" + ret = self.repformers.dim_out + if self.concat_output_tebd: + ret += self.tebd_dim + return ret + + @property + def dim_out(self): + return self.get_dim_out() + + @property + def dim_emb(self): + """Returns the embedding dimension g2.""" + return self.repformers.dim_emb + + def compute_input_stats(self, merged): + sumr, suma, sumn, sumr2, suma2 = [], [], [], [], [] + for ii, descrpt in enumerate([self.repinit, self.repformers]): + merged_tmp = [ + { + key: item[key] if not isinstance(item[key], list) else item[key][ii] + for key in item + } + for item in merged + ] + ( + sumr_tmp, + suma_tmp, + sumn_tmp, + sumr2_tmp, + suma2_tmp, + ) = descrpt.compute_input_stats(merged_tmp) + sumr.append(sumr_tmp) + suma.append(suma_tmp) + sumn.append(sumn_tmp) + sumr2.append(sumr2_tmp) + suma2.append(suma2_tmp) + return sumr, suma, sumn, sumr2, suma2 + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + for ii, descrpt in enumerate([self.repinit, self.repformers]): + descrpt.init_desc_stat(sumr[ii], suma[ii], sumn[ii], sumr2[ii], suma2[ii]) + 
+ @classmethod + def get_stat_name(cls, config): + descrpt_type = config["type"] + assert descrpt_type in ["dpa2"] + return ( + f'stat_file_dpa2_repinit_rcut{config["repinit_rcut"]:.2f}_smth{config["repinit_rcut_smth"]:.2f}_sel{config["repinit_nsel"]}' + f'_repformer_rcut{config["repformer_rcut"]:.2f}_smth{config["repformer_rcut_smth"]:.2f}_sel{config["repformer_nsel"]}.npz' + ) + + @classmethod + def get_data_process_key(cls, config): + descrpt_type = config["type"] + assert descrpt_type in ["dpa2"] + return { + "sel": [config["repinit_nsel"], config["repformer_nsel"]], + "rcut": [config["repinit_rcut"], config["repformer_rcut"]], + } + + def forward( + self, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + nlist: torch.Tensor, + mapping: Optional[torch.Tensor] = None, + ): + nframes, nloc, nnei = nlist.shape + nall = extended_coord.view(nframes, -1).shape[1] // 3 + # nlists + nlist_dict = build_multiple_neighbor_list( + extended_coord, + nlist, + [self.repformers.get_rcut(), self.repinit.get_rcut()], + [self.repformers.get_nsel(), self.repinit.get_nsel()], + ) + # repinit + g1_ext = self.type_embedding(extended_atype) + g1_inp = g1_ext[:, :nloc, :] + g1, _, _, _, _ = self.repinit( + nlist_dict[ + get_multiple_nlist_key(self.repinit.get_rcut(), self.repinit.get_nsel()) + ], + extended_coord, + extended_atype, + g1_ext, + mapping, + ) + # linear to change shape + g1 = self.g1_shape_tranform(g1) + # mapping g1 + assert mapping is not None + mapping_ext = ( + mapping.view(nframes, nall).unsqueeze(-1).expand(-1, -1, g1.shape[-1]) + ) + g1_ext = torch.gather(g1, 1, mapping_ext) + # repformer + g1, g2, h2, rot_mat, sw = self.repformers( + nlist_dict[ + get_multiple_nlist_key( + self.repformers.get_rcut(), self.repformers.get_nsel() + ) + ], + extended_coord, + extended_atype, + g1_ext, + mapping, + ) + if self.concat_output_tebd: + g1 = torch.cat([g1, g1_inp], dim=-1) + return g1, g2, h2, rot_mat, sw diff --git a/deepmd/pt/model/descriptor/env_mat.py b/deepmd/pt/model/descriptor/env_mat.py new file mode 100644 index 0000000000..63181388df --- /dev/null +++ b/deepmd/pt/model/descriptor/env_mat.py @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch + +from deepmd.pt.utils.preprocess import ( + compute_smooth_weight, +) + + +def _make_env_mat_se_a(nlist, coord, rcut: float, ruct_smth: float): + """Make smooth environment matrix.""" + bsz, natoms, nnei = nlist.shape + coord = coord.view(bsz, -1, 3) + mask = nlist >= 0 + nlist = nlist * mask + coord_l = coord[:, :natoms].view(bsz, -1, 1, 3) + index = nlist.view(bsz, -1).unsqueeze(-1).expand(-1, -1, 3) + coord_r = torch.gather(coord, 1, index) + coord_r = coord_r.view(bsz, natoms, nnei, 3) + diff = coord_r - coord_l + length = torch.linalg.norm(diff, dim=-1, keepdim=True) + # for index 0 nloc atom + length = length + ~mask.unsqueeze(-1) + t0 = 1 / length + t1 = diff / length**2 + weight = compute_smooth_weight(length, ruct_smth, rcut) + env_mat_se_a = torch.cat([t0, t1], dim=-1) * weight * mask.unsqueeze(-1) + return env_mat_se_a, diff * mask.unsqueeze(-1), weight + + +def prod_env_mat_se_a( + extended_coord, nlist, atype, mean, stddev, rcut: float, rcut_smth: float +): + """Generate smooth environment matrix from atom coordinates and other context. + + Args: + - extended_coord: Copied atom coordinates with shape [nframes, nall*3]. + - atype: Atom types with shape [nframes, nloc]. + - natoms: Batched atom statisics with shape [len(sec)+2]. + - box: Batched simulation box with shape [nframes, 9]. 
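+    - nlist: Neighbor list with shape [nframes, nloc, nnei].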
+ - mean: Average value of descriptor per element type with shape [len(sec), nnei, 4]. + - stddev: Standard deviation of descriptor per element type with shape [len(sec), nnei, 4]. + - deriv_stddev: StdDev of descriptor derivative per element type with shape [len(sec), nnei, 4, 3]. + - rcut: Cut-off radius. + - rcut_smth: Smooth hyper-parameter for pair force & energy. + + Returns + ------- + - env_mat_se_a: Shape is [nframes, natoms[1]*nnei*4]. + """ + nframes = extended_coord.shape[0] + _env_mat_se_a, diff, switch = _make_env_mat_se_a( + nlist, extended_coord, rcut, rcut_smth + ) # shape [n_atom, dim, 4] + t_avg = mean[atype] # [n_atom, dim, 4] + t_std = stddev[atype] # [n_atom, dim, 4] + env_mat_se_a = (_env_mat_se_a - t_avg) / t_std + return env_mat_se_a, diff, switch diff --git a/deepmd/pt/model/descriptor/gaussian_lcc.py b/deepmd/pt/model/descriptor/gaussian_lcc.py new file mode 100644 index 0000000000..26ec1175b8 --- /dev/null +++ b/deepmd/pt/model/descriptor/gaussian_lcc.py @@ -0,0 +1,315 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch +import torch.nn as nn + +from deepmd.pt.model.descriptor import ( + Descriptor, +) +from deepmd.pt.model.network.network import ( + Evoformer3bEncoder, + GaussianEmbedding, + TypeEmbedNet, +) +from deepmd.pt.utils import ( + env, +) + + +class DescrptGaussianLcc(Descriptor): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + num_pair: int, + embed_dim: int = 768, + kernel_num: int = 128, + pair_embed_dim: int = 64, + num_block: int = 1, + layer_num: int = 12, + attn_head: int = 48, + pair_hidden_dim: int = 16, + ffn_embedding_dim: int = 768, + dropout: float = 0.0, + droppath_prob: float = 0.1, + pair_dropout: float = 0.25, + attention_dropout: float = 0.1, + activation_dropout: float = 0.1, + pre_ln: bool = True, + do_tag_embedding: bool = False, + tag_ener_pref: bool = False, + atomic_sum_gbf: bool = False, + pre_add_seq: bool = True, + tri_update: bool = True, + **kwargs, + ): + """Construct a descriptor of Gaussian Based Local Cluster. + + Args: + - rcut: Cut-off radius. + - rcut_smth: Smooth hyper-parameter for pair force & energy. **Not used in this descriptor**. + - sel: For each element type, how many atoms is selected as neighbors. + - ntypes: Number of atom types. + - num_pair: Number of atom type pairs. Default is 2 * ntypes. + - kernel_num: Number of gaussian kernels. + - embed_dim: Dimension of atomic representation. + - pair_embed_dim: Dimension of pair representation. + - num_block: Number of evoformer blocks. + - layer_num: Number of attention layers. + - attn_head: Number of attention heads. + - pair_hidden_dim: Hidden dimension of pair representation during attention process. + - ffn_embedding_dim: Dimension during feed forward network. + - dropout: Dropout probability of atomic representation. + - droppath_prob: If not zero, it will use drop paths (Stochastic Depth) per sample and ignore `dropout`. + - pair_dropout: Dropout probability of pair representation during triangular update. + - attention_dropout: Dropout probability during attetion process. + - activation_dropout: Dropout probability of pair feed forward network. + - pre_ln: Do previous layer norm or not. + - do_tag_embedding: Add tag embedding to atomic and pair representations. (`tags`, `tags2`, `tags3` must exist) + - atomic_sum_gbf: Add sum of gaussian outputs to atomic representation or not. + - pre_add_seq: Add output of other descriptor (if has) to the atomic representation before attention. 
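+        - tri_update: Whether to apply the triangular update to the pair
+          representation inside each evoformer block.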
+ """ + super().__init__() + self.rcut = rcut + self.rcut_smth = rcut_smth + self.embed_dim = embed_dim + self.num_pair = num_pair + self.kernel_num = kernel_num + self.pair_embed_dim = pair_embed_dim + self.num_block = num_block + self.layer_num = layer_num + self.attention_heads = attn_head + self.pair_hidden_dim = pair_hidden_dim + self.ffn_embedding_dim = ffn_embedding_dim + self.dropout = dropout + self.droppath_prob = droppath_prob + self.pair_dropout = pair_dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.pre_ln = pre_ln + self.do_tag_embedding = do_tag_embedding + self.tag_ener_pref = tag_ener_pref + self.atomic_sum_gbf = atomic_sum_gbf + self.local_cluster = True + self.pre_add_seq = pre_add_seq + self.tri_update = tri_update + + if isinstance(sel, int): + sel = [sel] + + self.ntypes = ntypes + self.sec = torch.tensor(sel) + self.nnei = sum(sel) + + if self.do_tag_embedding: + self.tag_encoder = nn.Embedding(3, self.embed_dim) + self.tag_encoder2 = nn.Embedding(2, self.embed_dim) + self.tag_type_embedding = TypeEmbedNet(10, pair_embed_dim) + self.edge_type_embedding = nn.Embedding( + (ntypes + 1) * (ntypes + 1), + pair_embed_dim, + padding_idx=(ntypes + 1) * (ntypes + 1) - 1, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + ) + self.gaussian_encoder = GaussianEmbedding( + rcut, + kernel_num, + num_pair, + embed_dim, + pair_embed_dim, + sel, + ntypes, + atomic_sum_gbf, + ) + self.backbone = Evoformer3bEncoder( + self.nnei, + layer_num=self.layer_num, + attn_head=self.attention_heads, + atomic_dim=self.embed_dim, + pair_dim=self.pair_embed_dim, + pair_hidden_dim=self.pair_hidden_dim, + ffn_embedding_dim=self.ffn_embedding_dim, + dropout=self.dropout, + droppath_prob=self.droppath_prob, + pair_dropout=self.pair_dropout, + attention_dropout=self.attention_dropout, + activation_dropout=self.activation_dropout, + pre_ln=self.pre_ln, + tri_update=self.tri_update, + ) + + @property + def dim_out(self): + """Returns the output dimension of atomic representation.""" + return self.embed_dim + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.embed_dim + + @property + def dim_emb(self): + """Returns the output dimension of pair representation.""" + return self.pair_embed_dim + + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + return [], [], [], [], [] + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + pass + + def forward( + self, + extended_coord, + nlist, + atype, + nlist_type, + nlist_loc=None, + atype_tebd=None, + nlist_tebd=None, + seq_input=None, + ): + """Calculate the atomic and pair representations of this descriptor. + + Args: + - extended_coord: Copied atom coordinates with shape [nframes, nall, 3]. + - nlist: Neighbor list with shape [nframes, nloc, nnei]. + - atype: Atom type with shape [nframes, nloc]. + - nlist_type: Atom type of neighbors with shape [nframes, nloc, nnei]. + - nlist_loc: Local index of neighbor list with shape [nframes, nloc, nnei]. + - atype_tebd: Atomic type embedding with shape [nframes, nloc, tebd_dim]. + - nlist_tebd: Type embeddings of neighbor with shape [nframes, nloc, nnei, tebd_dim]. + - seq_input: The sequential input from other descriptor with + shape [nframes, nloc, tebd_dim] or [nframes * nloc, 1 + nnei, tebd_dim] + + Returns + ------- + - result: descriptor with shape [nframes, nloc, self.filter_neuron[-1] * self.axis_neuron]. 
+ - ret: environment matrix with shape [nframes, nloc, self.neei, out_size] + """ + nframes, nloc = nlist.shape[:2] + nall = extended_coord.shape[1] + nlist2 = torch.cat( + [ + torch.arange(0, nloc, device=nlist.device) + .reshape(1, nloc, 1) + .expand(nframes, -1, -1), + nlist, + ], + dim=-1, + ) + nlist_loc2 = torch.cat( + [ + torch.arange(0, nloc, device=nlist_loc.device) + .reshape(1, nloc, 1) + .expand(nframes, -1, -1), + nlist_loc, + ], + dim=-1, + ) + nlist_type2 = torch.cat([atype.reshape(nframes, nloc, 1), nlist_type], dim=-1) + nnei2_mask = nlist2 != -1 + padding_mask = nlist2 == -1 + nlist2 = nlist2 * nnei2_mask + nlist_loc2 = nlist_loc2 * nnei2_mask + + # nframes x nloc x (1 + nnei2) x (1 + nnei2) + pair_mask = nnei2_mask.unsqueeze(-1) * nnei2_mask.unsqueeze(-2) + # nframes x nloc x (1 + nnei2) x (1 + nnei2) x head + attn_mask = torch.zeros( + [nframes, nloc, 1 + self.nnei, 1 + self.nnei, self.attention_heads], + device=nlist.device, + dtype=extended_coord.dtype, + ) + attn_mask.masked_fill_(padding_mask.unsqueeze(2).unsqueeze(-1), float("-inf")) + # (nframes x nloc) x head x (1 + nnei2) x (1 + nnei2) + attn_mask = ( + attn_mask.reshape( + nframes * nloc, 1 + self.nnei, 1 + self.nnei, self.attention_heads + ) + .permute(0, 3, 1, 2) + .contiguous() + ) + + # Atomic feature + # [(nframes x nloc) x (1 + nnei2) x tebd_dim] + atom_feature = torch.gather( + atype_tebd, + dim=1, + index=nlist_loc2.reshape(nframes, -1) + .unsqueeze(-1) + .expand(-1, -1, self.embed_dim), + ).reshape(nframes * nloc, 1 + self.nnei, self.embed_dim) + if self.pre_add_seq and seq_input is not None: + first_dim = seq_input.shape[0] + if first_dim == nframes * nloc: + atom_feature += seq_input + elif first_dim == nframes: + atom_feature_seq = torch.gather( + seq_input, + dim=1, + index=nlist_loc2.reshape(nframes, -1) + .unsqueeze(-1) + .expand(-1, -1, self.embed_dim), + ).reshape(nframes * nloc, 1 + self.nnei, self.embed_dim) + atom_feature += atom_feature_seq + else: + raise RuntimeError + atom_feature = atom_feature * nnei2_mask.reshape( + nframes * nloc, 1 + self.nnei, 1 + ) + + # Pair feature + # [(nframes x nloc) x (1 + nnei2)] + nlist_type2_reshape = nlist_type2.reshape(nframes * nloc, 1 + self.nnei) + # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2)] + edge_type = nlist_type2_reshape.unsqueeze(-1) * ( + self.ntypes + 1 + ) + nlist_type2_reshape.unsqueeze(-2) + # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x pair_dim] + edge_feature = self.edge_type_embedding(edge_type) + + # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x 2] + edge_type_2dim = torch.cat( + [ + nlist_type2_reshape.view(nframes * nloc, 1 + self.nnei, 1, 1).expand( + -1, -1, 1 + self.nnei, -1 + ), + nlist_type2_reshape.view(nframes * nloc, 1, 1 + self.nnei, 1).expand( + -1, 1 + self.nnei, -1, -1 + ) + + self.ntypes, + ], + dim=-1, + ) + # [(nframes x nloc) x (1 + nnei2) x 3] + coord_selected = torch.gather( + extended_coord.unsqueeze(1) + .expand(-1, nloc, -1, -1) + .reshape(nframes * nloc, nall, 3), + dim=1, + index=nlist2.reshape(nframes * nloc, 1 + self.nnei, 1).expand(-1, -1, 3), + ) + + # Update pair features (or and atomic features) with gbf features + # delta_pos: [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x 3]. 
+ atomic_feature, pair_feature, delta_pos = self.gaussian_encoder( + coord_selected, atom_feature, edge_type_2dim, edge_feature + ) + # [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x pair_dim] + attn_bias = pair_feature + + # output: [(nframes x nloc) x (1 + nnei2) x tebd_dim] + # pair: [(nframes x nloc) x (1 + nnei2) x (1 + nnei2) x pair_dim] + output, pair = self.backbone( + atomic_feature, + pair=attn_bias, + attn_mask=attn_mask, + pair_mask=pair_mask, + atom_mask=nnei2_mask.reshape(nframes * nloc, 1 + self.nnei), + ) + + return output, pair, delta_pos, None diff --git a/deepmd/pt/model/descriptor/hybrid.py b/deepmd/pt/model/descriptor/hybrid.py new file mode 100644 index 0000000000..11bbc80729 --- /dev/null +++ b/deepmd/pt/model/descriptor/hybrid.py @@ -0,0 +1,257 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + List, + Optional, +) + +import torch + +from deepmd.pt.model.descriptor import ( + DescriptorBlock, +) +from deepmd.pt.model.network.network import ( + Identity, + Linear, +) + + +@DescriptorBlock.register("hybrid") +class DescrptBlockHybrid(DescriptorBlock): + def __init__( + self, + list, + ntypes: int, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + hybrid_mode: str = "concat", + **kwargs, + ): + """Construct a hybrid descriptor. + + Args: + - descriptor_list: list of descriptors. + - descriptor_param: descriptor configs. + """ + super().__init__() + supported_descrpt = ["se_atten", "se_uni"] + descriptor_list = [] + for descriptor_param_item in list: + descriptor_type_tmp = descriptor_param_item["type"] + assert ( + descriptor_type_tmp in supported_descrpt + ), f"Only descriptors in {supported_descrpt} are supported for `hybrid` descriptor!" + descriptor_param_item["ntypes"] = ntypes + if descriptor_type_tmp == "se_atten": + descriptor_param_item["tebd_dim"] = tebd_dim + descriptor_param_item["tebd_input_mode"] = tebd_input_mode + descriptor_list.append(DescriptorBlock(**descriptor_param_item)) + self.descriptor_list = torch.nn.ModuleList(descriptor_list) + self.descriptor_param = list + self.rcut = [descrpt.rcut for descrpt in self.descriptor_list] + self.sec = [descrpt.sec for descrpt in self.descriptor_list] + self.sel = [descrpt.sel for descrpt in self.descriptor_list] + self.split_sel = [sum(ii) for ii in self.sel] + self.local_cluster_list = [ + descrpt.local_cluster for descrpt in self.descriptor_list + ] + self.local_cluster = True in self.local_cluster_list + self.hybrid_mode = hybrid_mode + self.tebd_dim = tebd_dim + assert self.hybrid_mode in ["concat", "sequential"] + sequential_transform = [] + if self.hybrid_mode == "sequential": + for ii in range(len(descriptor_list) - 1): + if descriptor_list[ii].dim_out == descriptor_list[ii + 1].dim_in: + sequential_transform.append(Identity()) + else: + sequential_transform.append( + Linear( + descriptor_list[ii].dim_out, + descriptor_list[ii + 1].dim_in, + bias=False, + init="glorot", + ) + ) + sequential_transform.append(Identity()) + self.sequential_transform = torch.nn.ModuleList(sequential_transform) + self.ntypes = ntypes + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return [sum(ii) for ii in self.get_sel()] + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_out(self) 
-> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + if self.hybrid_mode == "concat": + return sum([descrpt.dim_out for descrpt in self.descriptor_list]) + elif self.hybrid_mode == "sequential": + return self.descriptor_list[-1].dim_out + else: + raise RuntimeError + + @property + def dim_emb_list(self) -> List[int]: + """Returns the output dimension list of embeddings.""" + return [descrpt.dim_emb for descrpt in self.descriptor_list] + + @property + def dim_emb(self): + """Returns the output dimension of embedding.""" + if self.hybrid_mode == "concat": + return sum(self.dim_emb_list) + elif self.hybrid_mode == "sequential": + return self.descriptor_list[-1].dim_emb + else: + raise RuntimeError + + def share_params(self, base_class, shared_level, resume=False): + assert ( + self.__class__ == base_class.__class__ + ), "Only descriptors of the same type can share params!" + if shared_level == 0: + for ii, des in enumerate(self.descriptor_list): + self.descriptor_list[ii].share_params( + base_class.descriptor_list[ii], shared_level, resume=resume + ) + if self.hybrid_mode == "sequential": + self.sequential_transform = base_class.sequential_transform + else: + raise NotImplementedError + + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + sumr, suma, sumn, sumr2, suma2 = [], [], [], [], [] + for ii, descrpt in enumerate(self.descriptor_list): + merged_tmp = [ + { + key: item[key] if not isinstance(item[key], list) else item[key][ii] + for key in item + } + for item in merged + ] + ( + sumr_tmp, + suma_tmp, + sumn_tmp, + sumr2_tmp, + suma2_tmp, + ) = descrpt.compute_input_stats(merged_tmp) + sumr.append(sumr_tmp) + suma.append(suma_tmp) + sumn.append(sumn_tmp) + sumr2.append(sumr2_tmp) + suma2.append(suma2_tmp) + return sumr, suma, sumn, sumr2, suma2 + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + for ii, descrpt in enumerate(self.descriptor_list): + descrpt.init_desc_stat(sumr[ii], suma[ii], sumn[ii], sumr2[ii], suma2[ii]) + + def forward( + self, + nlist: torch.Tensor, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + extended_atype_embd: Optional[torch.Tensor] = None, + mapping: Optional[torch.Tensor] = None, + ): + """Calculate decoded embedding for each atom. + + Args: + - extended_coord: Tell atom coordinates with shape [nframes, natoms[1]*3]. + - nlist: Tell atom types with shape [nframes, natoms[1]]. + - atype: Tell atom count and element count. Its shape is [2+self.ntypes]. + - nlist_type: Tell simulation box with shape [nframes, 9]. + - atype_tebd: Tell simulation box with shape [nframes, 9]. + - nlist_tebd: Tell simulation box with shape [nframes, 9]. + + Returns + ------- + - result: descriptor with shape [nframes, nloc, self.filter_neuron[-1] * self.axis_neuron]. 
+ - ret: environment matrix with shape [nframes, nloc, self.neei, out_size] + """ + nlist_list = list(torch.split(nlist, self.split_sel, -1)) + nframes, nloc, nnei = nlist.shape + concat_rot_mat = True + if self.hybrid_mode == "concat": + out_descriptor = [] + # out_env_mat = [] + out_rot_mat_list = [] + # out_diff = [] + for ii, descrpt in enumerate(self.descriptor_list): + descriptor, env_mat, diff, rot_mat, sw = descrpt( + nlist_list[ii], + extended_coord, + extended_atype, + extended_atype_embd, + mapping, + ) + if descriptor.shape[0] == nframes * nloc: + # [nframes * nloc, 1 + nnei, emb_dim] + descriptor = descriptor[:, 0, :].reshape(nframes, nloc, -1) + out_descriptor.append(descriptor) + # out_env_mat.append(env_mat) + # out_diff.append(diff) + out_rot_mat_list.append(rot_mat) + if rot_mat is None: + concat_rot_mat = False + out_descriptor = torch.concat(out_descriptor, dim=-1) + if concat_rot_mat: + out_rot_mat = torch.concat(out_rot_mat_list, dim=-2) + else: + out_rot_mat = None + return out_descriptor, None, None, out_rot_mat, sw + elif self.hybrid_mode == "sequential": + assert extended_atype_embd is not None + assert mapping is not None + nframes, nloc, nnei = nlist.shape + nall = extended_coord.view(nframes, -1).shape[1] // 3 + seq_input_ext = extended_atype_embd + seq_input = ( + seq_input_ext[:, :nloc, :] if len(self.descriptor_list) == 0 else None + ) + env_mat, diff, rot_mat, sw = None, None, None, None + env_mat_list, diff_list = [], [] + for ii, (descrpt, seq_transform) in enumerate( + zip(self.descriptor_list, self.sequential_transform) + ): + seq_output, env_mat, diff, rot_mat, sw = descrpt( + nlist_list[ii], + extended_coord, + extended_atype, + seq_input_ext, + mapping, + ) + seq_input = seq_transform(seq_output) + mapping_ext = ( + mapping.view(nframes, nall) + .unsqueeze(-1) + .expand(-1, -1, seq_input.shape[-1]) + ) + seq_input_ext = torch.gather(seq_input, 1, mapping_ext) + env_mat_list.append(env_mat) + diff_list.append(diff) + return seq_input, env_mat_list, diff_list, rot_mat, sw + else: + raise RuntimeError diff --git a/deepmd/pt/model/descriptor/repformer_layer.py b/deepmd/pt/model/descriptor/repformer_layer.py new file mode 100644 index 0000000000..21ae0ff6f3 --- /dev/null +++ b/deepmd/pt/model/descriptor/repformer_layer.py @@ -0,0 +1,749 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + List, +) + +import torch + +from deepmd.pt.model.network.network import ( + SimpleLinear, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.utils import ( + get_activation_fn, +) + + +def torch_linear(*args, **kwargs): + return torch.nn.Linear( + *args, **kwargs, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + + +def _make_nei_g1( + g1_ext: torch.Tensor, + nlist: torch.Tensor, +) -> torch.Tensor: + # nlist: nb x nloc x nnei + nb, nloc, nnei = nlist.shape + # g1_ext: nb x nall x ng1 + ng1 = g1_ext.shape[-1] + # index: nb x (nloc x nnei) x ng1 + index = nlist.reshape(nb, nloc * nnei).unsqueeze(-1).expand(-1, -1, ng1) + # gg1 : nb x (nloc x nnei) x ng1 + gg1 = torch.gather(g1_ext, dim=1, index=index) + # gg1 : nb x nloc x nnei x ng1 + gg1 = gg1.view(nb, nloc, nnei, ng1) + return gg1 + + +def _apply_nlist_mask( + gg: torch.Tensor, + nlist_mask: torch.Tensor, +) -> torch.Tensor: + # gg: nf x nloc x nnei x ng + # msk: nf x nloc x nnei + return gg.masked_fill(~nlist_mask.unsqueeze(-1), 0.0) + + +def _apply_switch(gg: torch.Tensor, sw: torch.Tensor) -> torch.Tensor: + # gg: nf x nloc x nnei x ng + # sw: nf x nloc x 
nnei + return gg * sw.unsqueeze(-1) + + +def _apply_h_norm( + hh: torch.Tensor, # nf x nloc x nnei x 3 +) -> torch.Tensor: + """Normalize h by the std of vector length. + do not have an idea if this is a good way. + """ + nf, nl, nnei, _ = hh.shape + # nf x nloc x nnei + normh = torch.linalg.norm(hh, dim=-1) + # nf x nloc + std = torch.std(normh, dim=-1) + # nf x nloc x nnei x 3 + hh = hh[:, :, :, :] / (1.0 + std[:, :, None, None]) + return hh + + +class Atten2Map(torch.nn.Module): + def __init__( + self, + ni: int, + nd: int, + nh: int, + has_gate: bool = False, # apply gate to attn map + smooth: bool = True, + attnw_shift: float = 20.0, + ): + super().__init__() + self.ni = ni + self.nd = nd + self.nh = nh + self.mapqk = SimpleLinear(ni, nd * 2 * nh, bias=False) + self.has_gate = has_gate + self.smooth = smooth + self.attnw_shift = attnw_shift + + def forward( + self, + g2: torch.Tensor, # nb x nloc x nnei x ng2 + h2: torch.Tensor, # nb x nloc x nnei x 3 + nlist_mask: torch.Tensor, # nb x nloc x nnei + sw: torch.Tensor, # nb x nloc x nnei + ) -> torch.Tensor: + ( + nb, + nloc, + nnei, + _, + ) = g2.shape + nd, nh = self.nd, self.nh + # nb x nloc x nnei x nd x (nh x 2) + g2qk = self.mapqk(g2).view(nb, nloc, nnei, nd, nh * 2) + # nb x nloc x (nh x 2) x nnei x nd + g2qk = torch.permute(g2qk, (0, 1, 4, 2, 3)) + # nb x nloc x nh x nnei x nd + g2q, g2k = torch.split(g2qk, nh, dim=2) + # g2q = torch.nn.functional.normalize(g2q, dim=-1) + # g2k = torch.nn.functional.normalize(g2k, dim=-1) + # nb x nloc x nh x nnei x nnei + attnw = torch.matmul(g2q, torch.transpose(g2k, -1, -2)) / nd**0.5 + if self.has_gate: + gate = torch.matmul(h2, torch.transpose(h2, -1, -2)).unsqueeze(-3) + attnw = attnw * gate + # mask the attenmap, nb x nloc x 1 x 1 x nnei + attnw_mask = ~nlist_mask.unsqueeze(2).unsqueeze(2) + # mask the attenmap, nb x nloc x 1 x nnei x 1 + attnw_mask_c = ~nlist_mask.unsqueeze(2).unsqueeze(-1) + if self.smooth: + attnw = (attnw + self.attnw_shift) * sw[:, :, None, :, None] * sw[ + :, :, None, None, : + ] - self.attnw_shift + else: + attnw = attnw.masked_fill( + attnw_mask, + float("-inf"), + ) + attnw = torch.softmax(attnw, dim=-1) + attnw = attnw.masked_fill( + attnw_mask, + 0.0, + ) + # nb x nloc x nh x nnei x nnei + attnw = attnw.masked_fill( + attnw_mask_c, + 0.0, + ) + if self.smooth: + attnw = attnw * sw[:, :, None, :, None] * sw[:, :, None, None, :] + # nb x nloc x nnei x nnei + h2h2t = torch.matmul(h2, torch.transpose(h2, -1, -2)) / 3.0**0.5 + # nb x nloc x nh x nnei x nnei + ret = attnw * h2h2t[:, :, None, :, :] + # ret = torch.softmax(g2qk, dim=-1) + # nb x nloc x nnei x nnei x nh + ret = torch.permute(ret, (0, 1, 3, 4, 2)) + return ret + + +class Atten2MultiHeadApply(torch.nn.Module): + def __init__( + self, + ni: int, + nh: int, + ): + super().__init__() + self.ni = ni + self.nh = nh + self.mapv = SimpleLinear(ni, ni * nh, bias=False) + self.head_map = SimpleLinear(ni * nh, ni) + + def forward( + self, + AA: torch.Tensor, # nf x nloc x nnei x nnei x nh + g2: torch.Tensor, # nf x nloc x nnei x ng2 + ) -> torch.Tensor: + nf, nloc, nnei, ng2 = g2.shape + nh = self.nh + # nf x nloc x nnei x ng2 x nh + g2v = self.mapv(g2).view(nf, nloc, nnei, ng2, nh) + # nf x nloc x nh x nnei x ng2 + g2v = torch.permute(g2v, (0, 1, 4, 2, 3)) + # g2v = torch.nn.functional.normalize(g2v, dim=-1) + # nf x nloc x nh x nnei x nnei + AA = torch.permute(AA, (0, 1, 4, 2, 3)) + # nf x nloc x nh x nnei x ng2 + ret = torch.matmul(AA, g2v) + # nf x nloc x nnei x ng2 x nh + ret = torch.permute(ret, (0, 1, 3, 4, 
2)).reshape(nf, nloc, nnei, (ng2 * nh)) + # nf x nloc x nnei x ng2 + return self.head_map(ret) + + +class Atten2EquiVarApply(torch.nn.Module): + def __init__( + self, + ni: int, + nh: int, + ): + super().__init__() + self.ni = ni + self.nh = nh + self.head_map = SimpleLinear(nh, 1, bias=False) + + def forward( + self, + AA: torch.Tensor, # nf x nloc x nnei x nnei x nh + h2: torch.Tensor, # nf x nloc x nnei x 3 + ) -> torch.Tensor: + nf, nloc, nnei, _ = h2.shape + nh = self.nh + # nf x nloc x nh x nnei x nnei + AA = torch.permute(AA, (0, 1, 4, 2, 3)) + h2m = torch.unsqueeze(h2, dim=2) + # nf x nloc x nh x nnei x 3 + h2m = torch.tile(h2m, [1, 1, nh, 1, 1]) + # nf x nloc x nh x nnei x 3 + ret = torch.matmul(AA, h2m) + # nf x nloc x nnei x 3 x nh + ret = torch.permute(ret, (0, 1, 3, 4, 2)).view(nf, nloc, nnei, 3, nh) + # nf x nloc x nnei x 3 + return torch.squeeze(self.head_map(ret), dim=-1) + + +class LocalAtten(torch.nn.Module): + def __init__( + self, + ni: int, + nd: int, + nh: int, + smooth: bool = True, + attnw_shift: float = 20.0, + ): + super().__init__() + self.ni = ni + self.nd = nd + self.nh = nh + self.mapq = SimpleLinear(ni, nd * 1 * nh, bias=False) + self.mapkv = SimpleLinear(ni, (nd + ni) * nh, bias=False) + self.head_map = SimpleLinear(ni * nh, ni) + self.smooth = smooth + self.attnw_shift = attnw_shift + + def forward( + self, + g1: torch.Tensor, # nb x nloc x ng1 + gg1: torch.Tensor, # nb x nloc x nnei x ng1 + nlist_mask: torch.Tensor, # nb x nloc x nnei + sw: torch.Tensor, # nb x nloc x nnei + ) -> torch.Tensor: + nb, nloc, nnei = nlist_mask.shape + ni, nd, nh = self.ni, self.nd, self.nh + assert ni == g1.shape[-1] + assert ni == gg1.shape[-1] + # nb x nloc x nd x nh + g1q = self.mapq(g1).view(nb, nloc, nd, nh) + # nb x nloc x nh x nd + g1q = torch.permute(g1q, (0, 1, 3, 2)) + # nb x nloc x nnei x (nd+ni) x nh + gg1kv = self.mapkv(gg1).view(nb, nloc, nnei, nd + ni, nh) + gg1kv = torch.permute(gg1kv, (0, 1, 4, 2, 3)) + # nb x nloc x nh x nnei x nd, nb x nloc x nh x nnei x ng1 + gg1k, gg1v = torch.split(gg1kv, [nd, ni], dim=-1) + + # nb x nloc x nh x 1 x nnei + attnw = torch.matmul(g1q.unsqueeze(-2), torch.transpose(gg1k, -1, -2)) / nd**0.5 + # nb x nloc x nh x nnei + attnw = attnw.squeeze(-2) + # mask the attenmap, nb x nloc x 1 x nnei + attnw_mask = ~nlist_mask.unsqueeze(-2) + # nb x nloc x nh x nnei + if self.smooth: + attnw = (attnw + self.attnw_shift) * sw.unsqueeze(-2) - self.attnw_shift + else: + attnw = attnw.masked_fill( + attnw_mask, + float("-inf"), + ) + attnw = torch.softmax(attnw, dim=-1) + attnw = attnw.masked_fill( + attnw_mask, + 0.0, + ) + if self.smooth: + attnw = attnw * sw.unsqueeze(-2) + + # nb x nloc x nh x ng1 + ret = ( + torch.matmul(attnw.unsqueeze(-2), gg1v).squeeze(-2).view(nb, nloc, nh * ni) + ) + # nb x nloc x ng1 + ret = self.head_map(ret) + return ret + + +class RepformerLayer(torch.nn.Module): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + g1_dim=128, + g2_dim=16, + axis_dim: int = 4, + update_chnnl_2: bool = True, + do_bn_mode: str = "no", + bn_momentum: float = 0.1, + update_g1_has_conv: bool = True, + update_g1_has_drrd: bool = True, + update_g1_has_grrg: bool = True, + update_g1_has_attn: bool = True, + update_g2_has_g1g1: bool = True, + update_g2_has_attn: bool = True, + update_h2: bool = False, + attn1_hidden: int = 64, + attn1_nhead: int = 4, + attn2_hidden: int = 16, + attn2_nhead: int = 4, + attn2_has_gate: bool = False, + activation: str = "tanh", + update_style: str = "res_avg", + set_davg_zero: bool = 
True, # TODO + smooth: bool = True, + ): + super().__init__() + self.epsilon = 1e-4 # protection of 1./nnei + self.rcut = rcut + self.rcut_smth = rcut_smth + self.ntypes = ntypes + sel = [sel] if isinstance(sel, int) else sel + self.nnei = sum(sel) + assert len(sel) == 1 + self.sel = torch.tensor(sel) + self.sec = self.sel + self.axis_dim = axis_dim + self.set_davg_zero = set_davg_zero + self.do_bn_mode = do_bn_mode + self.bn_momentum = bn_momentum + self.act = get_activation_fn(activation) + self.update_g1_has_grrg = update_g1_has_grrg + self.update_g1_has_drrd = update_g1_has_drrd + self.update_g1_has_conv = update_g1_has_conv + self.update_g1_has_attn = update_g1_has_attn + self.update_chnnl_2 = update_chnnl_2 + self.update_g2_has_g1g1 = update_g2_has_g1g1 if self.update_chnnl_2 else False + self.update_g2_has_attn = update_g2_has_attn if self.update_chnnl_2 else False + self.update_h2 = update_h2 if self.update_chnnl_2 else False + del update_g2_has_g1g1, update_g2_has_attn, update_h2 + self.update_style = update_style + self.smooth = smooth + self.g1_dim = g1_dim + self.g2_dim = g2_dim + + g1_in_dim = self.cal_1_dim(g1_dim, g2_dim, self.axis_dim) + self.linear1 = SimpleLinear(g1_in_dim, g1_dim) + self.linear2 = None + self.proj_g1g2 = None + self.proj_g1g1g2 = None + self.attn2g_map = None + self.attn2_mh_apply = None + self.attn2_lm = None + self.attn2h_map = None + self.attn2_ev_apply = None + self.loc_attn = None + + if self.update_chnnl_2: + self.linear2 = SimpleLinear(g2_dim, g2_dim) + if self.update_g1_has_conv: + self.proj_g1g2 = SimpleLinear(g1_dim, g2_dim, bias=False) + if self.update_g2_has_g1g1: + self.proj_g1g1g2 = SimpleLinear(g1_dim, g2_dim, bias=False) + if self.update_g2_has_attn: + self.attn2g_map = Atten2Map( + g2_dim, attn2_hidden, attn2_nhead, attn2_has_gate, self.smooth + ) + self.attn2_mh_apply = Atten2MultiHeadApply(g2_dim, attn2_nhead) + self.attn2_lm = torch.nn.LayerNorm( + g2_dim, + elementwise_affine=True, + device=env.DEVICE, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + ) + if self.update_h2: + self.attn2h_map = Atten2Map( + g2_dim, attn2_hidden, attn2_nhead, attn2_has_gate, self.smooth + ) + self.attn2_ev_apply = Atten2EquiVarApply(g2_dim, attn2_nhead) + if self.update_g1_has_attn: + self.loc_attn = LocalAtten(g1_dim, attn1_hidden, attn1_nhead, self.smooth) + + if self.do_bn_mode == "uniform": + self.bn1 = self._bn_layer() + self.bn2 = self._bn_layer() + elif self.do_bn_mode == "component": + self.bn1 = self._bn_layer(nf=g1_dim) + self.bn2 = self._bn_layer(nf=g2_dim) + elif self.do_bn_mode == "no": + self.bn1, self.bn2 = None, None + else: + raise RuntimeError(f"unknown bn_mode {self.do_bn_mode}") + + def cal_1_dim(self, g1d: int, g2d: int, ax: int) -> int: + ret = g1d + if self.update_g1_has_grrg: + ret += g2d * ax + if self.update_g1_has_drrd: + ret += g1d * ax + if self.update_g1_has_conv: + ret += g2d + return ret + + def _update_h2( + self, + g2: torch.Tensor, + h2: torch.Tensor, + nlist_mask: torch.Tensor, + sw: torch.Tensor, + ) -> torch.Tensor: + assert self.attn2h_map is not None + assert self.attn2_ev_apply is not None + nb, nloc, nnei, _ = g2.shape + # # nb x nloc x nnei x nh2 + # h2_1 = self.attn2_ev_apply(AA, h2) + # h2_update.append(h2_1) + # nb x nloc x nnei x nnei x nh + AAh = self.attn2h_map(g2, h2, nlist_mask, sw) + # nb x nloc x nnei x nh2 + h2_1 = self.attn2_ev_apply(AAh, h2) + return h2_1 + + def _update_g1_conv( + self, + gg1: torch.Tensor, + g2: torch.Tensor, + nlist_mask: torch.Tensor, + sw: torch.Tensor, + ) -> torch.Tensor: + assert 
self.proj_g1g2 is not None + nb, nloc, nnei, _ = g2.shape + ng1 = gg1.shape[-1] + ng2 = g2.shape[-1] + # gg1 : nb x nloc x nnei x ng2 + gg1 = self.proj_g1g2(gg1).view(nb, nloc, nnei, ng2) + # nb x nloc x nnei x ng2 + gg1 = _apply_nlist_mask(gg1, nlist_mask) + if not self.smooth: + # normalized by number of neighbors, not smooth + # nb x nloc x 1 + invnnei = 1.0 / (self.epsilon + torch.sum(nlist_mask, dim=-1)).unsqueeze(-1) + else: + gg1 = _apply_switch(gg1, sw) + invnnei = (1.0 / float(nnei)) * torch.ones( + (nb, nloc, 1), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + # nb x nloc x ng2 + g1_11 = torch.sum(g2 * gg1, dim=2) * invnnei + return g1_11 + + def _cal_h2g2( + self, + g2: torch.Tensor, + h2: torch.Tensor, + nlist_mask: torch.Tensor, + sw: torch.Tensor, + ) -> torch.Tensor: + # g2: nf x nloc x nnei x ng2 + # h2: nf x nloc x nnei x 3 + # msk: nf x nloc x nnei + nb, nloc, nnei, _ = g2.shape + ng2 = g2.shape[-1] + # nb x nloc x nnei x ng2 + g2 = _apply_nlist_mask(g2, nlist_mask) + if not self.smooth: + # nb x nloc + invnnei = 1.0 / (self.epsilon + torch.sum(nlist_mask, dim=-1)) + # nb x nloc x 1 x 1 + invnnei = invnnei.unsqueeze(-1).unsqueeze(-1) + else: + g2 = _apply_switch(g2, sw) + invnnei = (1.0 / float(nnei)) * torch.ones( + (nb, nloc, 1, 1), dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + # nb x nloc x 3 x ng2 + h2g2 = torch.matmul(torch.transpose(h2, -1, -2), g2) * invnnei + return h2g2 + + def _cal_grrg(self, h2g2: torch.Tensor) -> torch.Tensor: + # nb x nloc x 3 x ng2 + nb, nloc, _, ng2 = h2g2.shape + # nb x nloc x 3 x axis + h2g2m = torch.split(h2g2, self.axis_dim, dim=-1)[0] + # nb x nloc x axis x ng2 + g1_13 = torch.matmul(torch.transpose(h2g2m, -1, -2), h2g2) / (3.0**1) + # nb x nloc x (axisxng2) + g1_13 = g1_13.view(nb, nloc, self.axis_dim * ng2) + return g1_13 + + def _update_g1_grrg( + self, + g2: torch.Tensor, + h2: torch.Tensor, + nlist_mask: torch.Tensor, + sw: torch.Tensor, + ) -> torch.Tensor: + # g2: nf x nloc x nnei x ng2 + # h2: nf x nloc x nnei x 3 + # msk: nf x nloc x nnei + nb, nloc, nnei, _ = g2.shape + ng2 = g2.shape[-1] + # nb x nloc x 3 x ng2 + h2g2 = self._cal_h2g2(g2, h2, nlist_mask, sw) + # nb x nloc x (axisxng2) + g1_13 = self._cal_grrg(h2g2) + return g1_13 + + def _update_g2_g1g1( + self, + g1: torch.Tensor, # nb x nloc x ng1 + gg1: torch.Tensor, # nb x nloc x nnei x ng1 + nlist_mask: torch.Tensor, # nb x nloc x nnei + sw: torch.Tensor, # nb x nloc x nnei + ) -> torch.Tensor: + ret = g1.unsqueeze(-2) * gg1 + # nb x nloc x nnei x ng1 + ret = _apply_nlist_mask(ret, nlist_mask) + if self.smooth: + ret = _apply_switch(ret, sw) + return ret + + def _apply_bn( + self, + bn_number: int, + gg: torch.Tensor, + ): + if self.do_bn_mode == "uniform": + return self._apply_bn_uni(bn_number, gg) + elif self.do_bn_mode == "component": + return self._apply_bn_comp(bn_number, gg) + else: + return gg + + def _apply_nb_1(self, bn_number: int, gg: torch.Tensor) -> torch.Tensor: + nb, nl, nf = gg.shape + gg = gg.view([nb, 1, nl * nf]) + if bn_number == 1: + assert self.bn1 is not None + gg = self.bn1(gg) + else: + assert self.bn2 is not None + gg = self.bn2(gg) + return gg.view([nb, nl, nf]) + + def _apply_nb_2( + self, + bn_number: int, + gg: torch.Tensor, + ) -> torch.Tensor: + nb, nl, nnei, nf = gg.shape + gg = gg.view([nb, 1, nl * nnei * nf]) + if bn_number == 1: + assert self.bn1 is not None + gg = self.bn1(gg) + else: + assert self.bn2 is not None + gg = self.bn2(gg) + return gg.view([nb, nl, nnei, nf]) + + def _apply_bn_uni( + self, + 
bn_number: int, + gg: torch.Tensor, + mode: str = "1", + ) -> torch.Tensor: + if len(gg.shape) == 3: + return self._apply_nb_1(bn_number, gg) + elif len(gg.shape) == 4: + return self._apply_nb_2(bn_number, gg) + else: + raise RuntimeError(f"unsupported input shape {gg.shape}") + + def _apply_bn_comp( + self, + bn_number: int, + gg: torch.Tensor, + ) -> torch.Tensor: + ss = gg.shape + nf = ss[-1] + gg = gg.view([-1, nf]) + if bn_number == 1: + assert self.bn1 is not None + gg = self.bn1(gg).view(ss) + else: + assert self.bn2 is not None + gg = self.bn2(gg).view(ss) + return gg + + def forward( + self, + g1_ext: torch.Tensor, # nf x nall x ng1 + g2: torch.Tensor, # nf x nloc x nnei x ng2 + h2: torch.Tensor, # nf x nloc x nnei x 3 + nlist: torch.Tensor, # nf x nloc x nnei + nlist_mask: torch.Tensor, # nf x nloc x nnei + sw: torch.Tensor, # switch func, nf x nloc x nnei + ): + """ + Parameters + ---------- + g1_ext : nf x nall x ng1 extended single-atom channel + g2 : nf x nloc x nnei x ng2 pair-atom channel, invariant + h2 : nf x nloc x nnei x 3 pair-atom channel, equivariant + nlist : nf x nloc x nnei neighbor list (padded neighbors are set to 0) + nlist_mask : nf x nloc x nnei masks of the neighbor list; 1 for a real neighbor, 0 otherwise + sw : nf x nloc x nnei switch function + + Returns + ------- + g1: nf x nloc x ng1 updated single-atom channel + g2: nf x nloc x nnei x ng2 updated pair-atom channel, invariant + h2: nf x nloc x nnei x 3 updated pair-atom channel, equivariant + """ + cal_gg1 = ( + self.update_g1_has_drrd + or self.update_g1_has_conv + or self.update_g1_has_attn + or self.update_g2_has_g1g1 + ) + + nb, nloc, nnei, _ = g2.shape + nall = g1_ext.shape[1] + g1, _ = torch.split(g1_ext, [nloc, nall - nloc], dim=1) + assert (nb, nloc) == g1.shape[:2] + assert (nb, nloc, nnei) == h2.shape[:3] + ng1 = g1.shape[-1] + ng2 = g2.shape[-1] + nh2 = h2.shape[-1] + + if self.bn1 is not None: + g1 = self._apply_bn(1, g1) + if self.bn2 is not None: + g2 = self._apply_bn(2, g2) + if self.update_h2: + h2 = _apply_h_norm(h2) + + g2_update: List[torch.Tensor] = [g2] + h2_update: List[torch.Tensor] = [h2] + g1_update: List[torch.Tensor] = [g1] + g1_mlp: List[torch.Tensor] = [g1] + + if cal_gg1: + gg1 = _make_nei_g1(g1_ext, nlist) + else: + gg1 = None + + if self.update_chnnl_2: + # nb x nloc x nnei x ng2 + assert self.linear2 is not None + g2_1 = self.act(self.linear2(g2)) + g2_update.append(g2_1) + + if self.update_g2_has_g1g1: + assert gg1 is not None + assert self.proj_g1g1g2 is not None + g2_update.append( + self.proj_g1g1g2(self._update_g2_g1g1(g1, gg1, nlist_mask, sw)) + ) + + if self.update_g2_has_attn: + assert self.attn2g_map is not None + assert self.attn2_mh_apply is not None + assert self.attn2_lm is not None + # nb x nloc x nnei x nnei x nh + AAg = self.attn2g_map(g2, h2, nlist_mask, sw) + # nb x nloc x nnei x ng2 + g2_2 = self.attn2_mh_apply(AAg, g2) + g2_2 = self.attn2_lm(g2_2) + g2_update.append(g2_2) + + if self.update_h2: + h2_update.append(self._update_h2(g2, h2, nlist_mask, sw)) + + if self.update_g1_has_conv: + assert gg1 is not None + g1_mlp.append(self._update_g1_conv(gg1, g2, nlist_mask, sw)) + + if self.update_g1_has_grrg: + g1_mlp.append(self._update_g1_grrg(g2, h2, nlist_mask, sw)) + + if self.update_g1_has_drrd: + assert gg1 is not None + g1_mlp.append(self._update_g1_grrg(gg1, h2, nlist_mask, sw)) + + # nb x nloc x [ng1+ng2+(axisxng2)+(axisxng1)] + # conv grrg drrd + g1_1 = self.act(self.linear1(torch.cat(g1_mlp, dim=-1))) + g1_update.append(g1_1)
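+ + # Shape sketch for the concatenation above (illustrative, assuming the + # default dimensions g1_dim=128, g2_dim=16, axis_dim=4 from __init__): + # g1 (128) + conv (16) + grrg (4*16=64) + drrd (4*128=512) = 720 + # -> linear1 -> ng1 (128); see also cal_1_dim.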
+ + if self.update_g1_has_attn: + assert gg1 is not None + assert self.loc_attn is not None + g1_update.append(self.loc_attn(g1, gg1, nlist_mask, sw)) + + # update + if self.update_chnnl_2: + g2_new = self.list_update(g2_update) + h2_new = self.list_update(h2_update) + else: + g2_new, h2_new = g2, h2 + g1_new = self.list_update(g1_update) + return g1_new, g2_new, h2_new + + @torch.jit.export + def list_update_res_avg( + self, + update_list: List[torch.Tensor], + ) -> torch.Tensor: + nitem = len(update_list) + uu = update_list[0] + for ii in range(1, nitem): + uu = uu + update_list[ii] + return uu / (float(nitem) ** 0.5) + + @torch.jit.export + def list_update_res_incr(self, update_list: List[torch.Tensor]) -> torch.Tensor: + nitem = len(update_list) + uu = update_list[0] + scale = 1.0 / (float(nitem - 1) ** 0.5) if nitem > 1 else 0.0 + for ii in range(1, nitem): + uu = uu + scale * update_list[ii] + return uu + + @torch.jit.export + def list_update(self, update_list: List[torch.Tensor]) -> torch.Tensor: + if self.update_style == "res_avg": + return self.list_update_res_avg(update_list) + elif self.update_style == "res_incr": + return self.list_update_res_incr(update_list) + else: + raise RuntimeError(f"unknown update style {self.update_style}") + + def _bn_layer( + self, + nf: int = 1, + ) -> Callable: + return torch.nn.BatchNorm1d( + nf, + eps=1e-5, + momentum=self.bn_momentum, + affine=False, + track_running_stats=True, + device=env.DEVICE, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + ) diff --git a/deepmd/pt/model/descriptor/repformers.py b/deepmd/pt/model/descriptor/repformers.py new file mode 100644 index 0000000000..26887b1b75 --- /dev/null +++ b/deepmd/pt/model/descriptor/repformers.py @@ -0,0 +1,348 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + List, + Optional, +) + +import numpy as np +import torch + +from deepmd.pt.model.descriptor.descriptor import ( + DescriptorBlock, + compute_std, +) +from deepmd.pt.model.descriptor.env_mat import ( + prod_env_mat_se_a, +) +from deepmd.pt.model.network.network import ( + SimpleLinear, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.nlist import ( + build_neighbor_list, +) +from deepmd.pt.utils.utils import ( + get_activation_fn, +) + +from .repformer_layer import ( + RepformerLayer, +) +from .se_atten import ( + analyze_descrpt, +) + +mydtype = env.GLOBAL_PT_FLOAT_PRECISION +mydev = env.DEVICE + + +def torch_linear(*args, **kwargs): + return torch.nn.Linear(*args, **kwargs, dtype=mydtype, device=mydev) + + +simple_linear = SimpleLinear +mylinear = simple_linear + + +@DescriptorBlock.register("se_repformer") +@DescriptorBlock.register("se_uni") +class DescrptBlockRepformers(DescriptorBlock): + def __init__( + self, + rcut, + rcut_smth, + sel: int, + ntypes: int, + nlayers: int = 3, + g1_dim=128, + g2_dim=16, + axis_dim: int = 4, + direct_dist: bool = False, + do_bn_mode: str = "no", + bn_momentum: float = 0.1, + update_g1_has_conv: bool = True, + update_g1_has_drrd: bool = True, + update_g1_has_grrg: bool = True, + update_g1_has_attn: bool = True, + update_g2_has_g1g1: bool = True, + update_g2_has_attn: bool = True, + update_h2: bool = False, + attn1_hidden: int = 64, + attn1_nhead: int = 4, + attn2_hidden: int = 16, + attn2_nhead: int = 4, + attn2_has_gate: bool = False, + activation: str = "tanh", + update_style: str = "res_avg", + set_davg_zero: bool = True, # TODO + smooth: bool = True, + add_type_ebd_to_seq: bool = False, + type: Optional[str] = None, + ): + """ + smooth: + If strict smoothness is required, this cannot be used together with update_g1_has_attn. + add_type_ebd_to_seq: + In the presence of seq_input (an optional input to forward), + whether or not to add a type embedding to seq_input. + If no seq_input is given, this option has no effect. + """
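+ # A minimal construction sketch (illustrative only; the hyper-parameter + # values below are assumptions, not prescribed by this file): + # block = DescrptBlockRepformers(rcut=6.0, rcut_smth=0.5, sel=120, ntypes=2, nlayers=3) + # g1, g2, h2, rot_mat, sw = block(nlist, extended_coord, extended_atype, extended_atype_embd, mapping)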
+ super().__init__() + del type + self.epsilon = 1e-4 # protection of 1./nnei + self.rcut = rcut + self.rcut_smth = rcut_smth + self.ntypes = ntypes + self.nlayers = nlayers + sel = [sel] if isinstance(sel, int) else sel + self.nnei = sum(sel) + assert len(sel) == 1 + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.axis_dim = axis_dim + self.set_davg_zero = set_davg_zero + self.g1_dim = g1_dim + self.g2_dim = g2_dim + self.act = get_activation_fn(activation) + self.direct_dist = direct_dist + self.add_type_ebd_to_seq = add_type_ebd_to_seq + + self.g2_embd = mylinear(1, self.g2_dim) + layers = [] + for ii in range(nlayers): + layers.append( + RepformerLayer( + rcut, + rcut_smth, + sel, + ntypes, + self.g1_dim, + self.g2_dim, + axis_dim=self.axis_dim, + update_chnnl_2=(ii != nlayers - 1), + do_bn_mode=do_bn_mode, + bn_momentum=bn_momentum, + update_g1_has_conv=update_g1_has_conv, + update_g1_has_drrd=update_g1_has_drrd, + update_g1_has_grrg=update_g1_has_grrg, + update_g1_has_attn=update_g1_has_attn, + update_g2_has_g1g1=update_g2_has_g1g1, + update_g2_has_attn=update_g2_has_attn, + update_h2=update_h2, + attn1_hidden=attn1_hidden, + attn1_nhead=attn1_nhead, + attn2_has_gate=attn2_has_gate, + attn2_hidden=attn2_hidden, + attn2_nhead=attn2_nhead, + activation=activation, + update_style=update_style, + smooth=smooth, + ) + ) + self.layers = torch.nn.ModuleList(layers) + + sshape = (self.ntypes, self.nnei, 4) + mean = torch.zeros(sshape, dtype=mydtype, device=mydev) + stddev = torch.ones(sshape, dtype=mydtype, device=mydev) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.g1_dim + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.g1_dim + + @property + def dim_emb(self): + """Returns the embedding dimension g2.""" + return self.g2_dim + + def forward( + self, + nlist: torch.Tensor, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + extended_atype_embd: Optional[torch.Tensor] = None, + mapping: Optional[torch.Tensor] = None, + ): + assert mapping is not None + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + nall = extended_coord.view(nframes, -1).shape[1] // 3 + atype = extended_atype[:, :nloc] + # nb x nloc x nnei x 4, nb x nloc x nnei x 3, nb x nloc x nnei x 1 + dmatrix, diff, sw = prod_env_mat_se_a( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + ) + nlist_mask = nlist != -1 + sw = torch.squeeze(sw, -1) + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0)
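+ + # Note: sw is produced by prod_env_mat_se_a and decays smoothly from 1 + # below rcut_smth to 0 at rcut. A common smoothing choice (stated here + # for reference, not guaranteed by this file) is + # sw(u) = u**3 * (-6 * u**2 + 15 * u - 10) + 1 + # with u = (r - rcut_smth) / (rcut - rcut_smth).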
+ # [nframes, nloc, tebd_dim] + atype_embd = extended_atype_embd[:, :nloc, :] + assert list(atype_embd.shape) == [nframes, nloc, self.g1_dim] + + g1 = self.act(atype_embd) + # nb x nloc x nnei x 1, nb x nloc x nnei x 3 + if not self.direct_dist: + g2, h2 = torch.split(dmatrix, [1, 3], dim=-1) + else: + g2, h2 = torch.linalg.norm(diff, dim=-1, keepdim=True), diff + g2 = g2 / self.rcut + h2 = h2 / self.rcut + # nb x nloc x nnei x ng2 + g2 = self.act(self.g2_embd(g2)) + + # set all padding positions to index of 0 + # whether a neighbor is real is indicated by nlist_mask + nlist[nlist == -1] = 0 + # nb x nall x ng1 + mapping = mapping.view(nframes, nall).unsqueeze(-1).expand(-1, -1, self.g1_dim) + for idx, ll in enumerate(self.layers): + # g1: nb x nloc x ng1 + # g1_ext: nb x nall x ng1 + g1_ext = torch.gather(g1, 1, mapping) + g1, g2, h2 = ll.forward( + g1_ext, + g2, + h2, + nlist, + nlist_mask, + sw, + ) + + # uses the last layer. + # nb x nloc x 3 x ng2 + h2g2 = ll._cal_h2g2(g2, h2, nlist_mask, sw) + # (nb x nloc) x ng2 x 3 + rot_mat = torch.permute(h2g2, (0, 1, 3, 2)) + + return g1, g2, h2, rot_mat.view(-1, self.dim_emb, 3), sw + + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + ndescrpt = self.nnei * 4 + sumr = [] + suma = [] + sumn = [] + sumr2 = [] + suma2 = [] + mixed_type = "real_natoms_vec" in merged[0] + for system in merged: + index = system["mapping"].unsqueeze(-1).expand(-1, -1, 3) + extended_coord = torch.gather(system["coord"], dim=1, index=index) + extended_coord = extended_coord - system["shift"] + index = system["mapping"] + extended_atype = torch.gather(system["atype"], dim=1, index=index) + nloc = system["atype"].shape[-1] + ####################################################### + # dirty hack here!
the interface of dataload should be + # redesigned to support descriptors like dpa2 + ####################################################### + nlist = build_neighbor_list( + extended_coord, + extended_atype, + nloc, + self.rcut, + self.get_sel(), + distinguish_types=False, + ) + env_mat, _, _ = prod_env_mat_se_a( + extended_coord, + nlist, + system["atype"], + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + ) + if not mixed_type: + sysr, sysr2, sysa, sysa2, sysn = analyze_descrpt( + env_mat.detach().cpu().numpy(), ndescrpt, system["natoms"] + ) + else: + sysr, sysr2, sysa, sysa2, sysn = analyze_descrpt( + env_mat.detach().cpu().numpy(), + ndescrpt, + system["real_natoms_vec"], + mixed_type=mixed_type, + real_atype=system["atype"].detach().cpu().numpy(), + ) + sumr.append(sysr) + suma.append(sysa) + sumn.append(sysn) + sumr2.append(sysr2) + suma2.append(sysa2) + sumr = np.sum(sumr, axis=0) + suma = np.sum(suma, axis=0) + sumn = np.sum(sumn, axis=0) + sumr2 = np.sum(sumr2, axis=0) + suma2 = np.sum(suma2, axis=0) + return sumr, suma, sumn, sumr2, suma2 + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + all_davg = [] + all_dstd = [] + for type_i in range(self.ntypes): + davgunit = [[sumr[type_i] / (sumn[type_i] + 1e-15), 0, 0, 0]] + dstdunit = [ + [ + compute_std(sumr2[type_i], sumr[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + ] + ] + davg = np.tile(davgunit, [self.nnei, 1]) + dstd = np.tile(dstdunit, [self.nnei, 1]) + all_davg.append(davg) + all_dstd.append(dstd) + self.sumr = sumr + self.suma = suma + self.sumn = sumn + self.sumr2 = sumr2 + self.suma2 = suma2 + if not self.set_davg_zero: + mean = np.stack(all_davg) + self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) + stddev = np.stack(all_dstd) + self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) diff --git a/deepmd/pt/model/descriptor/se_a.py b/deepmd/pt/model/descriptor/se_a.py new file mode 100644 index 0000000000..10aa66311e --- /dev/null +++ b/deepmd/pt/model/descriptor/se_a.py @@ -0,0 +1,478 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + ClassVar, + List, + Optional, +) + +import numpy as np +import torch + +from deepmd.pt.model.descriptor import ( + Descriptor, + DescriptorBlock, + compute_std, + prod_env_mat_se_a, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.env import ( + PRECISION_DICT, +) + +try: + from typing import ( + Final, + ) +except ImportError: + from torch.jit import Final + +from deepmd.model_format import EnvMat as DPEnvMat +from deepmd.pt.model.network.mlp import ( + EmbeddingNet, + NetworkCollection, +) +from deepmd.pt.model.network.network import ( + TypeFilter, +) + + +@Descriptor.register("se_e2_a") +class DescrptSeA(Descriptor): + def __init__( + self, + rcut, + rcut_smth, + sel, + neuron=[25, 50, 100], + axis_neuron=16, + set_davg_zero: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + old_impl: bool = False, + **kwargs, + ): + super().__init__() + self.sea = DescrptBlockSeA( + rcut, + rcut_smth, + sel, + neuron, + axis_neuron, + set_davg_zero, + activation_function, + precision, + resnet_dt, + old_impl, + **kwargs, + ) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.sea.get_rcut() + + def get_nsel(self) -> int: + """Returns the number of 
selected atoms in the cut-off radius.""" + return self.sea.get_nsel() + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.sea.get_sel() + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.sea.get_ntype() + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.sea.get_dim_out() + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.sea.dim_out + + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + return self.sea.compute_input_stats(merged) + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + self.sea.init_desc_stat(sumr, suma, sumn, sumr2, suma2) + + @classmethod + def get_stat_name(cls, config): + descrpt_type = config["type"] + assert descrpt_type in ["se_e2_a"] + return f'stat_file_sea_rcut{config["rcut"]:.2f}_smth{config["rcut_smth"]:.2f}_sel{config["sel"]}.npz' + + @classmethod + def get_data_process_key(cls, config): + descrpt_type = config["type"] + assert descrpt_type in ["se_e2_a"] + return {"sel": config["sel"], "rcut": config["rcut"]} + + def forward( + self, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + nlist: torch.Tensor, + mapping: Optional[torch.Tensor] = None, + ): + return self.sea.forward(nlist, extended_coord, extended_atype, None, mapping) + + def set_stat_mean_and_stddev( + self, + mean: torch.Tensor, + stddev: torch.Tensor, + ) -> None: + self.sea.mean = mean + self.sea.stddev = stddev + + def serialize(self) -> dict: + obj = self.sea + return { + "rcut": obj.rcut, + "rcut_smth": obj.rcut_smth, + "sel": obj.sel, + "neuron": obj.neuron, + "axis_neuron": obj.axis_neuron, + "resnet_dt": obj.resnet_dt, + "set_davg_zero": obj.set_davg_zero, + "activation_function": obj.activation_function, + "precision": obj.precision, + "embeddings": obj.filter_layers.serialize(), + "env_mat": DPEnvMat(obj.rcut, obj.rcut_smth).serialize(), + "@variables": { + "davg": obj["davg"].detach().cpu().numpy(), + "dstd": obj["dstd"].detach().cpu().numpy(), + }, + ## to be updated when the options are supported. + "trainable": True, + "type_one_side": True, + "exclude_types": [], + "spin": None, + } + + @classmethod + def deserialize(cls, data: dict) -> "DescrptSeA": + variables = data.pop("@variables") + embeddings = data.pop("embeddings") + env_mat = data.pop("env_mat") + obj = cls(**data) + + def t_cvt(xx): + return torch.tensor(xx, dtype=obj.sea.prec, device=env.DEVICE) + + obj.sea["davg"] = t_cvt(variables["davg"]) + obj.sea["dstd"] = t_cvt(variables["dstd"]) + obj.sea.filter_layers = NetworkCollection.deserialize(embeddings) + return obj + + +@DescriptorBlock.register("se_e2_a") +class DescrptBlockSeA(DescriptorBlock): + ndescrpt: Final[int] + __constants__: ClassVar[list] = ["ndescrpt"] + + def __init__( + self, + rcut, + rcut_smth, + sel, + neuron=[25, 50, 100], + axis_neuron=16, + set_davg_zero: bool = False, + activation_function: str = "tanh", + precision: str = "float64", + resnet_dt: bool = False, + old_impl: bool = False, + **kwargs, + ): + """Construct an embedding net of type `se_a`. + + Args: + - rcut: Cut-off radius. + - rcut_smth: Smooth hyper-parameter for pair force & energy. + - sel: For each element type, how many atoms is selected as neighbors. + - filter_neuron: Number of neurons in each hidden layers of the embedding net. + - axis_neuron: Number of columns of the sub-matrix of the embedding matrix. 
+ """ + super().__init__() + self.rcut = rcut + self.rcut_smth = rcut_smth + self.neuron = neuron + self.filter_neuron = self.neuron + self.axis_neuron = axis_neuron + self.set_davg_zero = set_davg_zero + self.activation_function = activation_function + self.precision = precision + self.prec = PRECISION_DICT[self.precision] + self.resnet_dt = resnet_dt + self.old_impl = old_impl + + self.ntypes = len(sel) + self.sel = sel + self.sec = torch.tensor( + np.append([0], np.cumsum(self.sel)), dtype=int, device=env.DEVICE + ) + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = torch.zeros(wanted_shape, dtype=self.prec, device=env.DEVICE) + stddev = torch.ones(wanted_shape, dtype=self.prec, device=env.DEVICE) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + self.filter_layers_old = None + self.filter_layers = None + + if self.old_impl: + filter_layers = [] + # TODO: remove + start_index = 0 + for type_i in range(self.ntypes): + one = TypeFilter(start_index, sel[type_i], self.filter_neuron) + filter_layers.append(one) + start_index += sel[type_i] + self.filter_layers_old = torch.nn.ModuleList(filter_layers) + else: + filter_layers = NetworkCollection( + ndim=1, ntypes=len(sel), network_type="embedding_network" + ) + # TODO: ndim=2 if type_one_side=False + for ii in range(self.ntypes): + filter_layers[(ii,)] = EmbeddingNet( + 1, + self.filter_neuron, + activation_function=self.activation_function, + precision=self.precision, + resnet_dt=self.resnet_dt, + ) + self.filter_layers = filter_layers + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + def get_dim_in(self) -> int: + """Returns the input dimension.""" + return self.dim_in + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] * self.axis_neuron + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return 0 + + def __setitem__(self, key, value): + if key in ("avg", "data_avg", "davg"): + self.mean = value + elif key in ("std", "data_std", "dstd"): + self.stddev = value + else: + raise KeyError(key) + + def __getitem__(self, key): + if key in ("avg", "data_avg", "davg"): + return self.mean + elif key in ("std", "data_std", "dstd"): + return self.stddev + else: + raise KeyError(key) + + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + sumr = [] + suma = [] + sumn = [] + sumr2 = [] + suma2 = [] + for system in merged: + index = system["mapping"].unsqueeze(-1).expand(-1, -1, 3) + extended_coord = torch.gather(system["coord"], dim=1, index=index) + extended_coord = extended_coord - system["shift"] + env_mat, _, _ = prod_env_mat_se_a( + extended_coord, + system["nlist"], + system["atype"], + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + ) + sysr, sysr2, sysa, sysa2, sysn = analyze_descrpt( + env_mat.detach().cpu().numpy(), self.ndescrpt, system["natoms"] + ) + sumr.append(sysr) + suma.append(sysa) + 
sumn.append(sysn) + sumr2.append(sysr2) + suma2.append(sysa2) + sumr = np.sum(sumr, axis=0) + suma = np.sum(suma, axis=0) + sumn = np.sum(sumn, axis=0) + sumr2 = np.sum(sumr2, axis=0) + suma2 = np.sum(suma2, axis=0) + return sumr, suma, sumn, sumr2, suma2 + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + all_davg = [] + all_dstd = [] + for type_i in range(self.ntypes): + davgunit = [[sumr[type_i] / (sumn[type_i] + 1e-15), 0, 0, 0]] + dstdunit = [ + [ + compute_std(sumr2[type_i], sumr[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + ] + ] + davg = np.tile(davgunit, [self.nnei, 1]) + dstd = np.tile(dstdunit, [self.nnei, 1]) + all_davg.append(davg) + all_dstd.append(dstd) + self.sumr = sumr + self.suma = suma + self.sumn = sumn + self.sumr2 = sumr2 + self.suma2 = suma2 + if not self.set_davg_zero: + mean = np.stack(all_davg) + self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) + stddev = np.stack(all_dstd) + self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) + + def forward( + self, + nlist: torch.Tensor, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + extended_atype_embd: Optional[torch.Tensor] = None, + mapping: Optional[torch.Tensor] = None, + ): + """Calculate decoded embedding for each atom. + + Args: + - coord: Tell atom coordinates with shape [nframes, natoms[1]*3]. + - atype: Tell atom types with shape [nframes, natoms[1]]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + - box: Tell simulation box with shape [nframes, 9]. + + Returns + ------- + - `torch.Tensor`: descriptor matrix with shape [nframes, natoms[0]*self.filter_neuron[-1]*self.axis_neuron]. 
+ """ + del extended_atype_embd, mapping + nloc = nlist.shape[1] + atype = extended_atype[:, :nloc] + dmatrix, diff, _ = prod_env_mat_se_a( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + ) + + if self.old_impl: + assert self.filter_layers_old is not None + dmatrix = dmatrix.view( + -1, self.ndescrpt + ) # shape is [nframes*nall, self.ndescrpt] + xyz_scatter = torch.empty( + 1, + ) + ret = self.filter_layers_old[0](dmatrix) + xyz_scatter = ret + for ii, transform in enumerate(self.filter_layers_old[1:]): + # shape is [nframes*nall, 4, self.filter_neuron[-1]] + ret = transform.forward(dmatrix) + xyz_scatter = xyz_scatter + ret + else: + assert self.filter_layers is not None + dmatrix = dmatrix.view(-1, self.nnei, 4) + nfnl = dmatrix.shape[0] + # pre-allocate a shape to pass jit + xyz_scatter = torch.zeros( + [nfnl, 4, self.filter_neuron[-1]], dtype=self.prec, device=env.DEVICE + ) + for ii, ll in enumerate(self.filter_layers.networks): + # nfnl x nt x 4 + rr = dmatrix[:, self.sec[ii] : self.sec[ii + 1], :] + ss = rr[:, :, :1] + # nfnl x nt x ng + gg = ll.forward(ss) + # nfnl x 4 x ng + gr = torch.matmul(rr.permute(0, 2, 1), gg) + xyz_scatter += gr + + xyz_scatter /= self.nnei + xyz_scatter_1 = xyz_scatter.permute(0, 2, 1) + rot_mat = xyz_scatter_1[:, :, 1:4] + xyz_scatter_2 = xyz_scatter[:, :, 0 : self.axis_neuron] + result = torch.matmul( + xyz_scatter_1, xyz_scatter_2 + ) # shape is [nframes*nall, self.filter_neuron[-1], self.axis_neuron] + return ( + result.view(-1, nloc, self.filter_neuron[-1] * self.axis_neuron), + None, + None, + None, + None, + ) + + +def analyze_descrpt(matrix, ndescrpt, natoms): + """Collect avg, square avg and count of descriptors in a batch.""" + ntypes = natoms.shape[1] - 2 + start_index = 0 + sysr = [] + sysa = [] + sysn = [] + sysr2 = [] + sysa2 = [] + for type_i in range(ntypes): + end_index = start_index + natoms[0, 2 + type_i] + dd = matrix[:, start_index:end_index] # all descriptors for this element + start_index = end_index + dd = np.reshape( + dd, [-1, 4] + ) # Shape is [nframes*natoms[2+type_id]*self.nnei, 4] + ddr = dd[:, :1] + dda = dd[:, 1:] + sumr = np.sum(ddr) + suma = np.sum(dda) / 3.0 + sumn = dd.shape[0] # Value is nframes*natoms[2+type_id]*self.nnei + sumr2 = np.sum(np.multiply(ddr, ddr)) + suma2 = np.sum(np.multiply(dda, dda)) / 3.0 + sysr.append(sumr) + sysa.append(suma) + sysn.append(sumn) + sysr2.append(sumr2) + sysa2.append(suma2) + return sysr, sysr2, sysa, sysa2, sysn diff --git a/deepmd/pt/model/descriptor/se_atten.py b/deepmd/pt/model/descriptor/se_atten.py new file mode 100644 index 0000000000..0c932f42f2 --- /dev/null +++ b/deepmd/pt/model/descriptor/se_atten.py @@ -0,0 +1,392 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + List, + Optional, +) + +import numpy as np +import torch + +from deepmd.pt.model.descriptor.descriptor import ( + DescriptorBlock, + compute_std, +) +from deepmd.pt.model.descriptor.env_mat import ( + prod_env_mat_se_a, +) +from deepmd.pt.model.network.network import ( + NeighborWiseAttention, + TypeFilter, +) +from deepmd.pt.utils import ( + env, +) + + +@DescriptorBlock.register("se_atten") +class DescrptBlockSeAtten(DescriptorBlock): + def __init__( + self, + rcut, + rcut_smth, + sel, + ntypes: int, + neuron: list = [25, 50, 100], + axis_neuron: int = 16, + tebd_dim: int = 8, + tebd_input_mode: str = "concat", + # set_davg_zero: bool = False, + set_davg_zero: bool = True, # TODO + attn: int = 128, + attn_layer: int = 2, + attn_dotr: bool 
= True, + attn_mask: bool = False, + post_ln=True, + ffn=False, + ffn_embed_dim=1024, + activation="tanh", + scaling_factor=1.0, + head_num=1, + normalize=True, + temperature=None, + return_rot=False, + type: Optional[str] = None, + ): + """Construct an embedding net of type `se_atten`. + + Args: + - rcut: Cut-off radius. + - rcut_smth: Smooth hyper-parameter for pair force & energy. + - sel: For each element type, how many atoms is selected as neighbors. + - filter_neuron: Number of neurons in each hidden layers of the embedding net. + - axis_neuron: Number of columns of the sub-matrix of the embedding matrix. + """ + super().__init__() + del type + self.rcut = rcut + self.rcut_smth = rcut_smth + self.filter_neuron = neuron + self.axis_neuron = axis_neuron + self.tebd_dim = tebd_dim + self.tebd_input_mode = tebd_input_mode + self.set_davg_zero = set_davg_zero + self.attn_dim = attn + self.attn_layer = attn_layer + self.attn_dotr = attn_dotr + self.attn_mask = attn_mask + self.post_ln = post_ln + self.ffn = ffn + self.ffn_embed_dim = ffn_embed_dim + self.activation = activation + self.scaling_factor = scaling_factor + self.head_num = head_num + self.normalize = normalize + self.temperature = temperature + self.return_rot = return_rot + + if isinstance(sel, int): + sel = [sel] + + self.ntypes = ntypes + self.sel = sel + self.sec = self.sel + self.split_sel = self.sel + self.nnei = sum(sel) + self.ndescrpt = self.nnei * 4 + self.dpa1_attention = NeighborWiseAttention( + self.attn_layer, + self.nnei, + self.filter_neuron[-1], + self.attn_dim, + dotr=self.attn_dotr, + do_mask=self.attn_mask, + post_ln=self.post_ln, + ffn=self.ffn, + ffn_embed_dim=self.ffn_embed_dim, + activation=self.activation, + scaling_factor=self.scaling_factor, + head_num=self.head_num, + normalize=self.normalize, + temperature=self.temperature, + ) + + wanted_shape = (self.ntypes, self.nnei, 4) + mean = torch.zeros( + wanted_shape, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + stddev = torch.ones( + wanted_shape, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + self.register_buffer("mean", mean) + self.register_buffer("stddev", stddev) + + filter_layers = [] + one = TypeFilter( + 0, + self.nnei, + self.filter_neuron, + return_G=True, + tebd_dim=self.tebd_dim, + use_tebd=True, + tebd_mode=self.tebd_input_mode, + ) + filter_layers.append(one) + self.filter_layers = torch.nn.ModuleList(filter_layers) + + def get_rcut(self) -> float: + """Returns the cut-off radius.""" + return self.rcut + + def get_nsel(self) -> int: + """Returns the number of selected atoms in the cut-off radius.""" + return sum(self.sel) + + def get_sel(self) -> List[int]: + """Returns the number of selected atoms for each type.""" + return self.sel + + def get_ntype(self) -> int: + """Returns the number of element types.""" + return self.ntypes + + def get_dim_in(self) -> int: + """Returns the output dimension.""" + return self.dim_in + + def get_dim_out(self) -> int: + """Returns the output dimension.""" + return self.dim_out + + @property + def dim_out(self): + """Returns the output dimension of this descriptor.""" + return self.filter_neuron[-1] * self.axis_neuron + + @property + def dim_in(self): + """Returns the atomic input dimension of this descriptor.""" + return self.tebd_dim + + @property + def dim_emb(self): + """Returns the output dimension of embedding.""" + return self.filter_neuron[-1] + + def compute_input_stats(self, merged): + """Update mean and stddev for descriptor elements.""" + sumr = [] + suma = [] + sumn 
= [] + sumr2 = [] + suma2 = [] + mixed_type = "real_natoms_vec" in merged[0] + for system in merged: + index = system["mapping"].unsqueeze(-1).expand(-1, -1, 3) + extended_coord = torch.gather(system["coord"], dim=1, index=index) + extended_coord = extended_coord - system["shift"] + env_mat, _, _ = prod_env_mat_se_a( + extended_coord, + system["nlist"], + system["atype"], + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + ) + if not mixed_type: + sysr, sysr2, sysa, sysa2, sysn = analyze_descrpt( + env_mat.detach().cpu().numpy(), self.ndescrpt, system["natoms"] + ) + else: + sysr, sysr2, sysa, sysa2, sysn = analyze_descrpt( + env_mat.detach().cpu().numpy(), + self.ndescrpt, + system["real_natoms_vec"], + mixed_type=mixed_type, + real_atype=system["atype"].detach().cpu().numpy(), + ) + sumr.append(sysr) + suma.append(sysa) + sumn.append(sysn) + sumr2.append(sysr2) + suma2.append(sysa2) + sumr = np.sum(sumr, axis=0) + suma = np.sum(suma, axis=0) + sumn = np.sum(sumn, axis=0) + sumr2 = np.sum(sumr2, axis=0) + suma2 = np.sum(suma2, axis=0) + return sumr, suma, sumn, sumr2, suma2 + + def init_desc_stat(self, sumr, suma, sumn, sumr2, suma2): + all_davg = [] + all_dstd = [] + for type_i in range(self.ntypes): + davgunit = [[sumr[type_i] / (sumn[type_i] + 1e-15), 0, 0, 0]] + dstdunit = [ + [ + compute_std(sumr2[type_i], sumr[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + compute_std(suma2[type_i], suma[type_i], sumn[type_i], self.rcut), + ] + ] + davg = np.tile(davgunit, [self.nnei, 1]) + dstd = np.tile(dstdunit, [self.nnei, 1]) + all_davg.append(davg) + all_dstd.append(dstd) + self.sumr = sumr + self.suma = suma + self.sumn = sumn + self.sumr2 = sumr2 + self.suma2 = suma2 + if not self.set_davg_zero: + mean = np.stack(all_davg) + self.mean.copy_(torch.tensor(mean, device=env.DEVICE)) + stddev = np.stack(all_dstd) + self.stddev.copy_(torch.tensor(stddev, device=env.DEVICE)) + + def forward( + self, + nlist: torch.Tensor, + extended_coord: torch.Tensor, + extended_atype: torch.Tensor, + extended_atype_embd: Optional[torch.Tensor] = None, + mapping: Optional[torch.Tensor] = None, + ) -> List[torch.Tensor]: + """Calculate decoded embedding for each atom. + + Args: + - coord: Tell atom coordinates with shape [nframes, natoms[1]*3]. + - atype: Tell atom types with shape [nframes, natoms[1]]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + - box: Tell simulation box with shape [nframes, 9]. + + Returns + ------- + - result: descriptor with shape [nframes, nloc, self.filter_neuron[-1] * self.axis_neuron]. 
+ - ret: environment matrix with shape [nframes, nloc, self.nnei, out_size] + """ + del mapping + assert extended_atype_embd is not None + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + nb = nframes + nall = extended_coord.view(nb, -1, 3).shape[1] + dmatrix, diff, sw = prod_env_mat_se_a( + extended_coord, + nlist, + atype, + self.mean, + self.stddev, + self.rcut, + self.rcut_smth, + ) + dmatrix = dmatrix.view( + -1, self.ndescrpt + ) # shape is [nframes*nloc, self.ndescrpt] + nlist_mask = nlist != -1 + nlist[nlist == -1] = 0 + sw = torch.squeeze(sw, -1) + # beyond the cutoff sw should be 0.0 + sw = sw.masked_fill(~nlist_mask, 0.0) + # nf x nloc x nt -> nf x nloc x nnei x nt + atype_tebd = extended_atype_embd[:, :nloc, :] + atype_tebd_nnei = atype_tebd.unsqueeze(2).expand(-1, -1, self.nnei, -1) + # nf x nall x nt + nt = extended_atype_embd.shape[-1] + atype_tebd_ext = extended_atype_embd + # nb x (nloc x nnei) x nt + index = nlist.reshape(nb, nloc * nnei).unsqueeze(-1).expand(-1, -1, nt) + # nb x (nloc x nnei) x nt + atype_tebd_nlist = torch.gather(atype_tebd_ext, dim=1, index=index) + # nb x nloc x nnei x nt + atype_tebd_nlist = atype_tebd_nlist.view(nb, nloc, nnei, nt) + ret = self.filter_layers[0]( + dmatrix, + atype_tebd=atype_tebd_nnei, + nlist_tebd=atype_tebd_nlist, + ) # shape is [nframes*nloc, self.nnei, out_size] + input_r = torch.nn.functional.normalize( + dmatrix.reshape(-1, self.nnei, 4)[:, :, 1:4], dim=-1 + ) + ret = self.dpa1_attention( + ret, nlist_mask, input_r=input_r, sw=sw + ) # shape is [nframes*nloc, self.nnei, out_size] + inputs_reshape = dmatrix.view(-1, self.nnei, 4).permute( + 0, 2, 1 + ) # shape is [nframes*nloc, 4, self.nnei] + xyz_scatter = torch.matmul( + inputs_reshape, ret + ) # shape is [nframes*nloc, 4, out_size] + xyz_scatter = xyz_scatter / self.nnei + xyz_scatter_1 = xyz_scatter.permute(0, 2, 1) + rot_mat = xyz_scatter_1[:, :, 1:4] + xyz_scatter_2 = xyz_scatter[:, :, 0 : self.axis_neuron] + result = torch.matmul( + xyz_scatter_1, xyz_scatter_2 + ) # shape is [nframes*nloc, self.filter_neuron[-1], self.axis_neuron] + return ( + result.view(-1, nloc, self.filter_neuron[-1] * self.axis_neuron), + ret.view(-1, nloc, self.nnei, self.filter_neuron[-1]), + diff, + rot_mat.view(-1, self.filter_neuron[-1], 3), + sw, + ) + + +def analyze_descrpt(matrix, ndescrpt, natoms, mixed_type=False, real_atype=None): + """Collect avg, square avg and count of descriptors in a batch.""" + ntypes = natoms.shape[1] - 2 + if not mixed_type: + sysr = [] + sysa = [] + sysn = [] + sysr2 = [] + sysa2 = [] + start_index = 0 + for type_i in range(ntypes): + end_index = start_index + natoms[0, 2 + type_i] + dd = matrix[:, start_index:end_index] + start_index = end_index + dd = np.reshape( + dd, [-1, 4] + ) # Shape is [nframes*natoms[2+type_id]*self.nnei, 4] + ddr = dd[:, :1] + dda = dd[:, 1:] + sumr = np.sum(ddr) + suma = np.sum(dda) / 3.0 + sumn = dd.shape[0] # Value is nframes*natoms[2+type_id]*self.nnei + sumr2 = np.sum(np.multiply(ddr, ddr)) + suma2 = np.sum(np.multiply(dda, dda)) / 3.0 + sysr.append(sumr) + sysa.append(suma) + sysn.append(sumn) + sysr2.append(sumr2) + sysa2.append(suma2) + else: + sysr = [0.0 for i in range(ntypes)] + sysa = [0.0 for i in range(ntypes)] + sysn = [0 for i in range(ntypes)] + sysr2 = [0.0 for i in range(ntypes)] + sysa2 = [0.0 for i in range(ntypes)] + for frame_item in range(matrix.shape[0]): + dd_ff = matrix[frame_item] + atype_frame = real_atype[frame_item] + for type_i in range(ntypes): + type_idx = atype_frame ==
type_i + dd = dd_ff[type_idx] + dd = np.reshape(dd, [-1, 4]) # typen_atoms * nnei, 4 + ddr = dd[:, :1] + dda = dd[:, 1:] + sumr = np.sum(ddr) + suma = np.sum(dda) / 3.0 + sumn = dd.shape[0] + sumr2 = np.sum(np.multiply(ddr, ddr)) + suma2 = np.sum(np.multiply(dda, dda)) / 3.0 + sysr[type_i] += sumr + sysa[type_i] += suma + sysn[type_i] += sumn + sysr2[type_i] += sumr2 + sysa2[type_i] += suma2 + + return sysr, sysr2, sysa, sysa2, sysn diff --git a/deepmd/pt/model/model/__init__.py b/deepmd/pt/model/model/__init__.py new file mode 100644 index 0000000000..a3db3dbdec --- /dev/null +++ b/deepmd/pt/model/model/__init__.py @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .ener import ( + EnergyModel, +) +from .model import ( + BaseModel, +) + + +def get_model(model_params, sampled=None): + return EnergyModel( + descriptor=model_params["descriptor"], + fitting_net=model_params.get("fitting_net", None), + type_map=model_params["type_map"], + type_embedding=model_params.get("type_embedding", None), + resuming=model_params.get("resuming", False), + stat_file_dir=model_params.get("stat_file_dir", None), + stat_file_path=model_params.get("stat_file_path", None), + sampled=sampled, + ) + + +__all__ = [ + "BaseModel", + "EnergyModel", + "get_model", +] diff --git a/deepmd/pt/model/model/atomic_model.py b/deepmd/pt/model/model/atomic_model.py new file mode 100644 index 0000000000..47fd463fc9 --- /dev/null +++ b/deepmd/pt/model/model/atomic_model.py @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from abc import ( + ABC, + abstractmethod, +) +from typing import ( + Dict, + List, + Optional, +) + +import torch + +from deepmd.model_format import ( + FittingOutputDef, +) +from deepmd.pt.model.task import ( + Fitting, +) + + +class AtomicModel(ABC): + @abstractmethod + def get_fitting_net(self) -> Fitting: + raise NotImplementedError + + @abstractmethod + def get_fitting_output_def(self) -> FittingOutputDef: + raise NotImplementedError + + @abstractmethod + def get_rcut(self) -> float: + raise NotImplementedError + + @abstractmethod + def get_sel(self) -> List[int]: + raise NotImplementedError + + @abstractmethod + def distinguish_types(self) -> bool: + raise NotImplementedError + + @abstractmethod + def forward_atomic( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[torch.Tensor] = None, + do_atomic_virial: bool = False, + ) -> Dict[str, torch.Tensor]: + raise NotImplementedError + + def do_grad( + self, + var_name: Optional[str] = None, + ) -> bool: + """Tell if the output variable `var_name` is differentiable. + if var_name is None, returns if any of the variable is differentiable. 
+ + """ + odef = self.get_fitting_output_def() + if var_name is None: + require: List[bool] = [] + for vv in odef.keys(): + require.append(self.do_grad_(vv)) + return any(require) + else: + return self.do_grad_(var_name) + + def do_grad_( + self, + var_name: str, + ) -> bool: + """Tell if the output variable `var_name` is differentiable.""" + assert var_name is not None + return self.get_fitting_output_def()[var_name].differentiable diff --git a/deepmd/pt/model/model/dp_atomic_model.py b/deepmd/pt/model/model/dp_atomic_model.py new file mode 100644 index 0000000000..ffeeeda660 --- /dev/null +++ b/deepmd/pt/model/model/dp_atomic_model.py @@ -0,0 +1,214 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Dict, + List, + Optional, +) + +import torch + +from deepmd.model_format import ( + FittingOutputDef, +) +from deepmd.pt.model.descriptor.descriptor import ( + Descriptor, +) +from deepmd.pt.model.task import ( + DenoiseNet, + Fitting, +) + +from .atomic_model import ( + AtomicModel, +) +from .model import ( + BaseModel, +) + + +class DPAtomicModel(BaseModel, AtomicModel): + """Model give atomic prediction of some physical property. + + Parameters + ---------- + descriptor + Descriptor + fitting_net + Fitting net + type_map + Mapping atom type to the name (str) of the type. + For example `type_map[1]` gives the name of the type 1. + type_embedding + Type embedding net + resuming + Whether to resume/fine-tune from checkpoint or not. + stat_file_dir + The directory to the state files. + stat_file_path + The path to the state files. + sampled + Sampled frames to compute the statistics. + """ + + def __init__( + self, + descriptor: dict, + fitting_net: dict, + type_map: Optional[List[str]], + type_embedding: Optional[dict] = None, + resuming: bool = False, + stat_file_dir=None, + stat_file_path=None, + sampled=None, + **kwargs, + ): + super().__init__() + # Descriptor + Type Embedding Net (Optional) + ntypes = len(type_map) + self.type_map = type_map + self.ntypes = ntypes + descriptor["ntypes"] = ntypes + self.combination = descriptor.get("combination", False) + if self.combination: + self.prefactor = descriptor.get("prefactor", [0.5, 0.5]) + self.descriptor_type = descriptor["type"] + + self.type_split = True + if self.descriptor_type not in ["se_e2_a"]: + self.type_split = False + + self.descriptor = Descriptor(**descriptor) + self.rcut = self.descriptor.get_rcut() + self.sel = self.descriptor.get_sel() + self.split_nlist = False + + # Statistics + self.compute_or_load_stat( + fitting_net, + ntypes, + resuming=resuming, + type_map=type_map, + stat_file_dir=stat_file_dir, + stat_file_path=stat_file_path, + sampled=sampled, + ) + + # Fitting + if fitting_net: + fitting_net["type"] = fitting_net.get("type", "ener") + if self.descriptor_type not in ["se_e2_a"]: + fitting_net["ntypes"] = 1 + else: + fitting_net["ntypes"] = self.descriptor.get_ntype() + fitting_net["use_tebd"] = False + fitting_net["embedding_width"] = self.descriptor.dim_out + + self.grad_force = "direct" not in fitting_net["type"] + if not self.grad_force: + fitting_net["out_dim"] = self.descriptor.dim_emb + if "ener" in fitting_net["type"]: + fitting_net["return_energy"] = True + self.fitting_net = Fitting(**fitting_net) + else: + self.fitting_net = None + self.grad_force = False + if not self.split_nlist: + self.coord_denoise_net = DenoiseNet( + self.descriptor.dim_out, self.ntypes - 1, self.descriptor.dim_emb + ) + elif self.combination: + self.coord_denoise_net = DenoiseNet( + self.descriptor.dim_out, + 
self.ntypes - 1, + self.descriptor.dim_emb_list, + self.prefactor, + ) + else: + self.coord_denoise_net = DenoiseNet( + self.descriptor.dim_out, self.ntypes - 1, self.descriptor.dim_emb + ) + + def get_fitting_net(self) -> Fitting: + """Get the fitting net.""" + return ( + self.fitting_net if self.fitting_net is not None else self.coord_denoise_net + ) + + def get_fitting_output_def(self) -> FittingOutputDef: + """Get the output def of the fitting net.""" + return ( + self.fitting_net.output_def() + if self.fitting_net is not None + else self.coord_denoise_net.output_def() + ) + + def get_rcut(self) -> float: + """Get the cut-off radius.""" + return self.rcut + + def get_sel(self) -> List[int]: + """Get the neighbor selection.""" + return self.sel + + def distinguish_types(self) -> bool: + """Whether different atom types are distinguished by sorting.""" + return self.type_split + + def forward_atomic( + self, + extended_coord, + extended_atype, + nlist, + mapping: Optional[torch.Tensor] = None, + ) -> Dict[str, torch.Tensor]: + """Return atomic prediction. + + Parameters + ---------- + extended_coord + coordinates in extended region + extended_atype + atomic type in extended region + nlist + neighbor list. nf x nloc x nsel + mapping + maps the extended indices to local indices + + Returns + ------- + result_dict + the result dict, defined by the fitting net output def. + + """ + nframes, nloc, nnei = nlist.shape + atype = extended_atype[:, :nloc] + if self.do_grad(): + extended_coord.requires_grad_(True) + descriptor, env_mat, diff, rot_mat, sw = self.descriptor( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + assert descriptor is not None + # energy, force + if self.fitting_net is not None: + fit_ret = self.fitting_net( + descriptor, atype, atype_tebd=None, rot_mat=rot_mat + ) + # denoise + else: + nlist_list = [nlist] + if not self.split_nlist: + nnei_mask = nlist != -1 + elif self.combination: + nnei_mask = [] + for item in nlist_list: + nnei_mask_item = item != -1 + nnei_mask.append(nnei_mask_item) + else: + env_mat = env_mat[-1] + diff = diff[-1] + nnei_mask = nlist_list[-1] != -1 + fit_ret = self.coord_denoise_net(env_mat, diff, nnei_mask, descriptor, sw) + return fit_ret diff --git a/deepmd/pt/model/model/ener.py b/deepmd/pt/model/model/ener.py new file mode 100644 index 0000000000..c316c99a86 --- /dev/null +++ b/deepmd/pt/model/model/ener.py @@ -0,0 +1,151 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Dict, + List, + Optional, +) + +import torch + +from .dp_atomic_model import ( + DPAtomicModel, +) +from .make_model import ( + make_model, +) + +DPModel = make_model(DPAtomicModel) + + +class EnergyModel(DPModel): + model_type = "ener" + + def __init__( + self, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + def forward( + self, + coord, + atype, + box: Optional[torch.Tensor] = None, + do_atomic_virial: bool = False, + ) -> Dict[str, torch.Tensor]: + model_ret = self.forward_common( + coord, atype, box, do_atomic_virial=do_atomic_virial + ) + if self.fitting_net is not None: + model_predict = {} + model_predict["atom_energy"] = model_ret["energy"] + model_predict["energy"] = model_ret["energy_redu"] + if self.do_grad("energy"): + model_predict["force"] = model_ret["energy_derv_r"].squeeze(-2) + if do_atomic_virial: + model_predict["atomic_virial"] = model_ret["energy_derv_c"].squeeze( + -3 + ) + model_predict["virial"] = model_ret["energy_derv_c_redu"].squeeze(-3) + else: + model_predict["force"] = model_ret["dforce"] + else: + model_predict = model_ret + model_predict["updated_coord"] += coord + return model_predict
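+ + # For reference: with a fitting net, the dict returned above carries + # "atom_energy", "energy", and, when the energy is differentiable, "force" + # (plus "virial"/"atomic_virial" when do_atomic_virial is set); the denoise + # branch instead returns model_ret with "updated_coord" shifted by coord.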
+            model_predict = model_ret
+            model_predict["updated_coord"] += coord
+        return model_predict
+
+    def forward_lower(
+        self,
+        extended_coord,
+        extended_atype,
+        nlist,
+        mapping: Optional[torch.Tensor] = None,
+        do_atomic_virial: bool = False,
+    ):
+        model_ret = self.forward_common_lower(
+            extended_coord,
+            extended_atype,
+            nlist,
+            mapping,
+            do_atomic_virial=do_atomic_virial,
+        )
+        if self.fitting_net is not None:
+            model_predict = {}
+            model_predict["atom_energy"] = model_ret["energy"]
+            model_predict["energy"] = model_ret["energy_redu"]
+            if self.do_grad("energy"):
+                model_predict["extended_force"] = model_ret["energy_derv_r"].squeeze(-2)
+                if do_atomic_virial:
+                    model_predict["extended_virial"] = model_ret[
+                        "energy_derv_c"
+                    ].squeeze(-3)
+            else:
+                assert model_ret["dforce"] is not None
+                model_predict["dforce"] = model_ret["dforce"]
+        else:
+            model_predict = model_ret
+        return model_predict
+
+
+# TODO: this should be a stand-alone function
+def process_nlist(
+    nlist,
+    extended_atype,
+    mapping: Optional[torch.Tensor] = None,
+):
+    # process the nlist_type and nlist_loc
+    nframes, nloc = nlist.shape[:2]
+    nmask = nlist == -1
+    nlist[nmask] = 0
+    if mapping is not None:
+        nlist_loc = torch.gather(
+            mapping,
+            dim=1,
+            index=nlist.reshape(nframes, -1),
+        ).reshape(nframes, nloc, -1)
+        nlist_loc[nmask] = -1
+    else:
+        nlist_loc = None
+    nlist_type = torch.gather(
+        extended_atype,
+        dim=1,
+        index=nlist.reshape(nframes, -1),
+    ).reshape(nframes, nloc, -1)
+    nlist_type[nmask] = -1
+    nlist[nmask] = -1
+    return nlist_loc, nlist_type, nframes, nloc
+
+
+def process_nlist_gathered(
+    nlist,
+    extended_atype,
+    split_sel: List[int],
+    mapping: Optional[torch.Tensor] = None,
+):
+    nlist_list = list(torch.split(nlist, split_sel, -1))
+    nframes, nloc = nlist_list[0].shape[:2]
+    nlist_type_list = []
+    nlist_loc_list = []
+    for nlist_item in nlist_list:
+        nmask = nlist_item == -1
+        nlist_item[nmask] = 0
+        if mapping is not None:
+            nlist_loc_item = torch.gather(
+                mapping, dim=1, index=nlist_item.reshape(nframes, -1)
+            ).reshape(nframes, nloc, -1)
+            nlist_loc_item[nmask] = -1
+            nlist_loc_list.append(nlist_loc_item)
+        nlist_type_item = torch.gather(
+            extended_atype, dim=1, index=nlist_item.reshape(nframes, -1)
+        ).reshape(nframes, nloc, -1)
+        nlist_type_item[nmask] = -1
+        nlist_type_list.append(nlist_type_item)
+        nlist_item[nmask] = -1
+
+    if mapping is not None:
+        nlist_loc = torch.cat(nlist_loc_list, -1)
+    else:
+        nlist_loc = None
+    nlist_type = torch.cat(nlist_type_list, -1)
+    return nlist_loc, nlist_type, nframes, nloc
diff --git a/deepmd/pt/model/model/make_model.py b/deepmd/pt/model/model/make_model.py
new file mode 100644
index 0000000000..3ddd21fbb8
--- /dev/null
+++ b/deepmd/pt/model/model/make_model.py
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    Dict,
+    Optional,
+)
+
+import torch
+
+from deepmd.model_format import (
+    ModelOutputDef,
+)
+from deepmd.pt.model.model.transform_output import (
+    communicate_extended_output,
+    fit_output_to_model_output,
+)
+from deepmd.pt.utils.nlist import (
+    build_neighbor_list,
+    extend_coord_with_ghosts,
+)
+from deepmd.pt.utils.region import (
+    normalize_coord,
+)
+
+
+def make_model(T_AtomicModel):
+    class CM(T_AtomicModel):
+        def __init__(
+            self,
+            *args,
+            **kwargs,
+        ):
+            super().__init__(
+                *args,
+                **kwargs,
+            )
+
+        def get_model_output_def(self):
+            return ModelOutputDef(self.get_fitting_output_def())
+
+        # cannot use the name `forward`:
+        # TorchScript does not work with it.
+        def forward_common(
+            self,
+            coord,
+            atype,
+            box: Optional[torch.Tensor] = None,
+            do_atomic_virial: bool = False,
+        ) -> Dict[str, torch.Tensor]:
+            """Return total energy of the system.
+
+            Args:
+            - coord: Atom coordinates with shape [nframes, nloc * 3].
+            - atype: Atom types with shape [nframes, nloc].
+            - box: Simulation box with shape [nframes, 9].
+            - do_atomic_virial: Whether or not to compute the atomic virial.
+
+            Returns
+            -------
+            - energy: Energy per atom.
+            - force: XYZ force per atom.
+            """
+            nframes, nloc = atype.shape[:2]
+            if box is not None:
+                coord_normalized = normalize_coord(coord, box.reshape(-1, 3, 3))
+            else:
+                coord_normalized = coord.clone()
+            extended_coord, extended_atype, mapping = extend_coord_with_ghosts(
+                coord_normalized, atype, box, self.get_rcut()
+            )
+            nlist = build_neighbor_list(
+                extended_coord,
+                extended_atype,
+                nloc,
+                self.get_rcut(),
+                self.get_sel(),
+                distinguish_types=self.distinguish_types(),
+            )
+            extended_coord = extended_coord.reshape(nframes, -1, 3)
+            model_predict_lower = self.forward_common_lower(
+                extended_coord,
+                extended_atype,
+                nlist,
+                mapping,
+                do_atomic_virial=do_atomic_virial,
+            )
+            model_predict = communicate_extended_output(
+                model_predict_lower,
+                self.get_model_output_def(),
+                mapping,
+                do_atomic_virial=do_atomic_virial,
+            )
+            return model_predict
+
+        def forward_common_lower(
+            self,
+            extended_coord,
+            extended_atype,
+            nlist,
+            mapping: Optional[torch.Tensor] = None,
+            do_atomic_virial: bool = False,
+        ):
+            """Return model prediction.
+
+            Parameters
+            ----------
+            extended_coord
+                coordinates in the extended region
+            extended_atype
+                atomic types in the extended region
+            nlist
+                neighbor list. nf x nloc x nsel
+            mapping
+                maps the extended indices to local indices
+            do_atomic_virial
+                whether to compute the atomic virial
+
+            Returns
+            -------
+            result_dict
+                the result dict, defined by the fitting net output def.
+ + """ + atomic_ret = self.forward_atomic( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + model_predict = fit_output_to_model_output( + atomic_ret, + self.get_fitting_output_def(), + extended_coord, + do_atomic_virial=do_atomic_virial, + ) + return model_predict + + return CM diff --git a/deepmd/pt/model/model/model.py b/deepmd/pt/model/model/model.py new file mode 100644 index 0000000000..139744c1e9 --- /dev/null +++ b/deepmd/pt/model/model/model.py @@ -0,0 +1,150 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os + +import numpy as np +import torch + +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.stat import ( + compute_output_stats, +) + + +class BaseModel(torch.nn.Module): + def __init__(self): + """Construct a basic model for different tasks.""" + super().__init__() + + def forward(self, *args, **kwargs): + """Model output.""" + raise NotImplementedError + + def compute_or_load_stat( + self, + fitting_param, + ntypes, + resuming=False, + type_map=None, + stat_file_dir=None, + stat_file_path=None, + sampled=None, + ): + if fitting_param is None: + fitting_param = {} + if not resuming: + if sampled is not None: # compute stat + for sys in sampled: + for key in sys: + if isinstance(sys[key], list): + sys[key] = [item.to(env.DEVICE) for item in sys[key]] + else: + if sys[key] is not None: + sys[key] = sys[key].to(env.DEVICE) + sumr, suma, sumn, sumr2, suma2 = self.descriptor.compute_input_stats( + sampled + ) + + energy = [item["energy"] for item in sampled] + mixed_type = "real_natoms_vec" in sampled[0] + if mixed_type: + input_natoms = [item["real_natoms_vec"] for item in sampled] + else: + input_natoms = [item["natoms"] for item in sampled] + tmp = compute_output_stats(energy, input_natoms) + fitting_param["bias_atom_e"] = tmp[:, 0] + if stat_file_path is not None: + if not os.path.exists(stat_file_dir): + os.mkdir(stat_file_dir) + if not isinstance(stat_file_path, list): + logging.info(f"Saving stat file to {stat_file_path}") + np.savez_compressed( + stat_file_path, + sumr=sumr, + suma=suma, + sumn=sumn, + sumr2=sumr2, + suma2=suma2, + bias_atom_e=fitting_param["bias_atom_e"], + type_map=type_map, + ) + else: + for ii, file_path in enumerate(stat_file_path): + logging.info(f"Saving stat file to {file_path}") + np.savez_compressed( + file_path, + sumr=sumr[ii], + suma=suma[ii], + sumn=sumn[ii], + sumr2=sumr2[ii], + suma2=suma2[ii], + bias_atom_e=fitting_param["bias_atom_e"], + type_map=type_map, + ) + else: # load stat + target_type_map = type_map + if not isinstance(stat_file_path, list): + logging.info(f"Loading stat file from {stat_file_path}") + stats = np.load(stat_file_path) + stat_type_map = list(stats["type_map"]) + missing_type = [ + i for i in target_type_map if i not in stat_type_map + ] + assert not missing_type, f"These type are not in stat file {stat_file_path}: {missing_type}! Please change the stat file path!" 
+ idx_map = [stat_type_map.index(i) for i in target_type_map] + if stats["sumr"].size: + sumr, suma, sumn, sumr2, suma2 = ( + stats["sumr"][idx_map], + stats["suma"][idx_map], + stats["sumn"][idx_map], + stats["sumr2"][idx_map], + stats["suma2"][idx_map], + ) + else: + sumr, suma, sumn, sumr2, suma2 = [], [], [], [], [] + fitting_param["bias_atom_e"] = stats["bias_atom_e"][idx_map] + else: + sumr, suma, sumn, sumr2, suma2 = [], [], [], [], [] + id_bias_atom_e = None + for ii, file_path in enumerate(stat_file_path): + logging.info(f"Loading stat file from {file_path}") + stats = np.load(file_path) + stat_type_map = list(stats["type_map"]) + missing_type = [ + i for i in target_type_map if i not in stat_type_map + ] + assert not missing_type, f"These type are not in stat file {file_path}: {missing_type}! Please change the stat file path!" + idx_map = [stat_type_map.index(i) for i in target_type_map] + if stats["sumr"].size: + sumr_tmp, suma_tmp, sumn_tmp, sumr2_tmp, suma2_tmp = ( + stats["sumr"][idx_map], + stats["suma"][idx_map], + stats["sumn"][idx_map], + stats["sumr2"][idx_map], + stats["suma2"][idx_map], + ) + else: + sumr_tmp, suma_tmp, sumn_tmp, sumr2_tmp, suma2_tmp = ( + [], + [], + [], + [], + [], + ) + sumr.append(sumr_tmp) + suma.append(suma_tmp) + sumn.append(sumn_tmp) + sumr2.append(sumr2_tmp) + suma2.append(suma2_tmp) + fitting_param["bias_atom_e"] = stats["bias_atom_e"][idx_map] + if id_bias_atom_e is None: + id_bias_atom_e = fitting_param["bias_atom_e"] + else: + assert ( + id_bias_atom_e == fitting_param["bias_atom_e"] + ).all(), "bias_atom_e in stat files are not consistent!" + self.descriptor.init_desc_stat(sumr, suma, sumn, sumr2, suma2) + else: # resuming for checkpoint; init model params from scratch + fitting_param["bias_atom_e"] = [0.0] * ntypes diff --git a/deepmd/pt/model/model/transform_output.py b/deepmd/pt/model/model/transform_output.py new file mode 100644 index 0000000000..673491d788 --- /dev/null +++ b/deepmd/pt/model/model/transform_output.py @@ -0,0 +1,214 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Dict, + List, + Optional, +) + +import torch + +from deepmd.model_format import ( + FittingOutputDef, + ModelOutputDef, + OutputVariableDef, + get_deriv_name, + get_reduce_name, +) + + +def atomic_virial_corr( + extended_coord: torch.Tensor, + atom_energy: torch.Tensor, +): + nall = extended_coord.shape[1] + nloc = atom_energy.shape[1] + coord, _ = torch.split(extended_coord, [nloc, nall - nloc], dim=1) + # no derivative with respect to the loc coord. 
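+    # detaching keeps autograd from differentiating through the local copy:
+    # the correction below is differentiated w.r.t. extended_coord only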
+ coord = coord.detach() + ce = coord * atom_energy + sumce0, sumce1, sumce2 = torch.split(torch.sum(ce, dim=1), [1, 1, 1], dim=-1) + faked_grad = torch.ones_like(sumce0) + lst = torch.jit.annotate(List[Optional[torch.Tensor]], [faked_grad]) + extended_virial_corr0 = torch.autograd.grad( + [sumce0], [extended_coord], grad_outputs=lst, create_graph=True + )[0] + assert extended_virial_corr0 is not None + extended_virial_corr1 = torch.autograd.grad( + [sumce1], [extended_coord], grad_outputs=lst, create_graph=True + )[0] + assert extended_virial_corr1 is not None + extended_virial_corr2 = torch.autograd.grad( + [sumce2], [extended_coord], grad_outputs=lst, create_graph=True + )[0] + assert extended_virial_corr2 is not None + extended_virial_corr = torch.concat( + [ + extended_virial_corr0.unsqueeze(-1), + extended_virial_corr1.unsqueeze(-1), + extended_virial_corr2.unsqueeze(-1), + ], + dim=-1, + ) + return extended_virial_corr + + +def task_deriv_one( + atom_energy: torch.Tensor, + energy: torch.Tensor, + extended_coord: torch.Tensor, + do_atomic_virial: bool = False, +): + faked_grad = torch.ones_like(energy) + lst = torch.jit.annotate(List[Optional[torch.Tensor]], [faked_grad]) + extended_force = torch.autograd.grad( + [energy], [extended_coord], grad_outputs=lst, create_graph=True + )[0] + assert extended_force is not None + extended_force = -extended_force + extended_virial = extended_force.unsqueeze(-1) @ extended_coord.unsqueeze(-2) + # the correction sums to zero, which does not contribute to global virial + if do_atomic_virial: + extended_virial_corr = atomic_virial_corr(extended_coord, atom_energy) + extended_virial = extended_virial + extended_virial_corr + return extended_force, extended_virial + + +def get_leading_dims( + vv: torch.Tensor, + vdef: OutputVariableDef, +): + """Get the dimensions of nf x nloc.""" + vshape = vv.shape + return list(vshape[: (len(vshape) - len(vdef.shape))]) + + +def get_atom_axis( + vdef: torch.Tensor, +): + """Get the axis of atoms.""" + atom_axis = -(len(vdef.shape) + 1) + return atom_axis + + +def take_deriv( + vv: torch.Tensor, + svv: torch.Tensor, + vdef: OutputVariableDef, + coord_ext: torch.Tensor, + do_atomic_virial: bool = False, +): + size = 1 + for ii in vdef.shape: + size *= ii + vv1 = vv.view(list(get_leading_dims(vv, vdef)) + [size]) # noqa: RUF005 + svv1 = svv.view(list(get_leading_dims(svv, vdef)) + [size]) # noqa: RUF005 + split_vv1 = torch.split(vv1, [1] * size, dim=-1) + split_svv1 = torch.split(svv1, [1] * size, dim=-1) + split_ff, split_avir = [], [] + for vvi, svvi in zip(split_vv1, split_svv1): + # nf x nloc x 3, nf x nloc x 3 x 3 + ffi, aviri = task_deriv_one( + vvi, svvi, coord_ext, do_atomic_virial=do_atomic_virial + ) + # nf x nloc x 1 x 3, nf x nloc x 1 x 3 x 3 + ffi = ffi.unsqueeze(-2) + aviri = aviri.unsqueeze(-3) + split_ff.append(ffi) + split_avir.append(aviri) + # nf x nloc x v_dim x 3, nf x nloc x v_dim x 3 x 3 + ff = torch.concat(split_ff, dim=-2) + avir = torch.concat(split_avir, dim=-3) + return ff, avir + + +def fit_output_to_model_output( + fit_ret: Dict[str, torch.Tensor], + fit_output_def: FittingOutputDef, + coord_ext: torch.Tensor, + do_atomic_virial: bool = False, +) -> Dict[str, torch.Tensor]: + """Transform the output of the fitting network to + the model output. 
+ + """ + model_ret = dict(fit_ret.items()) + for kk, vv in fit_ret.items(): + vdef = fit_output_def[kk] + shap = vdef.shape + atom_axis = -(len(shap) + 1) + if vdef.reduciable: + kk_redu = get_reduce_name(kk) + model_ret[kk_redu] = torch.sum(vv, dim=atom_axis) + if vdef.differentiable: + kk_derv_r, kk_derv_c = get_deriv_name(kk) + dr, dc = take_deriv( + vv, + model_ret[kk_redu], + vdef, + coord_ext, + do_atomic_virial=do_atomic_virial, + ) + model_ret[kk_derv_r] = dr + model_ret[kk_derv_c] = dc + return model_ret + + +def communicate_extended_output( + model_ret: Dict[str, torch.Tensor], + model_output_def: ModelOutputDef, + mapping: torch.Tensor, # nf x nloc + do_atomic_virial: bool = False, +) -> Dict[str, torch.Tensor]: + """Transform the output of the model network defined on + local and ghost (extended) atoms to local atoms. + + """ + new_ret = {} + for kk in model_output_def.keys_outp(): + vv = model_ret[kk] + vdef = model_output_def[kk] + new_ret[kk] = vv + if vdef.reduciable: + kk_redu = get_reduce_name(kk) + new_ret[kk_redu] = model_ret[kk_redu] + if vdef.differentiable: + # nf x nloc + vldims = get_leading_dims(vv, vdef) + # nf x nall + mldims = list(mapping.shape) + kk_derv_r, kk_derv_c = get_deriv_name(kk) + # vdim x 3 + derv_r_ext_dims = list(vdef.shape) + [3] # noqa:RUF005 + mapping = mapping.view(mldims + [1] * len(derv_r_ext_dims)).expand( + [-1] * len(mldims) + derv_r_ext_dims + ) + force = torch.zeros( + vldims + derv_r_ext_dims, dtype=vv.dtype, device=vv.device + ) + # nf x nloc x 1 x 3 + new_ret[kk_derv_r] = torch.scatter_reduce( + force, + 1, + index=mapping, + src=model_ret[kk_derv_r], + reduce="sum", + ) + mapping = mapping.unsqueeze(-1).expand( + [-1] * (len(mldims) + len(derv_r_ext_dims)) + [3] + ) + virial = torch.zeros( + vldims + derv_r_ext_dims + [3], dtype=vv.dtype, device=vv.device + ) + # nf x nloc x 1 x 3 + new_ret[kk_derv_c] = torch.scatter_reduce( + virial, + 1, + index=mapping, + src=model_ret[kk_derv_c], + reduce="sum", + ) + new_ret[kk_derv_c + "_redu"] = torch.sum(new_ret[kk_derv_c], dim=1) + if not do_atomic_virial: + # pop atomic virial, because it is not correctly calculated. 
+                new_ret.pop(kk_derv_c)
+    return new_ret
diff --git a/deepmd/pt/model/network/__init__.py b/deepmd/pt/model/network/__init__.py
new file mode 100644
index 0000000000..6ceb116d85
--- /dev/null
+++ b/deepmd/pt/model/network/__init__.py
@@ -0,0 +1 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
diff --git a/deepmd/pt/model/network/mlp.py b/deepmd/pt/model/network/mlp.py
new file mode 100644
index 0000000000..e3ac0e7bc2
--- /dev/null
+++ b/deepmd/pt/model/network/mlp.py
@@ -0,0 +1,217 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    ClassVar,
+    Dict,
+    Optional,
+)
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from deepmd.pt.utils import (
+    env,
+)
+
+device = env.DEVICE
+
+from deepmd.model_format import (
+    NativeLayer,
+)
+from deepmd.model_format import NetworkCollection as DPNetworkCollection
+from deepmd.model_format import (
+    make_embedding_network,
+    make_fitting_network,
+    make_multilayer_network,
+)
+from deepmd.pt.utils.env import (
+    DEFAULT_PRECISION,
+    PRECISION_DICT,
+)
+from deepmd.pt.utils.utils import (
+    ActivationFn,
+)
+
+try:
+    from deepmd._version import version as __version__
+except ImportError:
+    __version__ = "unknown"
+
+
+def empty_t(shape, precision):
+    return torch.empty(shape, dtype=precision, device=device)
+
+
+class MLPLayer(nn.Module):
+    def __init__(
+        self,
+        num_in,
+        num_out,
+        bias: bool = True,
+        use_timestep: bool = False,
+        activation_function: Optional[str] = None,
+        resnet: bool = False,
+        bavg: float = 0.0,
+        stddev: float = 1.0,
+        precision: str = DEFAULT_PRECISION,
+    ):
+        super().__init__()
+        self.use_timestep = use_timestep
+        self.activate_name = activation_function
+        self.activate = ActivationFn(self.activate_name)
+        self.precision = precision
+        self.prec = PRECISION_DICT[self.precision]
+        self.matrix = nn.Parameter(data=empty_t((num_in, num_out), self.prec))
+        nn.init.normal_(self.matrix.data, std=stddev / np.sqrt(num_out + num_in))
+        if bias:
+            self.bias = nn.Parameter(
+                data=empty_t([num_out], self.prec),
+            )
+            nn.init.normal_(self.bias.data, mean=bavg, std=stddev)
+        else:
+            self.bias = None
+        if self.use_timestep:
+            self.idt = nn.Parameter(data=empty_t([num_out], self.prec))
+            nn.init.normal_(self.idt.data, mean=0.1, std=0.001)
+        else:
+            self.idt = None
+        self.resnet = resnet
+
+    def check_type_consistency(self):
+        precision = self.precision
+
+        def check_var(var):
+            if var is not None:
+                # assertion "float64" == "double" would fail
+                assert PRECISION_DICT[var.dtype.name] is PRECISION_DICT[precision]
+
+        check_var(self.matrix)
+        check_var(self.bias)
+        check_var(self.idt)
+
+    def dim_in(self) -> int:
+        return self.matrix.shape[0]
+
+    def dim_out(self) -> int:
+        return self.matrix.shape[1]
+
+    def forward(
+        self,
+        xx: torch.Tensor,
+    ) -> torch.Tensor:
+        """One MLP layer used by DP model.
+
+        Parameters
+        ----------
+        xx : torch.Tensor
+            The input.
+
+        Returns
+        -------
+        yy: torch.Tensor
+            The output.
+        """
+        yy = (
+            torch.matmul(xx, self.matrix) + self.bias
+            if self.bias is not None
+            else torch.matmul(xx, self.matrix)
+        )
+        yy = self.activate(yy).clone()
+        yy = yy * self.idt if self.idt is not None else yy
+        if self.resnet:
+            if xx.shape[-1] == yy.shape[-1]:
+                yy += xx
+            elif 2 * xx.shape[-1] == yy.shape[-1]:
+                yy += torch.concat([xx, xx], dim=-1)
+            else:
+                yy = yy
+        return yy
+
+    def serialize(self) -> dict:
+        """Serialize the layer to a dict.
+
+        Returns
+        -------
+        dict
+            The serialized layer.
+ """ + nl = NativeLayer( + self.matrix.shape[0], + self.matrix.shape[1], + bias=self.bias is not None, + use_timestep=self.idt is not None, + activation_function=self.activate_name, + resnet=self.resnet, + precision=self.precision, + ) + nl.w, nl.b, nl.idt = ( + self.matrix.detach().cpu().numpy(), + self.bias.detach().cpu().numpy() if self.bias is not None else None, + self.idt.detach().cpu().numpy() if self.idt is not None else None, + ) + return nl.serialize() + + @classmethod + def deserialize(cls, data: dict) -> "MLPLayer": + """Deserialize the layer from a dict. + + Parameters + ---------- + data : dict + The dict to deserialize from. + """ + nl = NativeLayer.deserialize(data) + obj = cls( + nl["matrix"].shape[0], + nl["matrix"].shape[1], + bias=nl["bias"] is not None, + use_timestep=nl["idt"] is not None, + activation_function=nl["activation_function"], + resnet=nl["resnet"], + precision=nl["precision"], + ) + prec = PRECISION_DICT[obj.precision] + + def check_load_param(ss): + return ( + nn.Parameter(data=torch.tensor(nl[ss], dtype=prec, device=device)) + if nl[ss] is not None + else None + ) + + obj.matrix = check_load_param("matrix") + obj.bias = check_load_param("bias") + obj.idt = check_load_param("idt") + return obj + + +MLP_ = make_multilayer_network(MLPLayer, nn.Module) + + +class MLP(MLP_): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.layers = torch.nn.ModuleList(self.layers) + + forward = MLP_.call + + +EmbeddingNet = make_embedding_network(MLP, MLPLayer) + +FittingNet = make_fitting_network(EmbeddingNet, MLP, MLPLayer) + + +class NetworkCollection(DPNetworkCollection, nn.Module): + """PyTorch implementation of NetworkCollection.""" + + NETWORK_TYPE_MAP: ClassVar[Dict[str, type]] = { + "network": MLP, + "embedding_network": EmbeddingNet, + # "fitting_network": FittingNet, + } + + def __init__(self, *args, **kwargs): + # init both two base classes + DPNetworkCollection.__init__(self, *args, **kwargs) + nn.Module.__init__(self) + self.networks = self._networks = torch.nn.ModuleList(self._networks) diff --git a/deepmd/pt/model/network/network.py b/deepmd/pt/model/network/network.py new file mode 100644 index 0000000000..8b5b3cf998 --- /dev/null +++ b/deepmd/pt/model/network/network.py @@ -0,0 +1,1897 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from deepmd.pt.utils import ( + env, +) + +try: + from typing import ( + Final, + ) +except ImportError: + from torch.jit import Final + +from functools import ( + partial, +) + +import torch.utils.checkpoint + +from deepmd.pt.utils.utils import ( + ActivationFn, + get_activation_fn, +) + + +def Tensor(*shape): + return torch.empty(shape, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + + +class Dropout(nn.Module): + def __init__(self, p): + super().__init__() + self.p = p + + def forward(self, x, inplace: bool = False): + if self.p > 0 and self.training: + return F.dropout(x, p=self.p, training=True, inplace=inplace) + else: + return x + + +class Identity(nn.Module): + def __init__(self): + super().__init__() + + def forward(self, x): + return x + + +class DropPath(torch.nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, prob=None): + super().__init__() + self.drop_prob = prob + + def forward(self, x): + if self.drop_prob == 0.0 or not self.training: + return x + keep_prob = 1 - self.drop_prob + 
shape = (x.shape[0],) + (1,) * ( + x.ndim - 1 + ) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) + random_tensor.floor_() # binarize + output = x.div(keep_prob) * random_tensor + return output + + def extra_repr(self) -> str: + return f"prob={self.drop_prob}" + + +def softmax_dropout( + input_x, dropout_prob, is_training=True, mask=None, bias=None, inplace=True +): + input_x = input_x.contiguous() + if not inplace: + input_x = input_x.clone() + if mask is not None: + input_x += mask + if bias is not None: + input_x += bias + return F.dropout(F.softmax(input_x, dim=-1), p=dropout_prob, training=is_training) + + +def checkpoint_sequential( + functions, + input_x, + enabled=True, +): + def wrap_tuple(a): + return (a,) if type(a) is not tuple else a + + def exec(func, a): + return wrap_tuple(func(*a)) + + def get_wrap_exec(func): + def wrap_exec(*a): + return exec(func, a) + + return wrap_exec + + input_x = wrap_tuple(input_x) + + is_grad_enabled = torch.is_grad_enabled() + + if enabled and is_grad_enabled: + for func in functions: + input_x = torch.utils.checkpoint.checkpoint(get_wrap_exec(func), *input_x) + else: + for func in functions: + input_x = exec(func, input_x) + return input_x + + +class ResidualLinear(nn.Module): + resnet: Final[int] + + def __init__(self, num_in, num_out, bavg=0.0, stddev=1.0, resnet_dt=False): + """Construct a residual linear layer. + + Args: + - num_in: Width of input tensor. + - num_out: Width of output tensor. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.num_in = num_in + self.num_out = num_out + self.resnet = resnet_dt + + self.matrix = nn.Parameter(data=Tensor(num_in, num_out)) + nn.init.normal_(self.matrix.data, std=stddev / np.sqrt(num_out + num_in)) + self.bias = nn.Parameter(data=Tensor(1, num_out)) + nn.init.normal_(self.bias.data, mean=bavg, std=stddev) + if self.resnet: + self.idt = nn.Parameter(data=Tensor(1, num_out)) + nn.init.normal_(self.idt.data, mean=1.0, std=0.001) + + def forward(self, inputs): + """Return X ?+ X*W+b.""" + xw_plus_b = torch.matmul(inputs, self.matrix) + self.bias + hidden = torch.tanh(xw_plus_b) + if self.resnet: + hidden = hidden * self.idt + if self.num_in == self.num_out: + return inputs + hidden + elif self.num_in * 2 == self.num_out: + return torch.cat([inputs, inputs], dim=1) + hidden + else: + return hidden + + +class TypeFilter(nn.Module): + use_tebd: Final[bool] + tebd_mode: Final[str] + + def __init__( + self, + offset, + length, + neuron, + return_G=False, + tebd_dim=0, + use_tebd=False, + tebd_mode="concat", + ): + """Construct a filter on the given element as neighbor. + + Args: + - offset: Element offset in the descriptor matrix. + - length: Atom count of this element. + - neuron: Number of neurons in each hidden layers of the embedding net. + """ + super().__init__() + self.offset = offset + self.length = length + self.tebd_dim = tebd_dim + self.use_tebd = use_tebd + self.tebd_mode = tebd_mode + supported_tebd_mode = ["concat", "dot", "dot_residual_s", "dot_residual_t"] + assert ( + tebd_mode in supported_tebd_mode + ), f"Unknown tebd_mode {tebd_mode}! Supported are {supported_tebd_mode}." 
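+        # the input width of the radial net depends on the mode: "concat" feeds
+        # the radial channel together with both type embeddings
+        # (1 + 2 * tebd_dim), while the "dot*" modes keep a width-1 radial
+        # channel and run a separate net (deep_layers_t below) over the
+        # concatenated embeddings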
+ if use_tebd and tebd_mode == "concat": + self.neuron = [1 + tebd_dim * 2, *neuron] + else: + self.neuron = [1, *neuron] + + deep_layers = [] + for ii in range(1, len(self.neuron)): + one = ResidualLinear(self.neuron[ii - 1], self.neuron[ii]) + deep_layers.append(one) + self.deep_layers = nn.ModuleList(deep_layers) + + deep_layers_t = [] + if use_tebd and tebd_mode in ["dot", "dot_residual_s", "dot_residual_t"]: + self.neuron_t = [tebd_dim * 2, *neuron] + for ii in range(1, len(self.neuron_t)): + one = ResidualLinear(self.neuron_t[ii - 1], self.neuron_t[ii]) + deep_layers_t.append(one) + self.deep_layers_t = nn.ModuleList(deep_layers_t) + + self.return_G = return_G + + def forward( + self, + inputs, + atype_tebd: Optional[torch.Tensor] = None, + nlist_tebd: Optional[torch.Tensor] = None, + ): + """Calculate decoded embedding for each atom. + + Args: + - inputs: Descriptor matrix. Its shape is [nframes*natoms[0], len_descriptor]. + + Returns + ------- + - `torch.Tensor`: Embedding contributed by me. Its shape is [nframes*natoms[0], 4, self.neuron[-1]]. + """ + inputs_i = inputs[:, self.offset * 4 : (self.offset + self.length) * 4] + inputs_reshape = inputs_i.reshape( + -1, 4 + ) # shape is [nframes*natoms[0]*self.length, 4] + xyz_scatter = inputs_reshape[:, 0:1] + + # concat the tebd as input + if self.use_tebd and self.tebd_mode == "concat": + assert nlist_tebd is not None and atype_tebd is not None + nlist_tebd = nlist_tebd.reshape(-1, self.tebd_dim) + atype_tebd = atype_tebd.reshape(-1, self.tebd_dim) + # [nframes * nloc * nnei, 1 + tebd_dim * 2] + xyz_scatter = torch.concat([xyz_scatter, nlist_tebd, atype_tebd], dim=1) + + for linear in self.deep_layers: + xyz_scatter = linear(xyz_scatter) + # [nframes * nloc * nnei, out_size] + + # dot the tebd output + if self.use_tebd and self.tebd_mode in [ + "dot", + "dot_residual_s", + "dot_residual_t", + ]: + assert nlist_tebd is not None and atype_tebd is not None + nlist_tebd = nlist_tebd.reshape(-1, self.tebd_dim) + atype_tebd = atype_tebd.reshape(-1, self.tebd_dim) + # [nframes * nloc * nnei, tebd_dim * 2] + two_side_tebd = torch.concat([nlist_tebd, atype_tebd], dim=1) + for linear in self.deep_layers_t: + two_side_tebd = linear(two_side_tebd) + # [nframes * nloc * nnei, out_size] + if self.tebd_mode == "dot": + xyz_scatter = xyz_scatter * two_side_tebd + elif self.tebd_mode == "dot_residual_s": + xyz_scatter = xyz_scatter * two_side_tebd + xyz_scatter + elif self.tebd_mode == "dot_residual_t": + xyz_scatter = xyz_scatter * two_side_tebd + two_side_tebd + + xyz_scatter = xyz_scatter.view( + -1, self.length, self.neuron[-1] + ) # shape is [nframes*natoms[0], self.length, self.neuron[-1]] + if self.return_G: + return xyz_scatter + else: + # shape is [nframes*natoms[0], 4, self.length] + inputs_reshape = inputs_i.view(-1, self.length, 4).permute(0, 2, 1) + return torch.matmul(inputs_reshape, xyz_scatter) + + +class SimpleLinear(nn.Module): + use_timestep: Final[bool] + + def __init__( + self, + num_in, + num_out, + bavg=0.0, + stddev=1.0, + use_timestep=False, + activate=None, + bias: bool = True, + ): + """Construct a linear layer. + + Args: + - num_in: Width of input tensor. + - num_out: Width of output tensor. + - use_timestep: Apply time-step to weight. + - activate: type of activate func. 
+ """ + super().__init__() + self.num_in = num_in + self.num_out = num_out + self.use_timestep = use_timestep + self.activate = ActivationFn(activate) + + self.matrix = nn.Parameter(data=Tensor(num_in, num_out)) + nn.init.normal_(self.matrix.data, std=stddev / np.sqrt(num_out + num_in)) + if bias: + self.bias = nn.Parameter(data=Tensor(1, num_out)) + nn.init.normal_(self.bias.data, mean=bavg, std=stddev) + else: + self.bias = None + if self.use_timestep: + self.idt = nn.Parameter(data=Tensor(1, num_out)) + nn.init.normal_(self.idt.data, mean=0.1, std=0.001) + + def forward(self, inputs): + """Return X*W+b.""" + xw = torch.matmul(inputs, self.matrix) + hidden = xw + self.bias if self.bias is not None else xw + hidden = self.activate(hidden) + if self.use_timestep: + hidden = hidden * self.idt + return hidden + + +class Linear(nn.Linear): + def __init__( + self, + d_in: int, + d_out: int, + bias: bool = True, + init: str = "default", + ): + super().__init__(d_in, d_out, bias=bias, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + + self.use_bias = bias + + if self.use_bias: + with torch.no_grad(): + self.bias.fill_(0) + + if init == "default": + self._trunc_normal_init(1.0) + elif init == "relu": + self._trunc_normal_init(2.0) + elif init == "glorot": + self._glorot_uniform_init() + elif init == "gating": + self._zero_init(self.use_bias) + elif init == "normal": + self._normal_init() + elif init == "final": + self._zero_init(False) + else: + raise ValueError("Invalid init method.") + + def _trunc_normal_init(self, scale=1.0): + # Constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.) + TRUNCATED_NORMAL_STDDEV_FACTOR = 0.87962566103423978 + _, fan_in = self.weight.shape + scale = scale / max(1, fan_in) + std = (scale**0.5) / TRUNCATED_NORMAL_STDDEV_FACTOR + nn.init.trunc_normal_(self.weight, mean=0.0, std=std) + + def _glorot_uniform_init(self): + nn.init.xavier_uniform_(self.weight, gain=1) + + def _zero_init(self, use_bias=True): + with torch.no_grad(): + self.weight.fill_(0.0) + if use_bias: + with torch.no_grad(): + self.bias.fill_(1.0) + + def _normal_init(self): + nn.init.kaiming_normal_(self.weight, nonlinearity="linear") + + +class Transition(nn.Module): + def __init__(self, d_in, n, dropout=0.0): + super().__init__() + + self.d_in = d_in + self.n = n + + self.linear_1 = Linear(self.d_in, self.n * self.d_in, init="relu") + self.act = nn.GELU() + self.linear_2 = Linear(self.n * self.d_in, d_in, init="final") + self.dropout = dropout + + def _transition(self, x): + x = self.linear_1(x) + x = self.act(x) + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.linear_2(x) + return x + + def forward( + self, + x: torch.Tensor, + ) -> torch.Tensor: + x = self._transition(x=x) + return x + + +class Embedding(nn.Embedding): + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + padding_idx: Optional[int] = None, + dtype=torch.float64, + ): + super().__init__( + num_embeddings, embedding_dim, padding_idx=padding_idx, dtype=dtype + ) + self._normal_init() + + if padding_idx is not None: + self.weight.data[self.padding_idx].zero_() + + def _normal_init(self, std=0.02): + nn.init.normal_(self.weight, mean=0.0, std=std) + + +class NonLinearHead(nn.Module): + def __init__(self, input_dim, out_dim, activation_fn, hidden=None): + super().__init__() + hidden = input_dim if not hidden else hidden + self.linear1 = SimpleLinear(input_dim, hidden, activate=activation_fn) + self.linear2 = SimpleLinear(hidden, out_dim) + + def forward(self, x): + x = self.linear1(x) + x = 
self.linear2(x) + return x + + +class NonLinear(nn.Module): + def __init__(self, input, output_size, hidden=None): + super().__init__() + + if hidden is None: + hidden = input + self.layer1 = Linear(input, hidden, init="relu") + self.layer2 = Linear(hidden, output_size, init="final") + + def forward(self, x): + x = F.linear(x, self.layer1.weight) + # x = fused_ops.bias_torch_gelu(x, self.layer1.bias) + x = nn.GELU()(x) + self.layer1.bias + x = self.layer2(x) + return x + + def zero_init(self): + nn.init.zeros_(self.layer2.weight) + nn.init.zeros_(self.layer2.bias) + + +class MaskLMHead(nn.Module): + """Head for masked language modeling.""" + + def __init__(self, embed_dim, output_dim, activation_fn, weight=None): + super().__init__() + self.dense = SimpleLinear(embed_dim, embed_dim) + self.activation_fn = get_activation_fn(activation_fn) + self.layer_norm = nn.LayerNorm(embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + + if weight is None: + weight = nn.Linear( + embed_dim, output_dim, bias=False, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ).weight + self.weight = weight + self.bias = nn.Parameter( + torch.zeros(output_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + ) + + def forward(self, features, masked_tokens: Optional[torch.Tensor] = None, **kwargs): + # Only project the masked tokens while training, + # saves both memory and computation + if masked_tokens is not None: + features = features[masked_tokens, :] + + x = self.dense(features) + x = self.activation_fn(x) + x = self.layer_norm(x) + # project back to size of vocabulary with bias + x = F.linear(x, self.weight) + self.bias + return x + + +class ResidualDeep(nn.Module): + def __init__( + self, type_id, embedding_width, neuron, bias_atom_e, out_dim=1, resnet_dt=False + ): + """Construct a filter on the given element as neighbor. + + Args: + - typei: Element ID. + - embedding_width: Embedding width per atom. + - neuron: Number of neurons in each hidden layers of the embedding net. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.type_id = type_id + self.neuron = [embedding_width, *neuron] + self.out_dim = out_dim + + deep_layers = [] + for ii in range(1, len(self.neuron)): + one = SimpleLinear( + num_in=self.neuron[ii - 1], + num_out=self.neuron[ii], + use_timestep=( + resnet_dt and ii > 1 and self.neuron[ii - 1] == self.neuron[ii] + ), + activate="tanh", + ) + deep_layers.append(one) + self.deep_layers = nn.ModuleList(deep_layers) + if not env.ENERGY_BIAS_TRAINABLE: + bias_atom_e = 0 + self.final_layer = SimpleLinear(self.neuron[-1], self.out_dim, bias_atom_e) + + def forward(self, inputs): + """Calculate decoded embedding for each atom. + + Args: + - inputs: Embedding net output per atom. Its shape is [nframes*nloc, self.embedding_width]. + + Returns + ------- + - `torch.Tensor`: Output layer with shape [nframes*nloc, self.neuron[-1]]. 
+ """ + outputs = inputs + for idx, linear in enumerate(self.deep_layers): + if idx > 0 and linear.num_in == linear.num_out: + outputs = outputs + linear(outputs) + else: + outputs = linear(outputs) + outputs = self.final_layer(outputs) + return outputs + + +class TypeEmbedNet(nn.Module): + def __init__(self, type_nums, embed_dim, bavg=0.0, stddev=1.0): + """Construct a type embedding net.""" + super().__init__() + self.embedding = nn.Embedding( + type_nums + 1, + embed_dim, + padding_idx=type_nums, + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + ) + # nn.init.normal_(self.embedding.weight[:-1], mean=bavg, std=stddev) + + def forward(self, atype): + """ + Args: + atype: Type of each input, [nframes, nloc] or [nframes, nloc, nnei]. + + Returns + ------- + type_embedding: + + """ + return self.embedding(atype) + + def share_params(self, base_class, shared_level, resume=False): + assert ( + self.__class__ == base_class.__class__ + ), "Only TypeEmbedNet of the same type can share params!" + if shared_level == 0: + # the following will successfully link all the params except buffers, which need manually link. + for item in self._modules: + self._modules[item] = base_class._modules[item] + else: + raise NotImplementedError + + +@torch.jit.script +def gaussian(x, mean, std: float): + pi = 3.14159 + a = (2 * pi) ** 0.5 + return torch.exp(-0.5 * (((x - mean) / std) ** 2)) / (a * std) + + +class GaussianKernel(nn.Module): + def __init__(self, K=128, num_pair=512, std_width=1.0, start=0.0, stop=9.0): + super().__init__() + self.K = K + std_width = std_width + start = start + stop = stop + mean = torch.linspace(start, stop, K, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + self.std = (std_width * (mean[1] - mean[0])).item() + self.register_buffer("mean", mean) + self.mul = Embedding( + num_pair + 1, 1, padding_idx=num_pair, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.bias = Embedding( + num_pair + 1, 1, padding_idx=num_pair, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + nn.init.constant_(self.bias.weight, 0) + nn.init.constant_(self.mul.weight, 1.0) + + def forward(self, x, atom_pair): + mul = self.mul(atom_pair).abs().sum(dim=-2) + bias = self.bias(atom_pair).sum(dim=-2) + x = mul * x.unsqueeze(-1) + bias + # [nframes, nloc, nnei, K] + x = x.expand(-1, -1, -1, self.K) + mean = self.mean.view(-1) + return gaussian(x, mean, self.std) + + +class GaussianEmbedding(nn.Module): + def __init__( + self, + rcut, + kernel_num, + num_pair, + embed_dim, + pair_embed_dim, + sel, + ntypes, + atomic_sum_gbf, + ): + """Construct a gaussian kernel based embedding of pair representation. + + Args: + rcut: Radial cutoff. + kernel_num: Number of gaussian kernels. + num_pair: Number of different pairs. + embed_dim: Dimension of atomic representation. + pair_embed_dim: Dimension of pair representation. + sel: Number of neighbors. + ntypes: Number of atom types. + """ + super().__init__() + self.gbf = GaussianKernel(K=kernel_num, num_pair=num_pair, stop=rcut) + self.gbf_proj = NonLinear(kernel_num, pair_embed_dim) + self.embed_dim = embed_dim + self.pair_embed_dim = pair_embed_dim + self.atomic_sum_gbf = atomic_sum_gbf + if self.atomic_sum_gbf: + if kernel_num != self.embed_dim: + self.edge_proj = torch.nn.Linear( + kernel_num, self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + else: + self.edge_proj = None + self.ntypes = ntypes + self.nnei = sel + + def forward(self, coord_selected, atom_feature, edge_type_2dim, edge_feature): + ## local cluster forward + """Calculate decoded embedding for each atom. 
+ Args: + coord_selected: Clustered atom coordinates with shape [nframes*nloc, natoms, 3]. + atom_feature: Previous calculated atomic features with shape [nframes*nloc, natoms, embed_dim]. + edge_type_2dim: Edge index for gbf calculation with shape [nframes*nloc, natoms, natoms, 2]. + edge_feature: Previous calculated edge features with shape [nframes*nloc, natoms, natoms, pair_dim]. + + Returns + ------- + atom_feature: Updated atomic features with shape [nframes*nloc, natoms, embed_dim]. + attn_bias: Updated edge features as attention bias with shape [nframes*nloc, natoms, natoms, pair_dim]. + delta_pos: Delta position for force/vector prediction with shape [nframes*nloc, natoms, natoms, 3]. + """ + ncluster, natoms, _ = coord_selected.shape + # ncluster x natoms x natoms x 3 + delta_pos = coord_selected.unsqueeze(1) - coord_selected.unsqueeze(2) + # (ncluster x natoms x natoms + dist = delta_pos.norm(dim=-1).view(-1, natoms, natoms) + # [ncluster, natoms, natoms, K] + gbf_feature = self.gbf(dist, edge_type_2dim) + if self.atomic_sum_gbf: + edge_features = gbf_feature + # [ncluster, natoms, K] + sum_edge_features = edge_features.sum(dim=-2) + if self.edge_proj is not None: + sum_edge_features = self.edge_proj(sum_edge_features) + # [ncluster, natoms, embed_dim] + atom_feature = atom_feature + sum_edge_features + + # [ncluster, natoms, natoms, pair_dim] + gbf_result = self.gbf_proj(gbf_feature) + + attn_bias = gbf_result + edge_feature + return atom_feature, attn_bias, delta_pos + + +class NeighborWiseAttention(nn.Module): + def __init__( + self, + layer_num, + nnei, + embed_dim, + hidden_dim, + dotr=False, + do_mask=False, + post_ln=True, + ffn=False, + ffn_embed_dim=1024, + activation="tanh", + scaling_factor=1.0, + head_num=1, + normalize=True, + temperature=None, + ): + """Construct a neighbor-wise attention net.""" + super().__init__() + self.layer_num = layer_num + attention_layers = [] + for i in range(self.layer_num): + attention_layers.append( + NeighborWiseAttentionLayer( + nnei, + embed_dim, + hidden_dim, + dotr=dotr, + do_mask=do_mask, + post_ln=post_ln, + ffn=ffn, + ffn_embed_dim=ffn_embed_dim, + activation=activation, + scaling_factor=scaling_factor, + head_num=head_num, + normalize=normalize, + temperature=temperature, + ) + ) + self.attention_layers = nn.ModuleList(attention_layers) + + def forward( + self, + input_G, + nei_mask, + input_r: Optional[torch.Tensor] = None, + sw: Optional[torch.Tensor] = None, + ): + """ + Args: + input_G: Input G, [nframes * nloc, nnei, embed_dim]. + nei_mask: neighbor mask, [nframes * nloc, nnei]. + input_r: normalized radial, [nframes, nloc, nei, 3]. 
+ + Returns + ------- + out: Output G, [nframes * nloc, nnei, embed_dim] + + """ + out = input_G + # https://github.com/pytorch/pytorch/issues/39165#issuecomment-635472592 + for layer in self.attention_layers: + out = layer(out, nei_mask, input_r=input_r, sw=sw) + return out + + +class NeighborWiseAttentionLayer(nn.Module): + ffn: Final[bool] + + def __init__( + self, + nnei, + embed_dim, + hidden_dim, + dotr=False, + do_mask=False, + post_ln=True, + ffn=False, + ffn_embed_dim=1024, + activation="tanh", + scaling_factor=1.0, + head_num=1, + normalize=True, + temperature=None, + ): + """Construct a neighbor-wise attention layer.""" + super().__init__() + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.dotr = dotr + self.do_mask = do_mask + self.post_ln = post_ln + self.ffn = ffn + self.attention_layer = GatedSelfAttetion( + nnei, + embed_dim, + hidden_dim, + dotr=dotr, + do_mask=do_mask, + scaling_factor=scaling_factor, + head_num=head_num, + normalize=normalize, + temperature=temperature, + ) + self.attn_layer_norm = nn.LayerNorm( + self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + if self.ffn: + self.ffn_embed_dim = ffn_embed_dim + self.fc1 = nn.Linear( + self.embed_dim, self.ffn_embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.activation_fn = get_activation_fn(activation) + self.fc2 = nn.Linear( + self.ffn_embed_dim, self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.final_layer_norm = nn.LayerNorm( + self.embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + + def forward( + self, + x, + nei_mask, + input_r: Optional[torch.Tensor] = None, + sw: Optional[torch.Tensor] = None, + ): + residual = x + if not self.post_ln: + x = self.attn_layer_norm(x) + x = self.attention_layer(x, nei_mask, input_r=input_r, sw=sw) + x = residual + x + if self.post_ln: + x = self.attn_layer_norm(x) + if self.ffn: + residual = x + if not self.post_ln: + x = self.final_layer_norm(x) + x = self.fc1(x) + x = self.activation_fn(x) + x = self.fc2(x) + x = residual + x + if self.post_ln: + x = self.final_layer_norm(x) + return x + + +class GatedSelfAttetion(nn.Module): + def __init__( + self, + nnei, + embed_dim, + hidden_dim, + dotr=False, + do_mask=False, + scaling_factor=1.0, + head_num=1, + normalize=True, + temperature=None, + bias=True, + smooth=True, + ): + """Construct a neighbor-wise attention net.""" + super().__init__() + self.nnei = nnei + self.embed_dim = embed_dim + self.hidden_dim = hidden_dim + self.head_num = head_num + self.dotr = dotr + self.do_mask = do_mask + if temperature is None: + self.scaling = (self.hidden_dim * scaling_factor) ** -0.5 + else: + self.scaling = temperature + self.normalize = normalize + self.in_proj = SimpleLinear( + embed_dim, + hidden_dim * 3, + bavg=0.0, + stddev=1.0, + use_timestep=False, + bias=bias, + ) + self.out_proj = SimpleLinear( + hidden_dim, embed_dim, bavg=0.0, stddev=1.0, use_timestep=False, bias=bias + ) + self.smooth = smooth + + def forward( + self, + query, + nei_mask, + input_r: Optional[torch.Tensor] = None, + sw: Optional[torch.Tensor] = None, + attnw_shift: float = 20.0, + ): + """ + Args: + query: input G, [nframes * nloc, nnei, embed_dim]. + nei_mask: neighbor mask, [nframes * nloc, nnei]. + input_r: normalized radial, [nframes, nloc, nei, 3]. 
+ + Returns + ------- + type_embedding: + + """ + q, k, v = self.in_proj(query).chunk(3, dim=-1) + # [nframes * nloc, nnei, hidden_dim] + q = q.view(-1, self.nnei, self.hidden_dim) + k = k.view(-1, self.nnei, self.hidden_dim) + v = v.view(-1, self.nnei, self.hidden_dim) + if self.normalize: + q = F.normalize(q, dim=-1) + k = F.normalize(k, dim=-1) + v = F.normalize(v, dim=-1) + q = q * self.scaling + k = k.transpose(1, 2) + # [nframes * nloc, nnei, nnei] + attn_weights = torch.bmm(q, k) + # [nframes * nloc, nnei] + nei_mask = nei_mask.view(-1, self.nnei) + if self.smooth: + # [nframes * nloc, nnei] + assert sw is not None + sw = sw.view([-1, self.nnei]) + attn_weights = (attn_weights + attnw_shift) * sw[:, :, None] * sw[ + :, None, : + ] - attnw_shift + else: + attn_weights = attn_weights.masked_fill( + ~nei_mask.unsqueeze(1), float("-inf") + ) + attn_weights = F.softmax(attn_weights, dim=-1) + attn_weights = attn_weights.masked_fill(~nei_mask.unsqueeze(-1), 0.0) + if self.smooth: + assert sw is not None + attn_weights = attn_weights * sw[:, :, None] * sw[:, None, :] + if self.dotr: + assert input_r is not None, "input_r must be provided when dotr is True!" + angular_weight = torch.bmm(input_r, input_r.transpose(1, 2)) + attn_weights = attn_weights * angular_weight + o = torch.bmm(attn_weights, v) + output = self.out_proj(o) + return output + + +class LocalSelfMultiheadAttention(nn.Module): + def __init__(self, feature_dim, attn_head, scaling_factor=1.0): + super().__init__() + self.feature_dim = feature_dim + self.attn_head = attn_head + self.head_dim = feature_dim // attn_head + assert ( + feature_dim % attn_head == 0 + ), f"feature_dim {feature_dim} must be divided by attn_head {attn_head}!" + self.scaling = (self.head_dim * scaling_factor) ** -0.5 + self.in_proj = SimpleLinear(self.feature_dim, self.feature_dim * 3) + # TODO debug + # self.out_proj = SimpleLinear(self.feature_dim, self.feature_dim) + + def forward( + self, + query, + attn_bias: Optional[torch.Tensor] = None, + nlist_mask: Optional[torch.Tensor] = None, + nlist: Optional[torch.Tensor] = None, + return_attn=True, + ): + nframes, nloc, feature_dim = query.size() + _, _, nnei = nlist.size() + assert feature_dim == self.feature_dim + # [nframes, nloc, feature_dim] + q, k, v = self.in_proj(query).chunk(3, dim=-1) + # [nframes * attn_head * nloc, 1, head_dim] + q = ( + q.view(nframes, nloc, self.attn_head, self.head_dim) + .transpose(1, 2) + .contiguous() + .view(nframes * self.attn_head * nloc, 1, self.head_dim) + * self.scaling + ) + # [nframes, nloc, feature_dim] --> [nframes, nloc + 1, feature_dim] + # with nlist [nframes, nloc, nnei] --> [nframes, nloc, nnei, feature_dim] + # padding = torch.zeros(feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION).to(k.device) + # k = torch.concat([k, padding.unsqueeze(0).unsqueeze(1)], dim=1) + # v = torch.concat([v, padding.unsqueeze(0).unsqueeze(1)], dim=1) + + # [nframes, nloc * nnei, feature_dim] + index = nlist.view(nframes, -1).unsqueeze(-1).expand(-1, -1, feature_dim) + k = torch.gather(k, dim=1, index=index) + # [nframes, nloc * nnei, feature_dim] + v = torch.gather(v, dim=1, index=index) + # [nframes * attn_head * nloc, nnei, head_dim] + k = ( + k.view(nframes, nloc, nnei, self.attn_head, self.head_dim) + .permute(0, 3, 1, 2, 4) + .contiguous() + .view(nframes * self.attn_head * nloc, nnei, self.head_dim) + ) + v = ( + v.view(nframes, nloc, nnei, self.attn_head, self.head_dim) + .permute(0, 3, 1, 2, 4) + .contiguous() + .view(nframes * self.attn_head * nloc, nnei, self.head_dim) 
+ ) + # [nframes * attn_head * nloc, 1, nnei] + attn_weights = torch.bmm(q, k.transpose(1, 2)) + # maskfill + # [nframes, attn_head, nloc, nnei] + attn_weights = attn_weights.view( + nframes, self.attn_head, nloc, nnei + ).masked_fill(~nlist_mask.unsqueeze(1), float("-inf")) + # add bias + if return_attn: + attn_weights = attn_weights + attn_bias + # softmax + # [nframes * attn_head * nloc, 1, nnei] + attn = F.softmax(attn_weights, dim=-1).view( + nframes * self.attn_head * nloc, 1, nnei + ) + # bmm + # [nframes * attn_head * nloc, 1, head_dim] + o = torch.bmm(attn, v) + assert list(o.size()) == [nframes * self.attn_head * nloc, 1, self.head_dim] + # [nframes, nloc, feature_dim] + o = ( + o.view(nframes, self.attn_head, nloc, self.head_dim) + .transpose(1, 2) + .contiguous() + .view(nframes, nloc, self.feature_dim) + ) + # out + ## TODO debug: + # o = self.out_proj(o) + if not return_attn: + return o + else: + return o, attn_weights, attn + + +class NodeTaskHead(nn.Module): + def __init__( + self, + embed_dim: int, + pair_dim: int, + num_head: int, + ): + super().__init__() + self.layer_norm = nn.LayerNorm(embed_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + self.pair_norm = nn.LayerNorm(pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + self.embed_dim = embed_dim + self.q_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot") + self.k_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot") + self.v_proj = Linear(embed_dim, embed_dim, bias=False, init="glorot") + self.num_heads = num_head + self.head_dim = embed_dim // num_head + self.scaling = self.head_dim**-0.5 + self.force_proj = Linear(embed_dim, 1, init="final", bias=False) + self.linear_bias = Linear(pair_dim, num_head) + self.dropout = 0.1 + + def zero_init(self): + nn.init.zeros_(self.force_proj.weight) + + def forward( + self, + query: Tensor, + pair: Tensor, + delta_pos: Tensor, + attn_mask: Tensor = None, + ) -> Tensor: + ncluster, natoms, _ = query.size() + query = self.layer_norm(query) + # [ncluster, natoms, natoms, pair_dim] + pair = self.pair_norm(pair) + + # [ncluster, attn_head, natoms, head_dim] + q = ( + self.q_proj(query) + .view(ncluster, natoms, self.num_heads, -1) + .transpose(1, 2) + * self.scaling + ) + # [ncluster, attn_head, natoms, head_dim] + k = ( + self.k_proj(query) + .view(ncluster, natoms, self.num_heads, -1) + .transpose(1, 2) + ) + v = ( + self.v_proj(query) + .view(ncluster, natoms, self.num_heads, -1) + .transpose(1, 2) + ) + # [ncluster, attn_head, natoms, natoms] + attn = q @ k.transpose(-1, -2) + del q, k + # [ncluster, attn_head, natoms, natoms] + bias = self.linear_bias(pair).permute(0, 3, 1, 2).contiguous() + + # [ncluster, attn_head, natoms, natoms] + attn_probs = softmax_dropout( + attn, + self.dropout, + self.training, + mask=attn_mask, + bias=bias.contiguous(), + ).view(ncluster, self.num_heads, natoms, natoms) + + # delta_pos: [ncluster, natoms, natoms, 3] + # [ncluster, attn_head, natoms, natoms, 3] + rot_attn_probs = attn_probs.unsqueeze(-1) * delta_pos.unsqueeze(1).type_as( + attn_probs + ) + # [ncluster, attn_head, 3, natoms, natoms] + rot_attn_probs = rot_attn_probs.permute(0, 1, 4, 2, 3) + # [ncluster, attn_head, 3, natoms, head_dim] + x = rot_attn_probs @ v.unsqueeze(2) + # [ncluster, natoms, 3, embed_dim] + x = x.permute(0, 3, 2, 1, 4).contiguous().view(ncluster, natoms, 3, -1) + cur_force = self.force_proj(x).view(ncluster, natoms, 3) + return cur_force + + +class EnergyHead(nn.Module): + def __init__( + self, + input_dim, + output_dim, + ): + super().__init__() + 
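+        # a small readout head: linear_in -> LayerNorm -> GELU -> linear_out,
+        # mapping [..., input_dim] to [..., output_dim] (e.g. output_dim = 1
+        # for a scalar energy)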
self.layer_norm = nn.LayerNorm(input_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + self.linear_in = Linear(input_dim, input_dim, init="relu") + + self.linear_out = Linear(input_dim, output_dim, bias=True, init="final") + + def forward(self, x): + x = x.type(self.linear_in.weight.dtype) + x = F.gelu(self.layer_norm(self.linear_in(x))) + x = self.linear_out(x) + return x + + +class OuterProduct(nn.Module): + def __init__(self, d_atom, d_pair, d_hid=32): + super().__init__() + + self.d_atom = d_atom + self.d_pair = d_pair + self.d_hid = d_hid + + self.linear_in = nn.Linear( + d_atom, d_hid * 2, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.linear_out = nn.Linear( + d_hid**2, d_pair, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.act = nn.GELU() + + def _opm(self, a, b): + # [nframes, nloc, d] + nframes, nloc, d = a.shape + a = a.view(nframes, nloc, 1, d, 1) + b = b.view(nframes, 1, nloc, 1, d) + # [nframes, nloc, nloc, d, d] + outer = a * b + outer = outer.view(outer.shape[:-2] + (-1,)) + outer = self.linear_out(outer) + return outer + + def forward( + self, + m: torch.Tensor, + nlist: torch.Tensor, + op_mask: float, + op_norm: float, + ) -> torch.Tensor: + ab = self.linear_in(m) + ab = ab * op_mask + a, b = ab.chunk(2, dim=-1) + # [ncluster, natoms, natoms, d_pair] + z = self._opm(a, b) + z *= op_norm + return z + + +class Attention(nn.Module): + def __init__( + self, + q_dim: int, + k_dim: int, + v_dim: int, + head_dim: int, + num_heads: int, + gating: bool = False, + dropout: float = 0.0, + ): + super().__init__() + + self.num_heads = num_heads + self.head_dim = head_dim + total_dim = head_dim * self.num_heads + self.total_dim = total_dim + self.q_dim = q_dim + self.gating = gating + self.linear_q = Linear(q_dim, total_dim, bias=False, init="glorot") + self.linear_k = Linear(k_dim, total_dim, bias=False, init="glorot") + self.linear_v = Linear(v_dim, total_dim, bias=False, init="glorot") + self.linear_o = Linear(total_dim, q_dim, init="final") + self.linear_g = None + if self.gating: + self.linear_g = Linear(q_dim, total_dim, init="gating") + # precompute the 1/sqrt(head_dim) + self.norm = head_dim**-0.5 + self.dropout = dropout + + def forward( + self, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + bias: torch.Tensor, + mask: torch.Tensor = None, + ) -> torch.Tensor: + nframes, nloc, embed_dim = q.size() + g = None + if self.linear_g is not None: + # gating, use raw query input + # [nframes, nloc, total_dim] + g = self.linear_g(q) + # [nframes, nloc, total_dim] + q = self.linear_q(q) + q *= self.norm + # [nframes, nloc, total_dim] + k = self.linear_k(k) + # [nframes, nloc, total_dim] + v = self.linear_v(v) + # global + # q [nframes, h, nloc, d] + # k [nframes, h, nloc, d] + # v [nframes, h, nloc, d] + # attn [nframes, h, nloc, nloc] + # o [nframes, h, nloc, d] + + # [nframes, h, nloc, d] + q = q.view(q.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3).contiguous() + k = k.view(k.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3).contiguous() + v = v.view(v.shape[:-1] + (self.num_heads, -1)).transpose(-2, -3) + # [nframes, h, nloc, nloc] + attn = torch.matmul(q, k.transpose(-1, -2)) + del q, k + # [nframes, h, nloc, nloc] + attn = softmax_dropout(attn, self.dropout, self.training, mask=mask, bias=bias) + # [nframes, h, nloc, d] + o = torch.matmul(attn, v) + del attn, v + + # local + # q [nframes, h, nloc, 1, d] + # k [nframes, h, nloc, nnei, d] + # v [nframes, h, nloc, nnei, d] + # attn [nframes, h, nloc, nnei] + # o [nframes, h, nloc, d] + + assert list(o.size()) == 
[nframes, self.num_heads, nloc, self.head_dim] + # [nframes, nloc, total_dim] + o = o.transpose(-2, -3).contiguous() + o = o.view(*o.shape[:-2], -1) + + if g is not None: + o = torch.sigmoid(g) * o + + # merge heads + o = self.linear_o(o) + return o + + +class AtomAttention(nn.Module): + def __init__( + self, + q_dim: int, + k_dim: int, + v_dim: int, + pair_dim: int, + head_dim: int, + num_heads: int, + gating: bool = False, + dropout: float = 0.0, + ): + super().__init__() + + self.mha = Attention( + q_dim, k_dim, v_dim, head_dim, num_heads, gating=gating, dropout=dropout + ) + self.layer_norm = nn.LayerNorm(pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + self.linear_bias = Linear(pair_dim, num_heads) + + def forward( + self, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + nlist: torch.Tensor, + pair: torch.Tensor, + mask: torch.Tensor = None, + ) -> torch.Tensor: + pair = self.layer_norm(pair) + bias = self.linear_bias(pair).permute(0, 3, 1, 2).contiguous() + return self.mha(q, k, v, bias=bias, mask=mask) + + +class TriangleMultiplication(nn.Module): + def __init__(self, d_pair, d_hid): + super().__init__() + + self.linear_ab_p = Linear(d_pair, d_hid * 2) + self.linear_ab_g = Linear(d_pair, d_hid * 2, init="gating") + + self.linear_g = Linear(d_pair, d_pair, init="gating") + self.linear_z = Linear(d_hid, d_pair, init="final") + + self.layer_norm_out = nn.LayerNorm(d_hid, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + + def forward( + self, + z: torch.Tensor, + mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + # z : [nframes, nloc, nloc, pair_dim] + + # [nframes, nloc, nloc, pair_dim] + g = self.linear_g(z) + if self.training: + ab = self.linear_ab_p(z) * torch.sigmoid(self.linear_ab_g(z)) + else: + ab = self.linear_ab_p(z) + ab *= torch.sigmoid(self.linear_ab_g(z)) + # [nframes, nloc, nloc, d] + a, b = torch.chunk(ab, 2, dim=-1) + del z, ab + + # [nframes, d, nloc_i, nloc_k] row not trans + a1 = a.permute(0, 3, 1, 2) + # [nframes, d, nloc_k, nloc_j(i)] trans + b1 = b.transpose(-1, -3) + # [nframes, d, nloc_i, nloc_j] + x = torch.matmul(a1, b1) + del a1, b1 + + # [nframes, d, nloc_k, nloc_j(i)] not trans + b2 = b.permute(0, 3, 1, 2) + # [nframes, d, nloc_i, nloc_k] col trans # check TODO + a2 = a.transpose(-1, -3) + + # [nframes, d, nloc_i, nloc_j] + x = x + torch.matmul(a2, b2) + del a, b, a2, b2 + + # [nframes, nloc_i, nloc_j, d] + x = x.permute(0, 2, 3, 1) + + x = self.layer_norm_out(x) + x = self.linear_z(x) + return g * x + + +class EvoformerEncoderLayer(nn.Module): + def __init__( + self, + feature_dim: int = 768, + ffn_dim: int = 2048, + attn_head: int = 8, + activation_fn: str = "gelu", + post_ln: bool = False, + ): + super().__init__() + self.feature_dim = feature_dim + self.ffn_dim = ffn_dim + self.attn_head = attn_head + self.activation_fn = ( + get_activation_fn(activation_fn) if activation_fn is not None else None + ) + self.post_ln = post_ln + self.self_attn_layer_norm = nn.LayerNorm( + self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + + self.self_attn = LocalSelfMultiheadAttention( + self.feature_dim, + self.attn_head, + ) + self.final_layer_norm = nn.LayerNorm( + self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.fc1 = SimpleLinear(self.feature_dim, self.ffn_dim) + self.fc2 = SimpleLinear(self.ffn_dim, self.feature_dim) + + def forward( + self, + x, + attn_bias: Optional[torch.Tensor] = None, + nlist_mask: Optional[torch.Tensor] = None, + nlist: Optional[torch.Tensor] = None, + return_attn=True, + ): + residual = x + if not 
self.post_ln: + x = self.self_attn_layer_norm(x) + x = self.self_attn( + query=x, + attn_bias=attn_bias, + nlist_mask=nlist_mask, + nlist=nlist, + return_attn=return_attn, + ) + if return_attn: + x, attn_weights, attn_probs = x + x = residual + x + if self.post_ln: + x = self.self_attn_layer_norm(x) + + residual = x + if not self.post_ln: + x = self.final_layer_norm(x) + x = self.fc1(x) + x = self.activation_fn(x) + x = self.fc2(x) + x = residual + x + if self.post_ln: + x = self.final_layer_norm(x) + if not return_attn: + return x + else: + return x, attn_weights, attn_probs + + +# output: atomic_rep, transformed_atomic_rep, pair_rep, delta_pair_rep, norm_x, norm_delta_pair_rep, +class Evoformer2bEncoder(nn.Module): + def __init__( + self, + nnei: int, + layer_num: int = 6, + attn_head: int = 8, + atomic_dim: int = 1024, + pair_dim: int = 100, + feature_dim: int = 1024, + ffn_dim: int = 2048, + post_ln: bool = False, + final_layer_norm: bool = True, + final_head_layer_norm: bool = False, + emb_layer_norm: bool = False, + atomic_residual: bool = False, + evo_residual: bool = False, + residual_factor: float = 1.0, + activation_function: str = "gelu", + ): + super().__init__() + self.nnei = nnei + self.layer_num = layer_num + self.attn_head = attn_head + self.atomic_dim = atomic_dim + self.pair_dim = pair_dim + self.feature_dim = feature_dim + self.ffn_dim = ffn_dim + self.post_ln = post_ln + self._final_layer_norm = final_layer_norm + self._final_head_layer_norm = final_head_layer_norm + self._emb_layer_norm = emb_layer_norm + self.activation_function = activation_function + self.evo_residual = evo_residual + self.residual_factor = residual_factor + if atomic_residual and atomic_dim == feature_dim: + self.atomic_residual = True + else: + self.atomic_residual = False + self.in_proj = SimpleLinear( + self.atomic_dim, + self.feature_dim, + bavg=0.0, + stddev=1.0, + use_timestep=False, + activate="tanh", + ) # TODO + self.out_proj = SimpleLinear( + self.feature_dim, + self.atomic_dim, + bavg=0.0, + stddev=1.0, + use_timestep=False, + activate="tanh", + ) + if self._emb_layer_norm: + self.emb_layer_norm = nn.LayerNorm( + self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + + ## TODO debug : self.in_proj_pair = NonLinearHead(self.pair_dim, self.attn_head, activation_fn=None) + self.in_proj_pair = SimpleLinear(self.pair_dim, self.attn_head, activate=None) + evoformer_encoder_layers = [] + for i in range(self.layer_num): + evoformer_encoder_layers.append( + EvoformerEncoderLayer( + feature_dim=self.feature_dim, + ffn_dim=self.ffn_dim, + attn_head=self.attn_head, + activation_fn=self.activation_function, + post_ln=self.post_ln, + ) + ) + self.evoformer_encoder_layers = nn.ModuleList(evoformer_encoder_layers) + if self._final_layer_norm: + self.final_layer_norm = nn.LayerNorm( + self.feature_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + if self._final_head_layer_norm: + self.final_head_layer_norm = nn.LayerNorm( + self.attn_head, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + + def forward(self, atomic_rep, pair_rep, nlist, nlist_type, nlist_mask): + """Encoder the atomic and pair representations. + + Args: + - atomic_rep: Atomic representation with shape [nframes, nloc, atomic_dim]. + - pair_rep: Pair representation with shape [nframes, nloc, nnei, pair_dim]. + - nlist: Neighbor list with shape [nframes, nloc, nnei]. + - nlist_type: Neighbor types with shape [nframes, nloc, nnei]. + - nlist_mask: Neighbor mask with shape [nframes, nloc, nnei], `False` if blank. 
+ + Returns + ------- + - atomic_rep: Atomic representation after encoder with shape [nframes, nloc, feature_dim]. + - transformed_atomic_rep: Transformed atomic representation after encoder with shape [nframes, nloc, atomic_dim]. + - pair_rep: Pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. + - delta_pair_rep: Delta pair representation after encoder with shape [nframes, nloc, nnei, attn_head]. + - norm_x: Normalization loss of atomic_rep. + - norm_delta_pair_rep: Normalization loss of delta_pair_rep. + """ + # Global branch + nframes, nloc, _ = atomic_rep.size() + nnei = pair_rep.shape[2] + input_atomic_rep = atomic_rep + # [nframes, nloc, feature_dim] + if self.atomic_residual: + atomic_rep = atomic_rep + self.in_proj(atomic_rep) + else: + atomic_rep = self.in_proj(atomic_rep) + + if self._emb_layer_norm: + atomic_rep = self.emb_layer_norm(atomic_rep) + + # Local branch + # [nframes, nloc, nnei, attn_head] + pair_rep = self.in_proj_pair(pair_rep) + # [nframes, attn_head, nloc, nnei] + pair_rep = pair_rep.permute(0, 3, 1, 2).contiguous() + input_pair_rep = pair_rep + pair_rep = pair_rep.masked_fill(~nlist_mask.unsqueeze(1), float("-inf")) + + for i in range(self.layer_num): + atomic_rep, pair_rep, _ = self.evoformer_encoder_layers[i]( + atomic_rep, + attn_bias=pair_rep, + nlist_mask=nlist_mask, + nlist=nlist, + return_attn=True, + ) + + def norm_loss(x, eps=1e-10, tolerance=1.0): + # x = x.float() + max_norm = x.shape[-1] ** 0.5 + norm = torch.sqrt(torch.sum(x**2, dim=-1) + eps) + error = F.relu((norm - max_norm).abs() - tolerance) + return error + + def masked_mean(mask, value, dim=-1, eps=1e-10): + return ( + torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim)) + ).mean() + + # atomic_rep shape: [nframes, nloc, feature_dim] + # pair_rep shape: [nframes, attn_head, nloc, nnei] + + norm_x = torch.mean(norm_loss(atomic_rep)) + if self._final_layer_norm: + atomic_rep = self.final_layer_norm(atomic_rep) + + delta_pair_rep = pair_rep - input_pair_rep + delta_pair_rep = delta_pair_rep.masked_fill(~nlist_mask.unsqueeze(1), 0) + # [nframes, nloc, nnei, attn_head] + delta_pair_rep = ( + delta_pair_rep.view(nframes, self.attn_head, nloc, nnei) + .permute(0, 2, 3, 1) + .contiguous() + ) + + # [nframes, nloc, nnei] + norm_delta_pair_rep = norm_loss(delta_pair_rep) + norm_delta_pair_rep = masked_mean(mask=nlist_mask, value=norm_delta_pair_rep) + if self._final_head_layer_norm: + delta_pair_rep = self.final_head_layer_norm(delta_pair_rep) + + if self.atomic_residual: + transformed_atomic_rep = atomic_rep + self.out_proj(atomic_rep) + else: + transformed_atomic_rep = self.out_proj(atomic_rep) + + if self.evo_residual: + transformed_atomic_rep = ( + self.residual_factor * transformed_atomic_rep + input_atomic_rep + ) * (1 / np.sqrt(2)) + + return ( + atomic_rep, + transformed_atomic_rep, + pair_rep, + delta_pair_rep, + norm_x, + norm_delta_pair_rep, + ) + + +class Evoformer3bEncoderLayer(nn.Module): + def __init__( + self, + nnei, + embedding_dim: int = 768, + pair_dim: int = 64, + pair_hidden_dim: int = 32, + ffn_embedding_dim: int = 3072, + num_attention_heads: int = 8, + dropout: float = 0.1, + droppath_prob: float = 0.0, + pair_dropout: float = 0.25, + attention_dropout: float = 0.1, + activation_dropout: float = 0.1, + pre_ln: bool = True, + tri_update: bool = True, + ): + super().__init__() + # Initialize parameters + self.nnei = nnei + self.embedding_dim = embedding_dim + self.num_attention_heads = num_attention_heads + self.attention_dropout = 
attention_dropout + + # self.dropout = dropout + self.activation_dropout = activation_dropout + + if droppath_prob > 0.0: + self.dropout_module = DropPath(droppath_prob) + else: + self.dropout_module = Dropout(dropout) + + # self.self_attn = AtomAttentionLocal(embedding_dim, embedding_dim, embedding_dim, pair_dim, + # embedding_dim // num_attention_heads, num_attention_heads, + # gating=False, dropout=attention_dropout) + self.self_attn = AtomAttention( + embedding_dim, + embedding_dim, + embedding_dim, + pair_dim, + embedding_dim // num_attention_heads, + num_attention_heads, + gating=False, + dropout=attention_dropout, + ) + # layer norm associated with the self attention layer + self.pre_ln = pre_ln + self.self_attn_layer_norm = nn.LayerNorm( + self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.fc1 = nn.Linear( + self.embedding_dim, ffn_embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.fc2 = nn.Linear( + ffn_embedding_dim, self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.final_layer_norm = nn.LayerNorm( + self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + + self.x_layer_norm_opm = nn.LayerNorm( + self.embedding_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + # self.opm = OuterProductLocal(self.embedding_dim, pair_dim, d_hid=pair_hidden_dim) + self.opm = OuterProduct(self.embedding_dim, pair_dim, d_hid=pair_hidden_dim) + # self.pair_layer_norm_opm = nn.LayerNorm(pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + self.pair_layer_norm_ffn = nn.LayerNorm( + pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.pair_ffn = Transition( + pair_dim, + 1, + dropout=activation_dropout, + ) + self.pair_dropout = pair_dropout + self.tri_update = tri_update + if self.tri_update: + self.pair_layer_norm_trimul = nn.LayerNorm( + pair_dim, dtype=env.GLOBAL_PT_FLOAT_PRECISION + ) + self.pair_tri_mul = TriangleMultiplication(pair_dim, pair_hidden_dim) + + def update_pair( + self, + x, + pair, + nlist, + op_mask, + op_norm, + ): + # local: + # [nframes, nloc, nnei, pair_dim] + # global: + # [nframes, nloc, nloc, pair_dim] + pair = pair + self.dropout_module( + self.opm(self.x_layer_norm_opm(x), nlist, op_mask, op_norm) + ) + if not self.pre_ln: + pair = self.pair_layer_norm_opm(pair) + return x, pair + + def shared_dropout(self, x, shared_dim, dropout): + shape = list(x.shape) + shape[shared_dim] = 1 + with torch.no_grad(): + mask = x.new_ones(shape) + return F.dropout(mask, p=dropout, training=self.training) * x + + def forward( + self, + x: torch.Tensor, + pair: torch.Tensor, + nlist: torch.Tensor = None, + attn_mask: Optional[torch.Tensor] = None, + pair_mask: Optional[torch.Tensor] = None, + op_mask: float = 1.0, + op_norm: float = 1.0, + ): + """Encoder the atomic and pair representations. + + Args: + - x: Atomic representation with shape [ncluster, natoms, embed_dim]. + - pair: Pair representation with shape [ncluster, natoms, natoms, pair_dim]. + - attn_mask: Attention mask with shape [ncluster, head, natoms, natoms]. + - pair_mask: Neighbor mask with shape [ncluster, natoms, natoms]. 
+ + """ + # [ncluster, natoms, embed_dim] + residual = x + if self.pre_ln: + x = self.self_attn_layer_norm(x) + x = self.self_attn( + x, + x, + x, + nlist=nlist, + pair=pair, + mask=attn_mask, + ) + # x = F.dropout(x, p=self.dropout, training=self.training) + x = self.dropout_module(x) + x = residual + x + if not self.pre_ln: + x = self.self_attn_layer_norm(x) + + residual = x + if self.pre_ln: + x = self.final_layer_norm(x) + x = F.linear(x, self.fc1.weight) + # x = fused_ops.bias_torch_gelu(x, self.fc1.bias) + x = nn.GELU()(x) + self.fc1.bias + x = F.dropout(x, p=self.activation_dropout, training=self.training) + x = self.fc2(x) + # x = F.dropout(x, p=self.dropout, training=self.training) + x = self.dropout_module(x) + + x = residual + x + if not self.pre_ln: + x = self.final_layer_norm(x) + + block = [ + partial( + self.update_pair, + nlist=nlist, + op_mask=op_mask, + op_norm=op_norm, + ) + ] + + x, pair = checkpoint_sequential( + block, + input_x=(x, pair), + ) + + if self.tri_update: + residual_pair = pair + if self.pre_ln: + pair = self.pair_layer_norm_trimul(pair) + + pair = self.shared_dropout( + self.pair_tri_mul(pair, pair_mask), -3, self.pair_dropout + ) + pair = residual_pair + pair + if not self.pre_ln: + pair = self.pair_layer_norm_trimul(pair) + + residual_pair = pair + if self.pre_ln: + pair = self.pair_layer_norm_ffn(pair) + pair = self.dropout_module(self.pair_ffn(pair)) + pair = residual_pair + pair + if not self.pre_ln: + pair = self.pair_layer_norm_ffn(pair) + return x, pair + + +class Evoformer3bEncoder(nn.Module): + def __init__( + self, + nnei, + layer_num=6, + attn_head=8, + atomic_dim=768, + pair_dim=64, + pair_hidden_dim=32, + ffn_embedding_dim=3072, + dropout: float = 0.1, + droppath_prob: float = 0.0, + pair_dropout: float = 0.25, + attention_dropout: float = 0.1, + activation_dropout: float = 0.1, + pre_ln: bool = True, + tri_update: bool = True, + **kwargs, + ): + super().__init__() + self.nnei = nnei + if droppath_prob > 0: + droppath_probs = [ + x.item() for x in torch.linspace(0, droppath_prob, layer_num) + ] + else: + droppath_probs = None + + self.layers = nn.ModuleList( + [ + Evoformer3bEncoderLayer( + nnei, + atomic_dim, + pair_dim, + pair_hidden_dim, + ffn_embedding_dim, + num_attention_heads=attn_head, + dropout=dropout, + droppath_prob=droppath_probs[_], + pair_dropout=pair_dropout, + attention_dropout=attention_dropout, + activation_dropout=activation_dropout, + pre_ln=pre_ln, + tri_update=tri_update, + ) + for _ in range(layer_num) + ] + ) + + def forward(self, x, pair, attn_mask=None, pair_mask=None, atom_mask=None): + """Encoder the atomic and pair representations. + + Args: + x: Atomic representation with shape [ncluster, natoms, atomic_dim]. + pair: Pair representation with shape [ncluster, natoms, natoms, pair_dim]. + attn_mask: Attention mask (with -inf for softmax) with shape [ncluster, head, natoms, natoms]. + pair_mask: Pair mask (with 1 for real atom pair and 0 for padding) with shape [ncluster, natoms, natoms]. + atom_mask: Atom mask (with 1 for real atom and 0 for padding) with shape [ncluster, natoms]. + + Returns + ------- + x: Atomic representation with shape [ncluster, natoms, atomic_dim]. + pair: Pair representation with shape [ncluster, natoms, natoms, pair_dim]. 
+ + """ + # [ncluster, natoms, 1] + op_mask = atom_mask.unsqueeze(-1) + op_mask = op_mask * (op_mask.size(-2) ** -0.5) + eps = 1e-3 + # [ncluster, natoms, natoms, 1] + op_norm = 1.0 / (eps + torch.einsum("...bc,...dc->...bdc", op_mask, op_mask)) + for layer in self.layers: + x, pair = layer( + x, + pair, + nlist=None, + attn_mask=attn_mask, + pair_mask=pair_mask, + op_mask=op_mask, + op_norm=op_norm, + ) + return x, pair diff --git a/deepmd/pt/model/task/__init__.py b/deepmd/pt/model/task/__init__.py new file mode 100644 index 0000000000..fcf46632f3 --- /dev/null +++ b/deepmd/pt/model/task/__init__.py @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .atten_lcc import ( + FittingNetAttenLcc, +) +from .denoise import ( + DenoiseNet, +) +from .dipole import ( + DipoleFittingNetType, +) +from .ener import ( + EnergyFittingNet, + EnergyFittingNetDirect, +) +from .fitting import ( + Fitting, +) +from .task import ( + TaskBaseMethod, +) +from .type_predict import ( + TypePredictNet, +) + +__all__ = [ + "FittingNetAttenLcc", + "DenoiseNet", + "DipoleFittingNetType", + "EnergyFittingNet", + "EnergyFittingNetDirect", + "Fitting", + "TaskBaseMethod", + "TypePredictNet", +] diff --git a/deepmd/pt/model/task/atten_lcc.py b/deepmd/pt/model/task/atten_lcc.py new file mode 100644 index 0000000000..41ccf99330 --- /dev/null +++ b/deepmd/pt/model/task/atten_lcc.py @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch +import torch.nn as nn + +from deepmd.pt.model.network.network import ( + EnergyHead, + NodeTaskHead, +) +from deepmd.pt.model.task.task import ( + TaskBaseMethod, +) +from deepmd.pt.utils import ( + env, +) + + +class FittingNetAttenLcc(TaskBaseMethod): + def __init__( + self, embedding_width, bias_atom_e, pair_embed_dim, attention_heads, **kwargs + ): + super().__init__() + self.embedding_width = embedding_width + self.engergy_proj = EnergyHead(self.embedding_width, 1) + self.energe_agg_factor = nn.Embedding(4, 1, dtype=env.GLOBAL_PT_FLOAT_PRECISION) + nn.init.normal_(self.energe_agg_factor.weight, 0, 0.01) + bias_atom_e = torch.tensor(bias_atom_e) + self.register_buffer("bias_atom_e", bias_atom_e) + self.pair_embed_dim = pair_embed_dim + self.attention_heads = attention_heads + self.node_proc = NodeTaskHead( + self.embedding_width, self.pair_embed_dim, self.attention_heads + ) + self.node_proc.zero_init() + + def forward(self, output, pair, delta_pos, atype, nframes, nloc): + # [nframes x nloc x tebd_dim] + output_nloc = (output[:, 0, :]).reshape(nframes, nloc, self.embedding_width) + # Optional: GRRG or mean of gbf TODO + + # energy outut + # [nframes, nloc] + energy_out = self.engergy_proj(output_nloc).view(nframes, nloc) + # [nframes, nloc] + energy_factor = self.energe_agg_factor(torch.zeros_like(atype)).view( + nframes, nloc + ) + energy_out = (energy_out * energy_factor) + self.bias_atom_e[atype] + energy_out = energy_out.sum(dim=-1) + + # vector output + # predict_force: [(nframes x nloc) x (1 + nnei2) x 3] + predict_force = self.node_proc(output, pair, delta_pos=delta_pos) + # predict_force_nloc: [nframes x nloc x 3] + predict_force_nloc = (predict_force[:, 0, :]).reshape(nframes, nloc, 3) + return energy_out, predict_force_nloc diff --git a/deepmd/pt/model/task/denoise.py b/deepmd/pt/model/task/denoise.py new file mode 100644 index 0000000000..7e6b6dcdb6 --- /dev/null +++ b/deepmd/pt/model/task/denoise.py @@ -0,0 +1,129 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import torch + +from 
deepmd.model_format import ( + FittingOutputDef, + OutputVariableDef, + fitting_check_output, +) +from deepmd.pt.model.network.network import ( + MaskLMHead, + NonLinearHead, +) +from deepmd.pt.model.task.task import ( + TaskBaseMethod, +) +from deepmd.pt.utils import ( + env, +) + + +@fitting_check_output +class DenoiseNet(TaskBaseMethod): + def __init__( + self, + feature_dim, + ntypes, + attn_head=8, + prefactor=[0.5, 0.5], + activation_function="gelu", + **kwargs, + ): + """Construct a denoise net. + + Args: + - ntypes: Element count. + - embedding_width: Embedding width per atom. + - neuron: Number of neurons in each hidden layers of the fitting net. + - bias_atom_e: Average enery per atom for each element. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.feature_dim = feature_dim + self.ntypes = ntypes + self.attn_head = attn_head + self.prefactor = torch.tensor( + prefactor, dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE + ) + + self.lm_head = MaskLMHead( + embed_dim=self.feature_dim, + output_dim=ntypes, + activation_fn=activation_function, + weight=None, + ) + + if not isinstance(self.attn_head, list): + self.pair2coord_proj = NonLinearHead( + self.attn_head, 1, activation_fn=activation_function + ) + else: + self.pair2coord_proj = [] + self.ndescriptor = len(self.attn_head) + for ii in range(self.ndescriptor): + _pair2coord_proj = NonLinearHead( + self.attn_head[ii], 1, activation_fn=activation_function + ) + self.pair2coord_proj.append(_pair2coord_proj) + self.pair2coord_proj = torch.nn.ModuleList(self.pair2coord_proj) + + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef( + "updated_coord", [3], reduciable=False, differentiable=False + ), + OutputVariableDef( + "logits", [-1], reduciable=False, differentiable=False + ), + ] + ) + + def forward( + self, + pair_weights, + diff, + nlist_mask, + features, + sw, + masked_tokens: Optional[torch.Tensor] = None, + ): + """Calculate the updated coord. + Args: + - coord: Input noisy coord with shape [nframes, nloc, 3]. + - pair_weights: Input pair weights with shape [nframes, nloc, nnei, head]. + - diff: Input pair relative coord list with shape [nframes, nloc, nnei, 3]. + - nlist_mask: Input nlist mask with shape [nframes, nloc, nnei]. + + Returns + ------- + - denoised_coord: Denoised updated coord with shape [nframes, nloc, 3]. 
+ """ + # [nframes, nloc, nnei, 1] + logits = self.lm_head(features, masked_tokens=masked_tokens) + if not isinstance(self.attn_head, list): + attn_probs = self.pair2coord_proj(pair_weights) + out_coord = (attn_probs * diff).sum(dim=-2) / ( + sw.sum(dim=-1).unsqueeze(-1) + 1e-6 + ) + else: + assert len(self.prefactor) == self.ndescriptor + all_coord_update = [] + assert len(pair_weights) == len(diff) == len(nlist_mask) == self.ndescriptor + for ii in range(self.ndescriptor): + _attn_probs = self.pair2coord_proj[ii](pair_weights[ii]) + _coord_update = (_attn_probs * diff[ii]).sum(dim=-2) / ( + nlist_mask[ii].sum(dim=-1).unsqueeze(-1) + 1e-6 + ) + all_coord_update.append(_coord_update) + out_coord = self.prefactor[0] * all_coord_update[0] + for ii in range(self.ndescriptor - 1): + out_coord += self.prefactor[ii + 1] * all_coord_update[ii + 1] + return { + "updated_coord": out_coord, + "logits": logits, + } diff --git a/deepmd/pt/model/task/dipole.py b/deepmd/pt/model/task/dipole.py new file mode 100644 index 0000000000..8511c7dc29 --- /dev/null +++ b/deepmd/pt/model/task/dipole.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging + +import torch + +from deepmd.pt.model.network.network import ( + ResidualDeep, +) +from deepmd.pt.model.task.task import ( + TaskBaseMethod, +) + + +class DipoleFittingNetType(TaskBaseMethod): + def __init__( + self, ntypes, embedding_width, neuron, out_dim, resnet_dt=True, **kwargs + ): + """Construct a fitting net for dipole. + + Args: + - ntypes: Element count. + - embedding_width: Embedding width per atom. + - neuron: Number of neurons in each hidden layer of the fitting net. + - out_dim: Output dimension of the fitting net. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.ntypes = ntypes + self.embedding_width = embedding_width + self.out_dim = out_dim + + filter_layers = [] + one = ResidualDeep( + 0, embedding_width, neuron, 0.0, out_dim=self.out_dim, resnet_dt=resnet_dt + ) + filter_layers.append(one) + self.filter_layers = torch.nn.ModuleList(filter_layers) + + if "seed" in kwargs: + logging.info("Set seed to %d in fitting net.", kwargs["seed"]) + torch.manual_seed(kwargs["seed"]) + + def forward(self, inputs, atype, atype_tebd, rot_mat): + """Based on embedding net output, calculate the dipole vector. + + Args: + - inputs: Descriptor. Its shape is [nframes, nloc, self.embedding_width]. + - atype: Atom type. Its shape is [nframes, nloc]. + - atype_tebd: Atom type embedding. Its shape is [nframes, nloc, tebd_dim]. + - rot_mat: GR during descriptor calculation. Its shape is [nframes * nloc, m1, 3]. + + Returns + ------- + - vec_out: Output vector. Its shape is [nframes, nloc, 3].
+ """ + nframes, nloc, _ = inputs.size() + if atype_tebd is not None: + inputs = torch.concat([inputs, atype_tebd], dim=-1) + vec_out = self.filter_layers[0](inputs) # Shape is [nframes, nloc, m1] + assert list(vec_out.size()) == [nframes, nloc, self.out_dim] + vec_out = vec_out.view(-1, 1, self.out_dim) + vec_out = ( + torch.bmm(vec_out, rot_mat).squeeze(-2).view(nframes, nloc, 3) + ) # Shape is [nframes, nloc, 3] + return vec_out diff --git a/deepmd/pt/model/task/ener.py b/deepmd/pt/model/task/ener.py new file mode 100644 index 0000000000..7ddcbd5c54 --- /dev/null +++ b/deepmd/pt/model/task/ener.py @@ -0,0 +1,241 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from typing import ( + Optional, + Tuple, +) + +import torch + +from deepmd.model_format import ( + FittingOutputDef, + OutputVariableDef, + fitting_check_output, +) +from deepmd.pt.model.network.network import ( + ResidualDeep, +) +from deepmd.pt.model.task.fitting import ( + Fitting, +) +from deepmd.pt.utils import ( + env, +) + + +@Fitting.register("ener") +@fitting_check_output +class EnergyFittingNet(Fitting): + def __init__( + self, + ntypes, + embedding_width, + neuron, + bias_atom_e, + resnet_dt=True, + use_tebd=True, + **kwargs, + ): + """Construct a fitting net for energy. + + Args: + - ntypes: Element count. + - embedding_width: Embedding width per atom. + - neuron: Number of neurons in each hidden layers of the fitting net. + - bias_atom_e: Average enery per atom for each element. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.ntypes = ntypes + self.embedding_width = embedding_width + self.use_tebd = use_tebd + if not use_tebd: + assert self.ntypes == len(bias_atom_e), "Element count mismatches!" + bias_atom_e = torch.tensor(bias_atom_e) + self.register_buffer("bias_atom_e", bias_atom_e) + + filter_layers = [] + for type_i in range(self.ntypes): + bias_type = 0.0 + one = ResidualDeep( + type_i, embedding_width, neuron, bias_type, resnet_dt=resnet_dt + ) + filter_layers.append(one) + self.filter_layers = torch.nn.ModuleList(filter_layers) + + if "seed" in kwargs: + logging.info("Set seed to %d in fitting net.", kwargs["seed"]) + torch.manual_seed(kwargs["seed"]) + + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef("energy", [1], reduciable=True, differentiable=True), + ] + ) + + def forward( + self, + inputs: torch.Tensor, + atype: torch.Tensor, + atype_tebd: Optional[torch.Tensor] = None, + rot_mat: Optional[torch.Tensor] = None, + ): + """Based on embedding net output, alculate total energy. + + Args: + - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.embedding_width]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + + Returns + ------- + - `torch.Tensor`: Total energy with shape [nframes, natoms[0]]. 
+ """ + outs = torch.zeros_like(atype).unsqueeze(-1) # jit assertion + if self.use_tebd: + if atype_tebd is not None: + inputs = torch.concat([inputs, atype_tebd], dim=-1) + atom_energy = self.filter_layers[0](inputs) + self.bias_atom_e[ + atype + ].unsqueeze(-1) + outs = outs + atom_energy # Shape is [nframes, natoms[0], 1] + else: + for type_i, filter_layer in enumerate(self.filter_layers): + mask = atype == type_i + atom_energy = filter_layer(inputs) + atom_energy = atom_energy + self.bias_atom_e[type_i] + atom_energy = atom_energy * mask.unsqueeze(-1) + outs = outs + atom_energy # Shape is [nframes, natoms[0], 1] + return {"energy": outs.to(env.GLOBAL_PT_FLOAT_PRECISION)} + + +@Fitting.register("direct_force") +@Fitting.register("direct_force_ener") +@fitting_check_output +class EnergyFittingNetDirect(Fitting): + def __init__( + self, + ntypes, + embedding_width, + neuron, + bias_atom_e, + out_dim=1, + resnet_dt=True, + use_tebd=True, + return_energy=False, + **kwargs, + ): + """Construct a fitting net for energy. + + Args: + - ntypes: Element count. + - embedding_width: Embedding width per atom. + - neuron: Number of neurons in each hidden layers of the fitting net. + - bias_atom_e: Average enery per atom for each element. + - resnet_dt: Using time-step in the ResNet construction. + """ + super().__init__() + self.ntypes = ntypes + self.embedding_width = embedding_width + self.use_tebd = use_tebd + self.out_dim = out_dim + if not use_tebd: + assert self.ntypes == len(bias_atom_e), "Element count mismatches!" + bias_atom_e = torch.tensor(bias_atom_e) + self.register_buffer("bias_atom_e", bias_atom_e) + + filter_layers_dipole = [] + for type_i in range(self.ntypes): + one = ResidualDeep( + type_i, + embedding_width, + neuron, + 0.0, + out_dim=out_dim, + resnet_dt=resnet_dt, + ) + filter_layers_dipole.append(one) + self.filter_layers_dipole = torch.nn.ModuleList(filter_layers_dipole) + + self.return_energy = return_energy + filter_layers = [] + if self.return_energy: + for type_i in range(self.ntypes): + bias_type = 0.0 if self.use_tebd else bias_atom_e[type_i] + one = ResidualDeep( + type_i, embedding_width, neuron, bias_type, resnet_dt=resnet_dt + ) + filter_layers.append(one) + self.filter_layers = torch.nn.ModuleList(filter_layers) + + if "seed" in kwargs: + logging.info("Set seed to %d in fitting net.", kwargs["seed"]) + torch.manual_seed(kwargs["seed"]) + + def output_def(self): + return FittingOutputDef( + [ + OutputVariableDef("energy", [1], reduciable=True, differentiable=False), + OutputVariableDef( + "dforce", [3], reduciable=False, differentiable=False + ), + ] + ) + + def forward( + self, + inputs: torch.Tensor, + atype: torch.Tensor, + atype_tebd: Optional[torch.Tensor] = None, + rot_mat: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, None]: + """Based on embedding net output, alculate total energy. + + Args: + - inputs: Embedding matrix. Its shape is [nframes, natoms[0], self.embedding_width]. + - natoms: Tell atom count and element count. Its shape is [2+self.ntypes]. + + Returns + ------- + - `torch.Tensor`: Total energy with shape [nframes, natoms[0]]. 
+ """ + nframes, nloc, _ = inputs.size() + if self.use_tebd: + if atype_tebd is not None: + inputs = torch.concat([inputs, atype_tebd], dim=-1) + vec_out = self.filter_layers_dipole[0]( + inputs + ) # Shape is [nframes, nloc, m1] + assert list(vec_out.size()) == [nframes, nloc, self.out_dim] + vec_out = vec_out.view(-1, 1, self.out_dim) + assert rot_mat is not None + vec_out = ( + torch.bmm(vec_out, rot_mat).squeeze(-2).view(nframes, nloc, 3) + ) # Shape is [nframes, nloc, 3] + else: + vec_out = torch.zeros_like(atype).unsqueeze(-1) # jit assertion + for type_i, filter_layer in enumerate(self.filter_layers_dipole): + mask = atype == type_i + vec_out_type = filter_layer(inputs) # Shape is [nframes, nloc, m1] + vec_out_type = vec_out_type * mask.unsqueeze(-1) + vec_out = vec_out + vec_out_type # Shape is [nframes, natoms[0], 1] + + outs = torch.zeros_like(atype).unsqueeze(-1) # jit assertion + if self.return_energy: + if self.use_tebd: + atom_energy = self.filter_layers[0](inputs) + self.bias_atom_e[ + atype + ].unsqueeze(-1) + outs = outs + atom_energy # Shape is [nframes, natoms[0], 1] + else: + for type_i, filter_layer in enumerate(self.filter_layers): + mask = atype == type_i + atom_energy = filter_layer(inputs) + if not env.ENERGY_BIAS_TRAINABLE: + atom_energy = atom_energy + self.bias_atom_e[type_i] + atom_energy = atom_energy * mask.unsqueeze(-1) + outs = outs + atom_energy # Shape is [nframes, natoms[0], 1] + return { + "energy": outs.to(env.GLOBAL_PT_FLOAT_PRECISION), + "dforce": vec_out, + } diff --git a/deepmd/pt/model/task/fitting.py b/deepmd/pt/model/task/fitting.py new file mode 100644 index 0000000000..16e80f9c20 --- /dev/null +++ b/deepmd/pt/model/task/fitting.py @@ -0,0 +1,223 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +from typing import ( + Callable, +) + +import numpy as np +import torch + +from deepmd.model_format import ( + FittingOutputDef, +) +from deepmd.pt.model.task.task import ( + TaskBaseMethod, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.env import ( + DEVICE, +) +from deepmd.pt.utils.plugin import ( + Plugin, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) + + +class Fitting(TaskBaseMethod): + __plugins = Plugin() + + @staticmethod + def register(key: str) -> Callable: + """Register a Fitting plugin. + + Parameters + ---------- + key : str + the key of a Fitting + + Returns + ------- + Fitting + the registered Fitting + + Examples + -------- + >>> @Fitting.register("some_fitting") + class SomeFitting(Fitting): + pass + """ + return Fitting.__plugins.register(key) + + def __new__(cls, *args, **kwargs): + if cls is Fitting: + try: + fitting_type = kwargs["type"] + except KeyError: + raise KeyError("the type of fitting should be set by `type`") + if fitting_type in Fitting.__plugins.plugins: + cls = Fitting.__plugins.plugins[fitting_type] + else: + raise RuntimeError("Unknown descriptor type: " + fitting_type) + return super().__new__(cls) + + def output_def(self) -> FittingOutputDef: + """Definition for the task Output.""" + raise NotImplementedError + + def forward(self, **kwargs): + """Task Output.""" + raise NotImplementedError + + def share_params(self, base_class, shared_level, resume=False): + assert ( + self.__class__ == base_class.__class__ + ), "Only fitting nets of the same type can share params!" 
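# --- Editor's note (illustrative sketch, not part of the patch) ---
# The shared_level branches below work by re-pointing entries of
# nn.Module._modules, so two fitting nets end up holding the very same
# submodule objects (and hence the same parameter tensors). A minimal,
# hypothetical demonstration of that linking mechanism:
import torch.nn as nn

class TinyFit(nn.Module):
    def __init__(self):
        super().__init__()
        self.filter_layers = nn.ModuleList([nn.Linear(4, 4)])

base, follower = TinyFit(), TinyFit()
# link every submodule of `follower` to the ones owned by `base`
for item in follower._modules:
    follower._modules[item] = base._modules[item]

# the parameters are now identical objects, so one optimizer step updates both;
# buffers are not covered by this and must be linked separately
assert follower.filter_layers[0].weight is base.filter_layers[0].weight
# --- end editor's note ---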
+ if shared_level == 0: + # link buffers + if hasattr(self, "bias_atom_e"): + self.bias_atom_e = base_class.bias_atom_e + # the following links all the params except buffers, which need to be linked manually. + for item in self._modules: + self._modules[item] = base_class._modules[item] + elif shared_level == 1: + # share everything except the bias_atom_e buffer + # the following links all the params except buffers, which need to be linked manually. + for item in self._modules: + self._modules[item] = base_class._modules[item] + elif shared_level == 2: + # share all the layers before the final layer + # the following links all the params except buffers, which need to be linked manually. + self._modules["filter_layers"][0].deep_layers = base_class._modules[ + "filter_layers" + ][0].deep_layers + elif shared_level == 3: + # share only the first layer + # the following links all the params except buffers, which need to be linked manually. + self._modules["filter_layers"][0].deep_layers[0] = base_class._modules[ + "filter_layers" + ][0].deep_layers[0] + else: + raise NotImplementedError + + def change_energy_bias( + self, config, model, old_type_map, new_type_map, bias_shift="delta", ntest=10 + ): + """Change the energy bias according to the input data and the pretrained model. + + Parameters + ---------- + config : Dict + The configuration. + model : EnergyModel + The energy model loaded from the pretrained model. + new_type_map : list + The original type_map of the dataset; these types are the targets of the energy-bias change. + old_type_map : list + The full type_map of the pretrained model. + bias_shift : str + The mode for changing the energy bias : ['delta', 'statistic'] + 'delta' : perform predictions on the energies of the target dataset, + and do a least-squares fit on the errors to obtain the target shift as the bias. + 'statistic' : directly use the statistical energy bias of the target dataset. + ntest : int + The number of test samples per system used to change the energy bias. + """ + logging.info( + "Changing energy bias in pretrained model for types {}... " + "(this step may take a long time)".format(str(new_type_map)) + ) + # data + systems = config["training"]["training_data"]["systems"] + finetune_data = DpLoaderSet( + systems, ntest, config["model"], type_split=False, noise_settings=None + ) + sampled = make_stat_input(finetune_data.systems, finetune_data.dataloaders, 1) + # map + sorter = np.argsort(old_type_map) + idx_type_map = sorter[ + np.searchsorted(old_type_map, new_type_map, sorter=sorter) + ] + mixed_type = np.all([i.mixed_type for i in finetune_data.systems]) + numb_type = len(old_type_map) + type_numbs, energy_ground_truth, energy_predict = [], [], [] + for test_data in sampled: + nframes = test_data["energy"].shape[0] + if mixed_type: + atype = test_data["atype"].detach().cpu().numpy() + else: + atype = test_data["atype"][0].detach().cpu().numpy() + assert np.array( + [i.item() in idx_type_map for i in list(set(atype.reshape(-1)))] + ).all(), "Some types are not in 'type_map'!"
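# --- Editor's note (illustrative sketch, not part of the patch) ---
# The "delta" mode described in the docstring above reduces to an ordinary
# least-squares problem: with per-frame type counts N (nframes x ntypes) and
# energy residuals r = E_label - E_pred (nframes x 1), it solves
# N @ delta ~= r and shifts the per-type bias by delta. A toy, self-contained
# illustration with made-up numbers:
import numpy as np

N = np.array([[2, 1], [1, 3], [4, 0]], dtype=np.float64)  # atoms of each type per frame
r = np.array([[0.5], [0.8], [1.0]])  # per-frame energy residuals
delta = np.linalg.lstsq(N, r, rcond=None)[0]  # per-type bias shift
# per-atom RMSE of the remaining error, mirroring the rmse_ae computed below
rmse_ae = np.sqrt(np.mean(((N @ delta - r).ravel() / N.sum(-1)) ** 2))
# --- end editor's note ---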
+ energy_ground_truth.append(test_data["energy"].cpu().numpy()) + if mixed_type: + type_numbs.append( + np.array( + [(atype == i).sum(axis=-1) for i in idx_type_map], + dtype=np.int32, + ).T + ) + else: + type_numbs.append( + np.tile( + np.bincount(atype, minlength=numb_type)[idx_type_map], + (nframes, 1), + ) + ) + if bias_shift == "delta": + coord = test_data["coord"].to(DEVICE) + atype = test_data["atype"].to(DEVICE) + box = ( + test_data["box"].to(DEVICE) + if test_data["box"] is not None + else None + ) + ret = model(coord, atype, box) + energy_predict.append( + ret["energy"].reshape([nframes, 1]).detach().cpu().numpy() + ) + type_numbs = np.concatenate(type_numbs) + energy_ground_truth = np.concatenate(energy_ground_truth) + old_bias = self.bias_atom_e[idx_type_map] + if bias_shift == "delta": + energy_predict = np.concatenate(energy_predict) + bias_diff = energy_ground_truth - energy_predict + delta_bias = np.linalg.lstsq(type_numbs, bias_diff, rcond=None)[0] + unbias_e = energy_predict + type_numbs @ delta_bias + atom_numbs = type_numbs.sum(-1) + rmse_ae = np.sqrt( + np.mean( + np.square( + (unbias_e.ravel() - energy_ground_truth.ravel()) / atom_numbs + ) + ) + ) + self.bias_atom_e[idx_type_map] += torch.from_numpy( + delta_bias.reshape(-1) + ).to(DEVICE) + logging.info( + f"RMSE of atomic energy after linear regression is: {rmse_ae:10.5e} eV/atom." + ) + elif bias_shift == "statistic": + statistic_bias = np.linalg.lstsq( + type_numbs, energy_ground_truth, rcond=None + )[0] + self.bias_atom_e[idx_type_map] = ( + torch.from_numpy(statistic_bias.reshape(-1)) + .type_as(self.bias_atom_e[idx_type_map]) + .to(DEVICE) + ) + else: + raise RuntimeError("Unknown bias_shift mode: " + bias_shift) + logging.info( + "Change energy bias of {} from {} to {}.".format( + str(new_type_map), + str(old_bias.detach().cpu().numpy()), + str(self.bias_atom_e[idx_type_map].detach().cpu().numpy()), + ) + ) + return None diff --git a/deepmd/pt/model/task/task.py b/deepmd/pt/model/task/task.py new file mode 100644 index 0000000000..a9b2efeb9a --- /dev/null +++ b/deepmd/pt/model/task/task.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch + + +class TaskBaseMethod(torch.nn.Module): + def __init__(self, **kwargs): + """Construct a basic head for different tasks.""" + super().__init__() + + def forward(self, **kwargs): + """Task Output.""" + raise NotImplementedError diff --git a/deepmd/pt/model/task/type_predict.py b/deepmd/pt/model/task/type_predict.py new file mode 100644 index 0000000000..57227004d0 --- /dev/null +++ b/deepmd/pt/model/task/type_predict.py @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Optional, +) + +import torch + +from deepmd.pt.model.network.network import ( + MaskLMHead, +) +from deepmd.pt.model.task import ( + TaskBaseMethod, +) + + +class TypePredictNet(TaskBaseMethod): + def __init__(self, feature_dim, ntypes, activation_function="gelu", **kwargs): + """Construct a type predict net. + + Args: + - feature_dim: Input feature dimension. + - ntypes: Number of types to predict. + - activation_function: Activation function. + """ + super().__init__() + self.feature_dim = feature_dim + self.ntypes = ntypes + self.lm_head = MaskLMHead( + embed_dim=self.feature_dim, + output_dim=ntypes, + activation_fn=activation_function, + weight=None, + ) + + def forward(self, features, masked_tokens: Optional[torch.Tensor] = None): + """Calculate the predicted logits. + Args: + - features: Input features with shape [nframes, nloc, feature_dim].
+ - masked_tokens: Input masked tokens with shape [nframes, nloc]. + + Returns + ------- + - logits: Predicted probs with shape [nframes, nloc, ntypes]. + """ + # [nframes, nloc, ntypes] + logits = self.lm_head(features, masked_tokens=masked_tokens) + return logits diff --git a/deepmd/pt/optimizer/KFWrapper.py b/deepmd/pt/optimizer/KFWrapper.py new file mode 100644 index 0000000000..3ab7ffe7a9 --- /dev/null +++ b/deepmd/pt/optimizer/KFWrapper.py @@ -0,0 +1,145 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import math + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.optim.optimizer import ( + Optimizer, +) + + +class KFOptimizerWrapper: + def __init__( + self, + model: nn.Module, + optimizer: Optimizer, + atoms_selected: int, + atoms_per_group: int, + is_distributed: bool = False, + ) -> None: + self.model = model + self.optimizer = optimizer + self.atoms_selected = atoms_selected # 24 + self.atoms_per_group = atoms_per_group # 6 + self.is_distributed = is_distributed + + def update_energy( + self, inputs: dict, Etot_label: torch.Tensor, update_prefactor: float = 1 + ) -> None: + model_pred, _, _ = self.model(**inputs, inference_only=True) + Etot_predict = model_pred["energy"] + natoms_sum = int(inputs["atype"].shape[-1]) + self.optimizer.set_grad_prefactor(natoms_sum) + + self.optimizer.zero_grad() + bs = Etot_label.shape[0] + error = Etot_label - Etot_predict + error = error / natoms_sum + mask = error < 0 + + error = error * update_prefactor + error[mask] = -1 * error[mask] + error = error.mean() + + if self.is_distributed: + dist.all_reduce(error) + error /= dist.get_world_size() + + Etot_predict = update_prefactor * Etot_predict + Etot_predict[mask] = -Etot_predict[mask] + + Etot_predict.sum().backward() + error = error * math.sqrt(bs) + self.optimizer.step(error) + return Etot_predict + + def update_force( + self, inputs: dict, Force_label: torch.Tensor, update_prefactor: float = 1 + ) -> None: + natoms_sum = int(inputs["atype"].shape[-1]) + bs = Force_label.shape[0] + self.optimizer.set_grad_prefactor(natoms_sum * self.atoms_per_group * 3) + + index = self.__sample(self.atoms_selected, self.atoms_per_group, natoms_sum) + + for i in range(index.shape[0]): + self.optimizer.zero_grad() + model_pred, _, _ = self.model(**inputs, inference_only=True) + Etot_predict = model_pred["energy"] + natoms_sum = int(inputs["atype"].shape[-1]) + force_predict = model_pred["force"] + error_tmp = Force_label[:, index[i]] - force_predict[:, index[i]] + error_tmp = update_prefactor * error_tmp + mask = error_tmp < 0 + error_tmp[mask] = -1 * error_tmp[mask] + error = error_tmp.mean() / natoms_sum + + if self.is_distributed: + dist.all_reduce(error) + error /= dist.get_world_size() + + tmp_force_predict = force_predict[:, index[i]] * update_prefactor + tmp_force_predict[mask] = -tmp_force_predict[mask] + + # In order to solve a pytorch bug, reference: https://github.com/pytorch/pytorch/issues/43259 + (tmp_force_predict.sum() + Etot_predict.sum() * 0).backward() + error = error * math.sqrt(bs) + self.optimizer.step(error) + return Etot_predict, force_predict + + def update_denoise_coord( + self, + inputs: dict, + clean_coord: torch.Tensor, + update_prefactor: float = 1, + mask_loss_coord: bool = True, + coord_mask: torch.Tensor = None, + ) -> None: + natoms_sum = int(inputs["atype"].shape[-1]) + bs = clean_coord.shape[0] + self.optimizer.set_grad_prefactor(natoms_sum * self.atoms_per_group * 3) + + index = self.__sample(self.atoms_selected, 
self.atoms_per_group, natoms_sum) + + for i in range(index.shape[0]): + self.optimizer.zero_grad() + model_pred, _, _ = self.model(**inputs, inference_only=True) + updated_coord = model_pred["updated_coord"] + natoms_sum = int(inputs["atype"].shape[-1]) + error_tmp = clean_coord[:, index[i]] - updated_coord[:, index[i]] + error_tmp = update_prefactor * error_tmp + if mask_loss_coord: + error_tmp[~coord_mask[:, index[i]]] = 0 + mask = error_tmp < 0 + error_tmp[mask] = -1 * error_tmp[mask] + error = error_tmp.mean() / natoms_sum + + if self.is_distributed: + dist.all_reduce(error) + error /= dist.get_world_size() + + tmp_coord_predict = updated_coord[:, index[i]] * update_prefactor + tmp_coord_predict[mask] = -update_prefactor * tmp_coord_predict[mask] + + # In order to solve a pytorch bug, reference: https://github.com/pytorch/pytorch/issues/43259 + (tmp_coord_predict.sum() + updated_coord.sum() * 0).backward() + error = error * math.sqrt(bs) + self.optimizer.step(error) + return model_pred + + def __sample( + self, atoms_selected: int, atoms_per_group: int, natoms: int + ) -> np.ndarray: + if atoms_selected % atoms_per_group: + raise Exception("divider") + index = range(natoms) + rng = np.random.default_rng() + res = rng.choice(index, atoms_selected).reshape(-1, atoms_per_group) + return res + + +# with torch.autograd.profiler.profile(enabled=True, use_cuda=True, record_shapes=False) as prof: +# the code u wanna profile +# print(prof.key_averages().table(sort_by="self_cpu_time_total")) diff --git a/deepmd/pt/optimizer/LKF.py b/deepmd/pt/optimizer/LKF.py new file mode 100644 index 0000000000..5e18797c7b --- /dev/null +++ b/deepmd/pt/optimizer/LKF.py @@ -0,0 +1,221 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import math + +import torch +from torch.optim.optimizer import ( + Optimizer, +) + + +class LKFOptimizer(Optimizer): + def __init__( + self, + params, + kalman_lambda=0.98, + kalman_nue=0.9987, + block_size=5120, + ): + defaults = { + "lr": 0.1, + "kalman_nue": kalman_nue, + "block_size": block_size, + } + super().__init__(params, defaults) + + self._params = self.param_groups[0]["params"] + + if len(self.param_groups) != 1 or len(self._params) == 0: + raise ValueError( + "LKF doesn't support per-parameter options " "(parameter groups)" + ) + + # NOTE: LKF has only global state, but we register it as state for + # the first param, because this helps with casting in load_state_dict + self._state = self.state[self._params[0]] + self._state.setdefault("kalman_lambda", kalman_lambda) + + self.__init_P() + + def __init_P(self): + param_nums = [] + param_sum = 0 + block_size = self.__get_blocksize() + data_type = self._params[0].dtype + device = self._params[0].device + + for param_group in self.param_groups: + params = param_group["params"] + for param in params: + param_num = param.data.nelement() + if param_sum + param_num > block_size: + if param_sum > 0: + param_nums.append(param_sum) + param_sum = param_num + else: + param_sum += param_num + + param_nums.append(param_sum) + + P = [] + params_packed_index = [] + logging.info("LKF parameter nums: %s" % param_nums) + for param_num in param_nums: + if param_num >= block_size: + block_num = math.ceil(param_num / block_size) + for i in range(block_num): + if i != block_num - 1: + P.append( + torch.eye( + block_size, + dtype=data_type, + device=device, + ) + ) + params_packed_index.append(block_size) + else: + P.append( + torch.eye( + param_num - block_size * i, + dtype=data_type, + device=device, + ) + ) + 
params_packed_index.append(param_num - block_size * i) + else: + P.append(torch.eye(param_num, dtype=data_type, device=device)) + params_packed_index.append(param_num) + + self._state.setdefault("P", P) + self._state.setdefault("weights_num", len(P)) + self._state.setdefault("params_packed_index", params_packed_index) + + def __get_blocksize(self): + return self.param_groups[0]["block_size"] + + def __get_nue(self): + return self.param_groups[0]["kalman_nue"] + + def __split_weights(self, weight): + block_size = self.__get_blocksize() + param_num = weight.nelement() + res = [] + if param_num < block_size: + res.append(weight) + else: + block_num = math.ceil(param_num / block_size) + for i in range(block_num): + if i != block_num - 1: + res.append(weight[i * block_size : (i + 1) * block_size]) + else: + res.append(weight[i * block_size :]) + return res + + def __update(self, H, error, weights): + P = self._state.get("P") + kalman_lambda = self._state.get("kalman_lambda") + weights_num = self._state.get("weights_num") + params_packed_index = self._state.get("params_packed_index") + + block_size = self.__get_blocksize() + kalman_nue = self.__get_nue() + + tmp = 0 + for i in range(weights_num): + tmp = tmp + (kalman_lambda + torch.matmul(torch.matmul(H[i].T, P[i]), H[i])) + + A = 1 / tmp + + for i in range(weights_num): + K = torch.matmul(P[i], H[i]) + + weights[i] = weights[i] + A * error * K + + P[i] = (1 / kalman_lambda) * (P[i] - A * torch.matmul(K, K.T)) + + kalman_lambda = kalman_nue * kalman_lambda + 1 - kalman_nue + self._state.update({"kalman_lambda": kalman_lambda}) + + i = 0 + param_sum = 0 + for param_group in self.param_groups: + params = param_group["params"] + for param in params: + param_num = param.nelement() + weight_tmp = weights[i][param_sum : param_sum + param_num] + if param_num < block_size: + if param.ndim > 1: + param.data = weight_tmp.reshape( + param.data.T.shape + ).T.contiguous() + else: + param.data = weight_tmp.reshape(param.data.shape) + + param_sum += param_num + + if param_sum == params_packed_index[i]: + i += 1 + param_sum = 0 + else: + block_num = math.ceil(param_num / block_size) + for j in range(block_num): + if j == 0: + tmp_weight = weights[i] + else: + tmp_weight = torch.concat([tmp_weight, weights[i]], dim=0) + i += 1 + param.data = tmp_weight.reshape(param.data.T.shape).T.contiguous() + + def set_grad_prefactor(self, grad_prefactor): + self.grad_prefactor = grad_prefactor + + def step(self, error): + params_packed_index = self._state.get("params_packed_index") + + weights = [] + H = [] + param_index = 0 + param_sum = 0 + + for param in self._params: + if param.ndim > 1: + tmp = param.data.T.contiguous().reshape(param.data.nelement(), 1) + if param.grad is None: + tmp_grad = torch.zeros_like(tmp) + else: + tmp_grad = ( + (param.grad / self.grad_prefactor) + .T.contiguous() + .reshape(param.grad.nelement(), 1) + ) + else: + tmp = param.data.reshape(param.data.nelement(), 1) + if param.grad is None: + tmp_grad = torch.zeros_like(tmp) + else: + tmp_grad = (param.grad / self.grad_prefactor).reshape( + param.grad.nelement(), 1 + ) + + tmp = self.__split_weights(tmp) + tmp_grad = self.__split_weights(tmp_grad) + + for split_grad, split_weight in zip(tmp_grad, tmp): + nelement = split_grad.nelement() + + if param_sum == 0: + res_grad = split_grad + res = split_weight + else: + res_grad = torch.concat((res_grad, split_grad), dim=0) + res = torch.concat((res, split_weight), dim=0) + + param_sum += nelement + + if param_sum == params_packed_index[param_index]: + 
H.append(res_grad) + weights.append(res) + param_sum = 0 + param_index += 1 + + self.__update(H, error, weights) diff --git a/deepmd/pt/optimizer/__init__.py b/deepmd/pt/optimizer/__init__.py new file mode 100644 index 0000000000..db340b3bb9 --- /dev/null +++ b/deepmd/pt/optimizer/__init__.py @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from .KFWrapper import ( + KFOptimizerWrapper, +) +from .LKF import ( + LKFOptimizer, +) + +__all__ = ["KFOptimizerWrapper", "LKFOptimizer"] diff --git a/deepmd/pt/train/__init__.py b/deepmd/pt/train/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pt/train/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pt/train/training.py b/deepmd/pt/train/training.py new file mode 100644 index 0000000000..049685a6e3 --- /dev/null +++ b/deepmd/pt/train/training.py @@ -0,0 +1,849 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os +import time +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) +from typing import ( + Any, + Dict, +) + +import numpy as np +import torch +from tqdm import ( + tqdm, +) +from tqdm.contrib.logging import ( + logging_redirect_tqdm, +) + +from deepmd.pt.loss import ( + DenoiseLoss, + EnergyStdLoss, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.optimizer import ( + KFOptimizerWrapper, + LKFOptimizer, +) +from deepmd.pt.train.wrapper import ( + ModelWrapper, +) +from deepmd.pt.utils import ( + dp_random, +) +from deepmd.pt.utils.dataloader import ( + BufferedIterator, + get_weighted_sampler, +) +from deepmd.pt.utils.env import ( + DEVICE, + DISABLE_TQDM, + JIT, + LOCAL_RANK, + NUM_WORKERS, + SAMPLER_RECORD, +) +from deepmd.pt.utils.learning_rate import ( + LearningRateExp, +) + +if torch.__version__.startswith("2"): + import torch._dynamo + +import torch.distributed as dist +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.data import ( + DataLoader, +) + + +class Trainer: + def __init__( + self, + config: Dict[str, Any], + training_data, + sampled, + validation_data=None, + init_model=None, + restart_model=None, + finetune_model=None, + force_load=False, + shared_links=None, + ): + """Construct a DeePMD trainer. + + Args: + - config: The Dict-like configuration with training options. 
+ """ + resume_model = init_model if init_model is not None else restart_model + self.restart_training = restart_model is not None + model_params = config["model"] + training_params = config["training"] + self.multi_task = "model_dict" in model_params + self.finetune_multi_task = model_params.pop( + "finetune_multi_task", False + ) # should use pop for next finetune + self.model_keys = ( + list(model_params["model_dict"]) if self.multi_task else ["Default"] + ) + self.rank = dist.get_rank() if dist.is_initialized() else 0 + self.world_size = dist.get_world_size() if dist.is_initialized() else 1 + self.num_model = len(self.model_keys) + + # Iteration config + self.num_steps = training_params["numb_steps"] + self.disp_file = training_params.get("disp_file", "lcurve.out") + self.disp_freq = training_params.get("disp_freq", 1000) + self.save_ckpt = training_params.get("save_ckpt", "model.pt") + self.save_freq = training_params.get("save_freq", 1000) + self.lcurve_should_print_header = True + + def get_opt_param(params): + opt_type = params.get("opt_type", "Adam") + opt_param = { + "kf_blocksize": params.get("kf_blocksize", 5120), + "kf_start_pref_e": params.get("kf_start_pref_e", 1), + "kf_limit_pref_e": params.get("kf_limit_pref_e", 1), + "kf_start_pref_f": params.get("kf_start_pref_f", 1), + "kf_limit_pref_f": params.get("kf_limit_pref_f", 1), + } + return opt_type, opt_param + + def get_data_loader(_training_data, _validation_data, _training_params): + if "auto_prob" in _training_params["training_data"]: + train_sampler = get_weighted_sampler( + _training_data, _training_params["training_data"]["auto_prob"] + ) + elif "sys_probs" in _training_params["training_data"]: + train_sampler = get_weighted_sampler( + _training_data, + _training_params["training_data"]["sys_probs"], + sys_prob=True, + ) + else: + train_sampler = get_weighted_sampler(_training_data, "prob_sys_size") + + if "auto_prob" in _training_params["validation_data"]: + valid_sampler = get_weighted_sampler( + _validation_data, _training_params["validation_data"]["auto_prob"] + ) + elif "sys_probs" in _training_params["validation_data"]: + valid_sampler = get_weighted_sampler( + _validation_data, + _training_params["validation_data"]["sys_probs"], + sys_prob=True, + ) + else: + valid_sampler = get_weighted_sampler(_validation_data, "prob_sys_size") + + if train_sampler is None or valid_sampler is None: + logging.warning( + "Sampler not specified!" + ) # None sampler will lead to a premature stop iteration. Replacement should be True in attribute of the sampler to produce expected number of items in one iteration. 
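# --- Editor's note (illustrative sketch, not part of the patch) ---
# The comment above points out that the sampler must draw with replacement to
# yield a full epoch of items. A minimal, hypothetical equivalent of a
# "prob_sys_size"-style weighting, where each system is drawn with probability
# proportional to its frame count:
import torch
from torch.utils.data import WeightedRandomSampler

sys_sizes = [100, 400, 500]  # frames per system (made-up numbers)
weights = torch.tensor(sys_sizes, dtype=torch.double)
weights = weights / weights.sum()
# replacement=True so the iterator can produce num_samples items even when
# one system dominates the probability mass
sampler = WeightedRandomSampler(weights, num_samples=sum(sys_sizes), replacement=True)
# --- end editor's note ---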
+ training_dataloader = DataLoader( + _training_data, + sampler=train_sampler, + batch_size=None, + num_workers=NUM_WORKERS, # setting to 0 diverges the behavior of its iterator; should be >=1 + drop_last=False, + pin_memory=True, + ) + training_data_buffered = BufferedIterator(iter(training_dataloader)) + validation_dataloader = DataLoader( + _validation_data, + sampler=valid_sampler, + batch_size=None, + num_workers=min(NUM_WORKERS, 1), + drop_last=False, + pin_memory=True, + ) + + validation_data_buffered = BufferedIterator(iter(validation_dataloader)) + if _training_params.get("validation_data", None) is not None: + valid_numb_batch = _training_params["validation_data"].get( + "numb_btch", 1 + ) + else: + valid_numb_batch = 1 + return ( + training_dataloader, + training_data_buffered, + validation_dataloader, + validation_data_buffered, + valid_numb_batch, + ) + + def get_single_model(_model_params, _sampled): + model = get_model(deepcopy(_model_params), _sampled).to(DEVICE) + return model + + def get_lr(lr_params): + assert ( + lr_params.get("type", "exp") == "exp" + ), "Only learning rate `exp` is supported!" + lr_params["stop_steps"] = self.num_steps - self.warmup_steps + lr_exp = LearningRateExp(**lr_params) + return lr_exp + + def get_loss(loss_params, start_lr, _ntypes): + loss_type = loss_params.get("type", "ener") + if loss_type == "ener": + loss_params["starter_learning_rate"] = start_lr + return EnergyStdLoss(**loss_params) + elif loss_type == "denoise": + loss_params["ntypes"] = _ntypes + return DenoiseLoss(**loss_params) + else: + raise NotImplementedError + + # Optimizer + if self.multi_task and training_params.get("optim_dict", None) is not None: + self.optim_dict = training_params.get("optim_dict") + missing_keys = [ + key for key in self.model_keys if key not in self.optim_dict + ] + assert ( + not missing_keys + ), f"These keys are not in optim_dict: {missing_keys}!" + self.opt_type = {} + self.opt_param = {} + for model_key in self.model_keys: + self.opt_type[model_key], self.opt_param[model_key] = get_opt_param( + self.optim_dict[model_key] + ) + else: + self.opt_type, self.opt_param = get_opt_param(training_params) + + # Data + Model + dp_random.seed(training_params["seed"]) + if not self.multi_task: + ( + self.training_dataloader, + self.training_data, + self.validation_dataloader, + self.validation_data, + self.valid_numb_batch, + ) = get_data_loader(training_data, validation_data, training_params) + self.model = get_single_model(model_params, sampled) + else: + ( + self.training_dataloader, + self.training_data, + self.validation_dataloader, + self.validation_data, + self.valid_numb_batch, + self.model, + ) = {}, {}, {}, {}, {}, {} + for model_key in self.model_keys: + ( + self.training_dataloader[model_key], + self.training_data[model_key], + self.validation_dataloader[model_key], + self.validation_data[model_key], + self.valid_numb_batch[model_key], + ) = get_data_loader( + training_data[model_key], + validation_data[model_key], + training_params["data_dict"][model_key], + ) + self.model[model_key] = get_single_model( + model_params["model_dict"][model_key], sampled[model_key] + ) + + # Learning rate + self.warmup_steps = training_params.get("warmup_steps", 0) + self.gradient_max_norm = training_params.get("gradient_max_norm", 0.0) + assert ( + self.num_steps - self.warmup_steps > 0 + ), "Warm up steps must be less than total training steps!" 
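# --- Editor's note (illustrative sketch, not part of the patch) ---
# The learning-rate handling built further below ramps linearly during warmup
# and then follows the exponential schedule. Assuming, hypothetically, that
# LearningRateExp computes lr(t) = start_lr * decay_rate ** (t / decay_steps),
# the multiplier handed to torch.optim.lr_scheduler.LambdaLR behaves like:
def warmup_then_exp(step, warmup_steps=1000, start_lr=1e-3,
                    decay_rate=0.95, decay_steps=5000):
    if warmup_steps > 0 and step < warmup_steps:
        return step / warmup_steps  # linear ramp from 0 to 1
    decayed = start_lr * decay_rate ** ((step - warmup_steps) / decay_steps)
    return decayed / start_lr  # LambdaLR multiplies the base lr by this factor
# Usage sketch: torch.optim.lr_scheduler.LambdaLR(optimizer, warmup_then_exp)
# --- end editor's note ---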
+ if self.multi_task and config.get("learning_rate_dict", None) is not None: + self.lr_exp = {} + for model_key in self.model_keys: + self.lr_exp[model_key] = get_lr(config["learning_rate_dict"][model_key]) + else: + self.lr_exp = get_lr(config["learning_rate"]) + + # Loss + if not self.multi_task: + self.loss = get_loss( + config["loss"], + config["learning_rate"]["start_lr"], + len(model_params["type_map"]), + ) + else: + self.loss = {} + for model_key in self.model_keys: + loss_param = config["loss_dict"][model_key] + if config.get("learning_rate_dict", None) is not None: + lr_param = config["learning_rate_dict"][model_key]["start_lr"] + else: + lr_param = config["learning_rate"]["start_lr"] + ntypes = len(model_params["model_dict"][model_key]["type_map"]) + self.loss[model_key] = get_loss(loss_param, lr_param, ntypes) + + # JIT + if JIT: + self.model = torch.jit.script(self.model) + + # Model Wrapper + self.wrapper = ModelWrapper(self.model, self.loss, model_params=model_params) + self.start_step = 0 + + # resuming and finetune + optimizer_state_dict = None + if model_params["resuming"]: + ntest = model_params.get("data_bias_nsample", 1) + origin_model = ( + finetune_model if finetune_model is not None else resume_model + ) + logging.info(f"Resuming from {origin_model}.") + state_dict = torch.load(origin_model, map_location=DEVICE) + if "model" in state_dict: + optimizer_state_dict = ( + state_dict["optimizer"] if finetune_model is None else None + ) + state_dict = state_dict["model"] + self.start_step = ( + state_dict["_extra_state"]["train_infos"]["step"] + if self.restart_training + else 0 + ) + if self.rank == 0: + if force_load: + input_keys = list(state_dict.keys()) + target_keys = list(self.wrapper.state_dict().keys()) + missing_keys = [ + item for item in target_keys if item not in input_keys + ] + if missing_keys: + target_state_dict = self.wrapper.state_dict() + slim_keys = [] + for item in missing_keys: + state_dict[item] = target_state_dict[item].clone().detach() + new_key = True + for slim_key in slim_keys: + if slim_key in item: + new_key = False + break + if new_key: + tmp_keys = ".".join(item.split(".")[:3]) + slim_keys.append(tmp_keys) + slim_keys = [i + ".*" for i in slim_keys] + logging.warning( + f"Force load mode allowed! These keys are not in ckpt and will re-init: {slim_keys}" + ) + elif self.finetune_multi_task: + new_state_dict = {} + model_branch_chosen = model_params.pop("model_branch_chosen") + new_fitting = model_params.pop("new_fitting", False) + target_state_dict = self.wrapper.state_dict() + target_keys = [ + i for i in target_state_dict.keys() if i != "_extra_state" + ] + for item_key in target_keys: + if new_fitting and ".fitting_net." in item_key: + # print(f'Keep {item_key} in old model!') + new_state_dict[item_key] = ( + target_state_dict[item_key].clone().detach() + ) + else: + new_key = item_key.replace( + ".Default.", f".{model_branch_chosen}." 
+ ) + # print(f'Replace {item_key} with {new_key} in pretrained_model!') + new_state_dict[item_key] = ( + state_dict[new_key].clone().detach() + ) + state_dict = new_state_dict + if finetune_model is not None: + state_dict["_extra_state"] = self.wrapper.state_dict()[ + "_extra_state" + ] + + self.wrapper.load_state_dict(state_dict) + # finetune + if finetune_model is not None and model_params["fitting_net"].get( + "type", "ener" + ) in ["ener", "direct_force_ener", "atten_vec_lcc"]: + old_type_map, new_type_map = ( + model_params["type_map"], + model_params["new_type_map"], + ) + self.model.fitting_net.change_energy_bias( + config, + self.model, + old_type_map, + new_type_map, + ntest=ntest, + bias_shift=model_params.get("bias_shift", "delta"), + ) + + # Set trainable params + self.wrapper.set_trainable_params() + + # Multi-task share params + if shared_links is not None: + self.wrapper.share_params(shared_links, resume=model_params["resuming"]) + + if dist.is_initialized(): + torch.cuda.set_device(LOCAL_RANK) + # DDP will guarantee the model parameters are identical across all processes + self.wrapper = DDP( + self.wrapper, + device_ids=[LOCAL_RANK], + find_unused_parameters=True, + output_device=LOCAL_RANK, + ) + + # TODO ZD add lr warmups for multitask + def warm_up_linear(step, warmup_steps): + if step < warmup_steps: + return step / warmup_steps + else: + return self.lr_exp.value(step - warmup_steps) / self.lr_exp.start_lr + + # TODO ZD add optimizers for multitask + if self.opt_type == "Adam": + self.optimizer = torch.optim.Adam( + self.wrapper.parameters(), lr=self.lr_exp.start_lr + ) + if optimizer_state_dict is not None and self.restart_training: + self.optimizer.load_state_dict(optimizer_state_dict) + self.scheduler = torch.optim.lr_scheduler.LambdaLR( + self.optimizer, + lambda step: warm_up_linear(step + self.start_step, self.warmup_steps), + ) + elif self.opt_type == "LKF": + self.optimizer = LKFOptimizer( + self.wrapper.parameters(), 0.98, 0.99870, self.opt_param["kf_blocksize"] + ) + else: + raise ValueError("Not supported optimizer type '%s'" % self.opt_type) + + # Get model prob for multi-task + if self.multi_task: + self.model_prob = np.array([0.0 for key in self.model_keys]) + if training_params.get("model_prob", None) is not None: + model_prob = training_params["model_prob"] + for ii, model_key in enumerate(self.model_keys): + if model_key in model_prob: + self.model_prob[ii] += float(model_prob[model_key]) + else: + for ii, model_key in enumerate(self.model_keys): + self.model_prob[ii] += float(len(self.training_data[model_key])) + sum_prob = np.sum(self.model_prob) + assert sum_prob > 0.0, "Sum of model prob must be larger than 0!" 
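+            # e.g. two tasks with 800 and 200 training batches and no explicit
+            # "model_prob" normalize to draw probabilities [0.8, 0.2] below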
+ self.model_prob = self.model_prob / sum_prob + + def run(self): + fout = ( + open(self.disp_file, mode="w", buffering=1) if self.rank == 0 else None + ) # line buffered + if SAMPLER_RECORD: + record_file = f"Sample_rank_{self.rank}.txt" + fout1 = open(record_file, mode="w", buffering=1) + logging.info("Start to train %d steps.", self.num_steps) + if dist.is_initialized(): + logging.info(f"Rank: {dist.get_rank()}/{dist.get_world_size()}") + + def step(_step_id, task_key="Default"): + self.wrapper.train() + if isinstance(self.lr_exp, dict): + _lr = self.lr_exp[task_key] + else: + _lr = self.lr_exp + cur_lr = _lr.value(_step_id) + pref_lr = cur_lr + self.optimizer.zero_grad(set_to_none=True) + input_dict, label_dict, log_dict = self.get_data( + is_train=True, task_key=task_key + ) + if SAMPLER_RECORD: + print_str = f"Step {_step_id}: sample system{log_dict['sid']} frame{log_dict['fid']}\n" + fout1.write(print_str) + fout1.flush() + if self.opt_type == "Adam": + cur_lr = self.scheduler.get_last_lr()[0] + if _step_id < self.warmup_steps: + pref_lr = _lr.start_lr + else: + pref_lr = cur_lr + model_pred, loss, more_loss = self.wrapper( + **input_dict, cur_lr=pref_lr, label=label_dict, task_key=task_key + ) + loss.backward() + if self.gradient_max_norm > 0.0: + grad_norm = torch.nn.utils.clip_grad_norm_( + self.wrapper.parameters(), self.gradient_max_norm + ) + if not torch.isfinite(grad_norm).all(): + # check local gradnorm single GPU case, trigger NanDetector + raise FloatingPointError("gradients are Nan/Inf") + self.optimizer.step() + self.scheduler.step() + elif self.opt_type == "LKF": + if isinstance(self.loss, EnergyStdLoss): + KFOptWrapper = KFOptimizerWrapper( + self.wrapper, self.optimizer, 24, 6, dist.is_initialized() + ) + pref_e = self.opt_param["kf_start_pref_e"] * ( + self.opt_param["kf_limit_pref_e"] + / self.opt_param["kf_start_pref_e"] + ) ** (_step_id / self.num_steps) + _ = KFOptWrapper.update_energy( + input_dict, label_dict["energy"], pref_e + ) + pref_f = self.opt_param["kf_start_pref_f"] * ( + self.opt_param["kf_limit_pref_f"] + / self.opt_param["kf_start_pref_f"] + ) ** (_step_id / self.num_steps) + p_energy, p_force = KFOptWrapper.update_force( + input_dict, label_dict["force"], pref_f + ) + # [coord, atype, natoms, mapping, shift, nlist, box] + model_pred = {"energy": p_energy, "force": p_force} + module = ( + self.wrapper.module if dist.is_initialized() else self.wrapper + ) + loss, more_loss = module.loss[task_key]( + model_pred, + label_dict, + int(input_dict["atype"].shape[-1]), + learning_rate=pref_lr, + ) + elif isinstance(self.loss, DenoiseLoss): + KFOptWrapper = KFOptimizerWrapper( + self.wrapper, self.optimizer, 24, 6, dist.is_initialized() + ) + module = ( + self.wrapper.module if dist.is_initialized() else self.wrapper + ) + model_pred = KFOptWrapper.update_denoise_coord( + input_dict, + label_dict["clean_coord"], + 1, + module.loss[task_key].mask_loss_coord, + label_dict["coord_mask"], + ) + loss, more_loss = module.loss[task_key]( + model_pred, + label_dict, + input_dict["natoms"], + learning_rate=pref_lr, + ) + else: + raise ValueError("Not supported optimizer type '%s'" % self.opt_type) + + # Log and persist + if _step_id % self.disp_freq == 0: + self.wrapper.eval() + msg = f"step={_step_id}, lr={cur_lr:.2e}" + + def log_loss_train(_loss, _more_loss, _task_key="Default"): + results = {} + if not self.multi_task: + suffix = "" + else: + suffix = f"_{_task_key}" + _msg = f"loss{suffix}={_loss:.4f}" + rmse_val = { + item: _more_loss[item] + for item in 
_more_loss + if "l2_" not in item + } + for item in sorted(rmse_val.keys()): + _msg += f", {item}_train{suffix}={rmse_val[item]:.4f}" + results[item] = rmse_val[item] + return _msg, results + + def log_loss_valid(_task_key="Default"): + single_results = {} + sum_natoms = 0 + if not self.multi_task: + suffix = "" + valid_numb_batch = self.valid_numb_batch + else: + suffix = f"_{_task_key}" + valid_numb_batch = self.valid_numb_batch[_task_key] + for ii in range(valid_numb_batch): + self.optimizer.zero_grad() + input_dict, label_dict, _ = self.get_data( + is_train=False, task_key=_task_key + ) + _, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=_task_key, + ) + # more_loss.update({"rmse": math.sqrt(loss)}) + natoms = int(input_dict["atype"].shape[-1]) + sum_natoms += natoms + for k, v in more_loss.items(): + if "l2_" not in k: + single_results[k] = ( + single_results.get(k, 0.0) + v * natoms + ) + results = {k: v / sum_natoms for k, v in single_results.items()} + _msg = "" + for item in sorted(results.keys()): + _msg += f", {item}_valid{suffix}={results[item]:.4f}" + return _msg, results + + if not self.multi_task: + temp_msg, train_results = log_loss_train(loss, more_loss) + msg += "\n" + temp_msg + temp_msg, valid_results = log_loss_valid() + msg += temp_msg + else: + train_results = {_key: {} for _key in self.model_keys} + valid_results = {_key: {} for _key in self.model_keys} + train_msg = {} + valid_msg = {} + train_msg[task_key], train_results[task_key] = log_loss_train( + loss, more_loss, _task_key=task_key + ) + for _key in self.model_keys: + if _key != task_key: + self.optimizer.zero_grad() + input_dict, label_dict, _ = self.get_data( + is_train=True, task_key=_key + ) + _, loss, more_loss = self.wrapper( + **input_dict, + cur_lr=pref_lr, + label=label_dict, + task_key=_key, + ) + train_msg[_key], train_results[_key] = log_loss_train( + loss, more_loss, _task_key=_key + ) + valid_msg[_key], valid_results[_key] = log_loss_valid( + _task_key=_key + ) + msg += "\n" + train_msg[_key] + msg += valid_msg[_key] + + train_time = time.time() - self.t0 + self.t0 = time.time() + msg += f", speed={train_time:.2f} s/{self.disp_freq if _step_id else 1} batches" + logging.info(msg) + + if fout: + if self.lcurve_should_print_header: + self.print_header(fout, train_results, valid_results) + self.lcurve_should_print_header = False + self.print_on_training( + fout, _step_id, cur_lr, train_results, valid_results + ) + + if ( + ((_step_id + 1) % self.save_freq == 0 and _step_id != self.start_step) + or (_step_id + 1) == self.num_steps + ) and (self.rank == 0 or dist.get_rank() == 0): + # Handle the case if rank 0 aborted and re-assigned + self.latest_model = Path(self.save_ckpt) + self.latest_model = self.latest_model.with_name( + f"{self.latest_model.stem}_{_step_id + 1}{self.latest_model.suffix}" + ) + module = self.wrapper.module if dist.is_initialized() else self.wrapper + self.save_model(self.latest_model, lr=cur_lr, step=_step_id) + logging.info(f"Saved model to {self.latest_model}") + + self.t0 = time.time() + with logging_redirect_tqdm(): + for step_id in tqdm( + range(self.num_steps), + disable=(bool(dist.get_rank()) if dist.is_initialized() else False) + or DISABLE_TQDM, + ): # set to None to disable on non-TTY; disable on not rank 0 + if step_id < self.start_step: + continue + if self.multi_task: + chosen_index_list = dp_random.choice( + np.arange(self.num_model), + p=np.array(self.model_prob), + size=self.world_size, + replace=True, + ) + 
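+                    # All ranks draw the same index sequence here (dp_random was
+                    # seeded identically on every rank), so indexing it with
+                    # self.rank below gives a consistent per-step assignment:
+                    # e.g. [0, 1] with world_size=2 means rank 0 trains task 0
+                    # and rank 1 trains task 1 for this step.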
assert chosen_index_list.size == self.world_size + model_index = chosen_index_list[self.rank] + model_key = self.model_keys[model_index] + else: + model_key = "Default" + step(step_id, model_key) + if JIT: + break + + if ( + self.rank == 0 or dist.get_rank() == 0 + ): # Handle the case if rank 0 aborted and re-assigned + if JIT: + pth_model_path = ( + "frozen_model.pth" # We use .pth to denote the frozen model + ) + self.model.save(pth_model_path) + logging.info( + f"Frozen model for inferencing has been saved to {pth_model_path}" + ) + try: + os.symlink(self.latest_model, self.save_ckpt) + except OSError: + self.save_model(self.save_ckpt, lr=0, step=self.num_steps) + logging.info(f"Trained model has been saved to: {self.save_ckpt}") + + if fout: + fout.close() + if SAMPLER_RECORD: + fout1.close() + + def save_model(self, save_path, lr=0.0, step=0): + module = self.wrapper.module if dist.is_initialized() else self.wrapper + module.train_infos["lr"] = lr + module.train_infos["step"] = step + torch.save( + {"model": module.state_dict(), "optimizer": self.optimizer.state_dict()}, + save_path, + ) + + def get_data(self, is_train=True, task_key="Default"): + if not self.multi_task: + if is_train: + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator( + iter(self.training_dataloader) + ) + batch_data = next(iter(self.training_data)) + else: + try: + batch_data = next(iter(self.validation_data)) + except StopIteration: + self.validation_data = BufferedIterator( + iter(self.validation_dataloader) + ) + batch_data = next(iter(self.validation_data)) + else: + if is_train: + try: + batch_data = next(iter(self.training_data[task_key])) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data[task_key] = BufferedIterator( + iter(self.training_dataloader[task_key]) + ) + batch_data = next(iter(self.training_data[task_key])) + else: + try: + batch_data = next(iter(self.validation_data[task_key])) + except StopIteration: + self.validation_data[task_key] = BufferedIterator( + iter(self.validation_dataloader[task_key]) + ) + batch_data = next(iter(self.validation_data[task_key])) + + for key in batch_data.keys(): + if key == "sid" or key == "fid": + continue + elif not isinstance(batch_data[key], list): + if batch_data[key] is not None: + batch_data[key] = batch_data[key].to(DEVICE) + else: + batch_data[key] = [item.to(DEVICE) for item in batch_data[key]] + input_dict = {} + for item in [ + "coord", + "atype", + "box", + ]: + if item in batch_data: + input_dict[item] = batch_data[item] + else: + input_dict[item] = None + label_dict = {} + for item in [ + "energy", + "force", + "virial", + "clean_coord", + "clean_type", + "coord_mask", + "type_mask", + ]: + if item in batch_data: + label_dict[item] = batch_data[item] + log_dict = {} + if "fid" in batch_data: + log_dict["fid"] = batch_data["fid"] + log_dict["sid"] = batch_data["sid"] + return input_dict, label_dict, log_dict + + def print_header(self, fout, train_results, valid_results): + train_keys = sorted(train_results.keys()) + print_str = "" + print_str += "# %5s" % "step" + if not self.multi_task: + if valid_results is not None: + prop_fmt = " %11s %11s" + for k in train_keys: + print_str += prop_fmt % (k + "_val", k + "_trn") + else: + prop_fmt = " %11s" + for k in train_keys: + print_str += prop_fmt % (k + "_trn") + else: + for model_key in self.model_keys: + if 
valid_results[model_key] is not None: + prop_fmt = " %11s %11s" + for k in sorted(train_results[model_key].keys()): + print_str += prop_fmt % ( + k + f"_val_{model_key}", + k + f"_trn_{model_key}", + ) + else: + prop_fmt = " %11s" + for k in sorted(train_results[model_key].keys()): + print_str += prop_fmt % (k + f"_trn_{model_key}") + print_str += " %8s\n" % "lr" + fout.write(print_str) + fout.flush() + + def print_on_training(self, fout, step_id, cur_lr, train_results, valid_results): + train_keys = sorted(train_results.keys()) + print_str = "" + print_str += "%7d" % step_id + if not self.multi_task: + if valid_results is not None: + prop_fmt = " %11.2e %11.2e" + for k in train_keys: + print_str += prop_fmt % (valid_results[k], train_results[k]) + else: + prop_fmt = " %11.2e" + for k in train_keys: + print_str += prop_fmt % (train_results[k]) + else: + for model_key in self.model_keys: + if valid_results[model_key] is not None: + prop_fmt = " %11.2e %11.2e" + for k in sorted(valid_results[model_key].keys()): + print_str += prop_fmt % ( + valid_results[model_key][k], + train_results[model_key][k], + ) + else: + prop_fmt = " %11.2e" + for k in sorted(train_results[model_key].keys()): + print_str += prop_fmt % (train_results[model_key][k]) + print_str += " %8.1e\n" % cur_lr + fout.write(print_str) + fout.flush() diff --git a/deepmd/pt/train/wrapper.py b/deepmd/pt/train/wrapper.py new file mode 100644 index 0000000000..fe423e6318 --- /dev/null +++ b/deepmd/pt/train/wrapper.py @@ -0,0 +1,192 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Dict, + Optional, + Union, +) + +import torch + +if torch.__version__.startswith("2"): + import torch._dynamo + + +class ModelWrapper(torch.nn.Module): + def __init__( + self, + model: Union[torch.nn.Module, Dict], + loss: Union[torch.nn.Module, Dict] = None, + model_params=None, + shared_links=None, + ): + """Construct a DeePMD model wrapper. + + Args: + - config: The Dict-like configuration with training options. + """ + super().__init__() + self.model_params = model_params if model_params is not None else {} + self.train_infos = { + "lr": 0, + "step": 0, + } + self.multi_task = False + self.model = torch.nn.ModuleDict() + # Model + if isinstance(model, torch.nn.Module): + self.model["Default"] = model + elif isinstance(model, dict): + self.multi_task = True + for task_key in model: + assert isinstance( + model[task_key], torch.nn.Module + ), f"{task_key} in model_dict is not a torch.nn.Module!" + self.model[task_key] = model[task_key] + # Loss + self.loss = None + if loss is not None: + self.loss = torch.nn.ModuleDict() + if isinstance(loss, torch.nn.Module): + self.loss["Default"] = loss + elif isinstance(loss, dict): + for task_key in loss: + assert isinstance( + loss[task_key], torch.nn.Module + ), f"{task_key} in loss_dict is not a torch.nn.Module!" 
+ self.loss[task_key] = loss[task_key] + self.inference_only = self.loss is None + + def set_trainable_params(self): + supported_types = ["type_embedding", "descriptor", "fitting_net"] + for model_item in self.model: + for net_type in supported_types: + trainable = True + if not self.multi_task: + if net_type in self.model_params: + trainable = self.model_params[net_type].get("trainable", True) + else: + if net_type in self.model_params["model_dict"][model_item]: + trainable = self.model_params["model_dict"][model_item][ + net_type + ].get("trainable", True) + if ( + hasattr(self.model[model_item], net_type) + and getattr(self.model[model_item], net_type) is not None + ): + for param in ( + self.model[model_item].__getattr__(net_type).parameters() + ): + param.requires_grad = trainable + + def share_params(self, shared_links, resume=False): + supported_types = ["type_embedding", "descriptor", "fitting_net"] + for shared_item in shared_links: + class_name = shared_links[shared_item]["type"] + shared_base = shared_links[shared_item]["links"][0] + class_type_base = shared_base["shared_type"] + model_key_base = shared_base["model_key"] + shared_level_base = shared_base["shared_level"] + if "descriptor" in class_type_base: + if class_type_base == "descriptor": + base_class = self.model[model_key_base].__getattr__("descriptor") + elif "hybrid" in class_type_base: + hybrid_index = int(class_type_base.split("_")[-1]) + base_class = ( + self.model[model_key_base] + .__getattr__("descriptor") + .descriptor_list[hybrid_index] + ) + else: + raise RuntimeError(f"Unknown class_type {class_type_base}!") + for link_item in shared_links[shared_item]["links"][1:]: + class_type_link = link_item["shared_type"] + model_key_link = link_item["model_key"] + shared_level_link = int(link_item["shared_level"]) + assert ( + shared_level_link >= shared_level_base + ), "The shared_links must be sorted by shared_level!" + assert ( + "descriptor" in class_type_link + ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" + if class_type_link == "descriptor": + link_class = self.model[model_key_link].__getattr__( + "descriptor" + ) + elif "hybrid" in class_type_link: + hybrid_index = int(class_type_link.split("_")[-1]) + link_class = ( + self.model[model_key_link] + .__getattr__("descriptor") + .descriptor_list[hybrid_index] + ) + else: + raise RuntimeError(f"Unknown class_type {class_type_link}!") + link_class.share_params( + base_class, shared_level_link, resume=resume + ) + print( + f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" + ) + else: + if hasattr(self.model[model_key_base], class_type_base): + base_class = self.model[model_key_base].__getattr__(class_type_base) + for link_item in shared_links[shared_item]["links"][1:]: + class_type_link = link_item["shared_type"] + model_key_link = link_item["model_key"] + shared_level_link = int(link_item["shared_level"]) + assert ( + shared_level_link >= shared_level_base + ), "The shared_links must be sorted by shared_level!" + assert ( + class_type_base == class_type_link + ), f"Class type mismatched: {class_type_base} vs {class_type_link}!" + link_class = self.model[model_key_link].__getattr__( + class_type_link + ) + link_class.share_params( + base_class, shared_level_link, resume=resume + ) + print( + f"Shared params of {model_key_base}.{class_type_base} and {model_key_link}.{class_type_link}!" 
+ ) + + def forward( + self, + coord, + atype, + box: Optional[torch.Tensor] = None, + cur_lr: Optional[torch.Tensor] = None, + label: Optional[torch.Tensor] = None, + task_key: Optional[torch.Tensor] = None, + inference_only=False, + do_atomic_virial=False, + ): + if not self.multi_task: + task_key = "Default" + else: + assert ( + task_key is not None + ), f"Multitask model must specify the inference task! Supported tasks are {list(self.model.keys())}." + model_pred = self.model[task_key]( + coord, atype, box=box, do_atomic_virial=do_atomic_virial + ) + natoms = atype.shape[-1] + if not self.inference_only and not inference_only: + loss, more_loss = self.loss[task_key]( + model_pred, label, natoms=natoms, learning_rate=cur_lr + ) + return model_pred, loss, more_loss + else: + return model_pred, None, None + + def set_extra_state(self, state: Dict): + self.model_params = state["model_params"] + self.train_infos = state["train_infos"] + return None + + def get_extra_state(self) -> Dict: + state = { + "model_params": self.model_params, + "train_infos": self.train_infos, + } + return state diff --git a/deepmd/pt/utils/__init__.py b/deepmd/pt/utils/__init__.py new file mode 100644 index 0000000000..6ceb116d85 --- /dev/null +++ b/deepmd/pt/utils/__init__.py @@ -0,0 +1 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later diff --git a/deepmd/pt/utils/ase_calc.py b/deepmd/pt/utils/ase_calc.py new file mode 100644 index 0000000000..8d5fe8bce9 --- /dev/null +++ b/deepmd/pt/utils/ase_calc.py @@ -0,0 +1,65 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + ClassVar, +) + +import dpdata +import numpy as np +from ase import ( + Atoms, +) +from ase.calculators.calculator import ( + Calculator, + PropertyNotImplementedError, +) + +from deepmd.pt.infer.deep_eval import ( + DeepPot, +) + + +class DPCalculator(Calculator): + implemented_properties: ClassVar[list] = [ + "energy", + "free_energy", + "forces", + "virial", + "stress", + ] + + def __init__(self, model): + Calculator.__init__(self) + self.dp = DeepPot(model) + self.type_map = self.dp.type_map + + def calculate(self, atoms: Atoms, properties, system_changes) -> None: + Calculator.calculate(self, atoms, properties, system_changes) + system = dpdata.System(atoms, fmt="ase/structure") + type_trans = np.array( + [self.type_map.index(i) for i in system.data["atom_names"]] + ) + input_coords = system.data["coords"] + input_cells = system.data["cells"] + input_types = list(type_trans[system.data["atom_types"]]) + model_predict = self.dp.eval(input_coords, input_cells, input_types) + self.results = { + "energy": model_predict[0].item(), + "free_energy": model_predict[0].item(), + "forces": model_predict[1].reshape(-1, 3), + "virial": model_predict[2].reshape(3, 3), + } + + # convert virial into stress for lattice relaxation + if "stress" in properties: + if sum(atoms.get_pbc()) > 0 or (atoms.cell is not None): + # the usual convention (tensile stress is positive) + # stress = -virial / volume + stress = ( + -0.5 + * (self.results["virial"].copy() + self.results["virial"].copy().T) + / atoms.get_volume() + ) + # Voigt notation + self.results["stress"] = stress.flat[[0, 4, 8, 5, 2, 1]] + else: + raise PropertyNotImplementedError diff --git a/deepmd/pt/utils/auto_batch_size.py b/deepmd/pt/utils/auto_batch_size.py new file mode 100644 index 0000000000..5af7760e2a --- /dev/null +++ b/deepmd/pt/utils/auto_batch_size.py @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch + +from deepmd.utils.batch_size import 
AutoBatchSize as AutoBatchSizeBase + + +class AutoBatchSize(AutoBatchSizeBase): + def is_gpu_available(self) -> bool: + """Check if GPU is available. + + Returns + ------- + bool + True if GPU is available + """ + return torch.cuda.is_available() + + def is_oom_error(self, e: Exception) -> bool: + """Check if the exception is an OOM error. + + Parameters + ---------- + e : Exception + Exception + """ + return isinstance(e, RuntimeError) and "CUDA out of memory." in e.args[0] diff --git a/deepmd/pt/utils/cache.py b/deepmd/pt/utils/cache.py new file mode 100644 index 0000000000..c40c4050b7 --- /dev/null +++ b/deepmd/pt/utils/cache.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy as copy_lib +import functools + + +def lru_cache(maxsize=16, typed=False, copy=False, deepcopy=False): + if deepcopy: + + def decorator(f): + cached_func = functools.lru_cache(maxsize, typed)(f) + + @functools.wraps(f) + def wrapper(*args, **kwargs): + return copy_lib.deepcopy(cached_func(*args, **kwargs)) + + return wrapper + + elif copy: + + def decorator(f): + cached_func = functools.lru_cache(maxsize, typed)(f) + + @functools.wraps(f) + def wrapper(*args, **kwargs): + return copy_lib.copy(cached_func(*args, **kwargs)) + + return wrapper + + else: + decorator = functools.lru_cache(maxsize, typed) + return decorator diff --git a/deepmd/pt/utils/dataloader.py b/deepmd/pt/utils/dataloader.py new file mode 100644 index 0000000000..7c95f66c9c --- /dev/null +++ b/deepmd/pt/utils/dataloader.py @@ -0,0 +1,319 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import logging +import os +import queue +import time +from multiprocessing.dummy import ( + Pool, +) +from threading import ( + Thread, +) +from typing import ( + List, +) + +import h5py +import torch +import torch.distributed as dist +import torch.multiprocessing +from torch.utils.data import ( + DataLoader, + Dataset, + WeightedRandomSampler, +) +from torch.utils.data.distributed import ( + DistributedSampler, +) + +from deepmd.pt.model.descriptor import ( + Descriptor, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSetForLoader, +) +from deepmd.utils.data_system import ( + prob_sys_size_ext, + process_sys_probs, +) + +torch.multiprocessing.set_sharing_strategy("file_system") + + +def setup_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + + +class DpLoaderSet(Dataset): + """A dataset for storing DataLoaders to multiple Systems.""" + + def __init__( + self, + systems, + batch_size, + model_params, + seed=10, + type_split=True, + noise_settings=None, + shuffle=True, + ): + setup_seed(seed) + if isinstance(systems, str): + with h5py.File(systems) as file: + systems = [os.path.join(systems, item) for item in file.keys()] + + self.systems: List[DeepmdDataSetForLoader] = [] + if len(systems) >= 100: + logging.info(f"Constructing DataLoaders from {len(systems)} systems") + + def construct_dataset(system): + ### this design requires "rcut" and "sel" in the descriptor + ### VERY BAD DESIGN!!!! 
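+            ### (for reference, a non-hybrid descriptor config is assumed to carry
+            ### e.g. {"type": "se_e2_a", "rcut": 6.0, "sel": [46, 92]},
+            ### illustrative values taken from the water example)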
+ ### not all descriptors provides these parameter in their constructor + if model_params["descriptor"].get("type") != "hybrid": + info_dict = Descriptor.get_data_process_key(model_params["descriptor"]) + rcut = info_dict["rcut"] + sel = info_dict["sel"] + else: ### need to remove this + rcut = [] + sel = [] + for ii in model_params["descriptor"]["list"]: + rcut.append(ii["rcut"]) + sel.append(ii["sel"]) + return DeepmdDataSetForLoader( + system=system, + type_map=model_params["type_map"], + rcut=rcut, + sel=sel, + type_split=type_split, + noise_settings=noise_settings, + shuffle=shuffle, + ) + + with Pool( + os.cpu_count() + // (int(os.environ["LOCAL_WORLD_SIZE"]) if dist.is_initialized() else 1) + ) as pool: + self.systems = pool.map(construct_dataset, systems) + + self.sampler_list: List[DistributedSampler] = [] + self.index = [] + self.total_batch = 0 + + self.dataloaders = [] + for system in self.systems: + if dist.is_initialized(): + system_sampler = DistributedSampler(system) + self.sampler_list.append(system_sampler) + else: + system_sampler = None + if isinstance(batch_size, str): + if batch_size == "auto": + rule = 32 + elif batch_size.startswith("auto:"): + rule = int(batch_size.split(":")[1]) + else: + rule = None + logging.error("Unsupported batch size type") + self.batch_size = rule // system._natoms + if self.batch_size * system._natoms < rule: + self.batch_size += 1 + else: + self.batch_size = batch_size + system_dataloader = DataLoader( + dataset=system, + batch_size=self.batch_size, + num_workers=0, # Should be 0 to avoid too many threads forked + sampler=system_sampler, + collate_fn=collate_batch, + shuffle=(not dist.is_initialized()) and shuffle, + ) + self.dataloaders.append(system_dataloader) + self.index.append(len(system_dataloader)) + self.total_batch += len(system_dataloader) + # Initialize iterator instances for DataLoader + self.iters = [] + for item in self.dataloaders: + self.iters.append(iter(item)) + + def set_noise(self, noise_settings): + # noise_settings['noise_type'] # "trunc_normal", "normal", "uniform" + # noise_settings['noise'] # float, default 1.0 + # noise_settings['noise_mode'] # "prob", "fix_num" + # noise_settings['mask_num'] # if "fix_num", int + # noise_settings['mask_prob'] # if "prob", float + # noise_settings['same_mask'] # coord and type same mask? + for system in self.systems: + system.set_noise(noise_settings) + + def __len__(self): + return len(self.dataloaders) + + def __getitem__(self, idx): + # logging.warning(str(torch.distributed.get_rank())+" idx: "+str(idx)+" index: "+str(self.index[idx])) + try: + batch = next(self.iters[idx]) + except StopIteration: + self.iters[idx] = iter(self.dataloaders[idx]) + batch = next(self.iters[idx]) + batch["sid"] = idx + return batch + + +_sentinel = object() +QUEUESIZE = 32 + + +class BackgroundConsumer(Thread): + def __init__(self, queue, source, max_len): + Thread.__init__(self) + self._queue = queue + self._source = source # Main DL iterator + self._max_len = max_len # + + def run(self): + for item in self._source: + self._queue.put(item) # Blocking if the queue is full + + # Signal the consumer we are done. 
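+        # (_sentinel is a unique module-level object; BufferedIterator.__next__
+        # raises StopIteration when it receives it, ending the epoch cleanly)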
+ self._queue.put(_sentinel) + + +class BufferedIterator: + def __init__(self, iterable): + self._queue = queue.Queue(QUEUESIZE) + self._iterable = iterable + self._consumer = None + + self.start_time = time.time() + self.warning_time = None + self.total = len(iterable) + + def _create_consumer(self): + self._consumer = BackgroundConsumer(self._queue, self._iterable, self.total) + self._consumer.daemon = True + self._consumer.start() + + def __iter__(self): + return self + + def __len__(self): + return self.total + + def __next__(self): + # Create consumer if not created yet + if self._consumer is None: + self._create_consumer() + # Notify the user if there is a data loading bottleneck + if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): + if time.time() - self.start_time > 5 * 60: + if ( + self.warning_time is None + or time.time() - self.warning_time > 15 * 60 + ): + logging.warning( + "Data loading buffer is empty or nearly empty. This may " + "indicate a data loading bottleneck, and increasing the " + "number of workers (--num-workers) may help." + ) + self.warning_time = time.time() + + # Get next example + item = self._queue.get() + if isinstance(item, Exception): + raise item + if item is _sentinel: + raise StopIteration + return item + + +def collate_tensor_fn(batch): + elem = batch[0] + if not isinstance(elem, list): + out = None + if torch.utils.data.get_worker_info() is not None: + # If we're in a background process, concatenate directly into a + # shared memory tensor to avoid an extra copy + numel = sum(x.numel() for x in batch) + storage = elem._typed_storage()._new_shared(numel, device=elem.device) + out = elem.new(storage).resize_(len(batch), *list(elem.size())) + return torch.stack(batch, 0, out=out) + else: + out_hybrid = [] + for ii, hybrid_item in enumerate(elem): + out = None + tmp_batch = [x[ii] for x in batch] + if torch.utils.data.get_worker_info() is not None: + # If we're in a background process, concatenate directly into a + # shared memory tensor to avoid an extra copy + numel = sum(x.numel() for x in tmp_batch) + storage = hybrid_item._typed_storage()._new_shared( + numel, device=hybrid_item.device + ) + out = hybrid_item.new(storage).resize_( + len(tmp_batch), *list(hybrid_item.size()) + ) + out_hybrid.append(torch.stack(tmp_batch, 0, out=out)) + return out_hybrid + + +def collate_batch(batch): + example = batch[0] + result = example.copy() + for key in example.keys(): + if key == "shift" or key == "mapping": + natoms_extended = max([d[key].shape[0] for d in batch]) + n_frames = len(batch) + list = [] + for x in range(n_frames): + list.append(batch[x][key]) + if key == "shift": + result[key] = torch.zeros( + (n_frames, natoms_extended, 3), + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.PREPROCESS_DEVICE, + ) + else: + result[key] = torch.zeros( + (n_frames, natoms_extended), + dtype=torch.long, + device=env.PREPROCESS_DEVICE, + ) + for i in range(len(batch)): + natoms_tmp = list[i].shape[0] + result[key][i, :natoms_tmp] = list[i] + elif "find_" in key: + result[key] = batch[0][key] + else: + if batch[0][key] is None: + result[key] = None + elif key == "fid": + result[key] = [d[key] for d in batch] + else: + result[key] = collate_tensor_fn([d[key] for d in batch]) + return result + + +def get_weighted_sampler(training_data, prob_style, sys_prob=False): + if sys_prob is False: + if prob_style == "prob_uniform": + prob_v = 1.0 / float(training_data.__len__()) + probs = [prob_v for ii in range(training_data.__len__())] + else: # 
prob_sys_size;A:B:p1;C:D:p2 or prob_sys_size = prob_sys_size;0:nsys:1.0 + if prob_style == "prob_sys_size": + style = f"prob_sys_size;0:{len(training_data)}:1.0" + else: + style = prob_style + probs = prob_sys_size_ext(style, len(training_data), training_data.index) + else: + probs = process_sys_probs(prob_style, training_data.index) + logging.info("Generated weighted sampler with prob array: " + str(probs)) + # training_data.total_batch is the size of one epoch, you can increase it to avoid too many rebuilding of iteraters + len_sampler = training_data.total_batch * max(env.NUM_WORKERS, 1) + sampler = WeightedRandomSampler(probs, len_sampler, replacement=True) + return sampler diff --git a/deepmd/pt/utils/dataset.py b/deepmd/pt/utils/dataset.py new file mode 100644 index 0000000000..24daa6e37e --- /dev/null +++ b/deepmd/pt/utils/dataset.py @@ -0,0 +1,918 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import glob +import os +from typing import ( + List, + Optional, +) + +import h5py +import numpy as np +import torch +import torch.distributed as dist +from torch.utils.data import ( + Dataset, +) +from tqdm import ( + trange, +) + +from deepmd.pt.utils import ( + dp_random, + env, +) +from deepmd.pt.utils.cache import ( + lru_cache, +) +from deepmd.pt.utils.preprocess import ( + Region3D, + make_env_mat, + normalize_coord, +) + + +class DeepmdDataSystem: + def __init__( + self, + sys_path: str, + rcut, + sec, + type_map: Optional[List[str]] = None, + type_split=True, + noise_settings=None, + shuffle=True, + ): + """Construct DeePMD-style frame collection of one system. + + Args: + - sys_path: Paths to the system. + - type_map: Atom types. + """ + sys_path = sys_path.replace("#", "") + if ".hdf5" in sys_path: + tmp = sys_path.split("/") + path = "/".join(tmp[:-1]) + sys = tmp[-1] + self.file = h5py.File(path)[sys] + self._dirs = [] + for item in self.file.keys(): + if "set." in item: + self._dirs.append(item) + self._dirs.sort() + else: + self.file = None + self._dirs = glob.glob(os.path.join(sys_path, "set.*")) + self._dirs.sort() + self.type_split = type_split + self.noise_settings = noise_settings + self._check_pbc(sys_path) + self.shuffle = shuffle + if noise_settings is not None: + self.noise_type = noise_settings.get("noise_type", "uniform") + self.noise = float(noise_settings.get("noise", 1.0)) + self.noise_mode = noise_settings.get("noise_mode", "fix_num") + self.mask_num = int(noise_settings.get("mask_num", 1)) + self.mask_prob = float(noise_settings.get("mask_prob", 0.15)) + self.same_mask = noise_settings.get("same_mask", False) + self.mask_coord = noise_settings.get("mask_coord", False) + self.mask_type = noise_settings.get("mask_type", False) + self.mask_type_idx = int(noise_settings.get("mask_type_idx", 0)) + self.max_fail_num = int(noise_settings.get("max_fail_num", 10)) + + # check mixed type + error_format_msg = ( + "if one of the set is of mixed_type format, " + "then all of the sets in this system should be of mixed_type format!" 
+        )
+        if len(self._dirs) == 0:
+            raise RuntimeError(f"No set found in system {sys_path}.")
+
+        self.mixed_type = self._check_mode(self._dirs[0])
+        for set_item in self._dirs[1:]:
+            assert self._check_mode(set_item) == self.mixed_type, error_format_msg
+
+        self._atom_type = self._load_type(sys_path)
+        self._natoms = len(self._atom_type)
+
+        self._type_map = self._load_type_map(sys_path)
+        self.enforce_type_map = False
+        if type_map is not None and self._type_map is not None:
+            if not self.mixed_type:
+                atom_type = [
+                    type_map.index(self._type_map[ii]) for ii in self._atom_type
+                ]
+                self._atom_type = np.array(atom_type, dtype=np.int32)
+
+            else:
+                self.enforce_type_map = True
+                sorter = np.argsort(type_map)
+                self.type_idx_map = np.array(
+                    sorter[np.searchsorted(type_map, self._type_map, sorter=sorter)]
+                )
+                # padding for virtual atom
+                self.type_idx_map = np.append(
+                    self.type_idx_map, np.array([-1], dtype=np.int32)
+                )
+            self._type_map = type_map
+        if type_map is None and self._type_map is None and self.mixed_type:
+            raise RuntimeError("mixed_type format must have type_map!")
+        self._idx_map = _make_idx_map(self._atom_type)
+
+        self._data_dict = {}
+        self.add("box", 9, must=self.pbc)
+        self.add("coord", 3, atomic=True, must=True)
+        self.add("energy", 1, atomic=False, must=False, high_prec=True)
+        self.add("force", 3, atomic=True, must=False, high_prec=False)
+        self.add("virial", 9, atomic=False, must=False, high_prec=False)
+
+        self._sys_path = sys_path
+        self.rcut = rcut
+        self.sec = sec
+        if isinstance(rcut, float):
+            self.hybrid = False
+        elif isinstance(rcut, list):
+            self.hybrid = True
+        else:
+            raise RuntimeError("Unknown rcut type!")
+        self.sets = [None for _ in range(len(self._dirs))]  # one cache slot per set
+
+        self.nframes = 0
+        i = 1
+        self.prefix_sum = [0] * (len(self._dirs) + 1)
+        for item in self._dirs:
+            frames = self._load_set(item, fast=True)
+            self.prefix_sum[i] = self.prefix_sum[i - 1] + frames
+            i += 1
+            self.nframes += frames
+
+    def _check_pbc(self, sys_path):
+        pbc = True
+        if os.path.isfile(os.path.join(sys_path, "nopbc")):
+            pbc = False
+        self.pbc = pbc
+
+    def set_noise(self, noise_settings):
+        # noise_settings['noise_type'] # "trunc_normal", "normal", "uniform"
+        # noise_settings['noise'] # float, default 1.0
+        # noise_settings['noise_mode'] # "prob", "fix_num"
+        # noise_settings['mask_num'] # if "fix_num", int
+        # noise_settings['mask_prob'] # if "prob", float
+        # noise_settings['same_mask'] # coord and type same mask?
+        self.noise_settings = noise_settings
+        self.noise_type = noise_settings.get("noise_type", "uniform")
+        self.noise = float(noise_settings.get("noise", 1.0))
+        self.noise_mode = noise_settings.get("noise_mode", "fix_num")
+        self.mask_num = int(noise_settings.get("mask_num", 1))
+        self.mask_coord = noise_settings.get("mask_coord", False)
+        self.mask_type = noise_settings.get("mask_type", False)
+        self.mask_prob = float(noise_settings.get("mask_prob", 0.15))
+        self.same_mask = noise_settings.get("same_mask", False)
+
+    def add(
+        self,
+        key: str,
+        ndof: int,
+        atomic: bool = False,
+        must: bool = False,
+        high_prec: bool = False,
+    ):
+        """Add a data item to be loaded.
+
+        Args:
+        - key: The key of the item. The corresponding data is stored in `sys_path/set.*/key.npy`
+        - ndof: The number of degrees of freedom
+        - atomic: The item is an atomic property.
+        - must: The data file `sys_path/set.*/key.npy` must exist. Otherwise, value is set to zero.
+        - high_prec: Load the data and store in float64, otherwise in float32.
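+
+        Example (mirrors the registrations in `__init__` above):
+            self.add("energy", 1, atomic=False, must=False, high_prec=True)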
+ """ + self._data_dict[key] = { + "ndof": ndof, + "atomic": atomic, + "must": must, + "high_prec": high_prec, + } + + # deprecated TODO + def get_batch_for_train(self, batch_size: int): + """Get a batch of data with at most `batch_size` frames. The frames are randomly picked from the data system. + + Args: + - batch_size: Frame count. + """ + if not hasattr(self, "_frames"): + self.set_size = 0 + self._set_count = 0 + self._iterator = 0 + if batch_size == "auto": + batch_size = -(-32 // self._natoms) + if self._iterator + batch_size > self.set_size: + set_idx = self._set_count % len(self._dirs) + if self.sets[set_idx] is None: + frames = self._load_set(self._dirs[set_idx]) + frames = self.preprocess(frames) + cnt = 0 + for item in self.sets: + if item is not None: + cnt += 1 + if cnt < env.CACHE_PER_SYS: + self.sets[set_idx] = frames + else: + frames = self.sets[set_idx] + self._frames = frames + self._shuffle_data() + if dist.is_initialized(): + world_size = dist.get_world_size() + rank = dist.get_rank() + ssize = self._frames["coord"].shape[0] + subsize = ssize // world_size + self._iterator = rank * subsize + self.set_size = min((rank + 1) * subsize, ssize) + else: + self.set_size = self._frames["coord"].shape[0] + self._iterator = 0 + self._set_count += 1 + iterator = min(self._iterator + batch_size, self.set_size) + idx = np.arange(self._iterator, iterator) + self._iterator += batch_size + return self._get_subdata(idx) + + # deprecated TODO + def get_batch(self, batch_size: int): + """Get a batch of data with at most `batch_size` frames. The frames are randomly picked from the data system. + Args: + - batch_size: Frame count. + """ + if not hasattr(self, "_frames"): + self.set_size = 0 + self._set_count = 0 + self._iterator = 0 + if batch_size == "auto": + batch_size = -(-32 // self._natoms) + if self._iterator + batch_size > self.set_size: + set_idx = self._set_count % len(self._dirs) + if self.sets[set_idx] is None: + frames = self._load_set(self._dirs[set_idx]) + frames = self.preprocess(frames) + cnt = 0 + for item in self.sets: + if item is not None: + cnt += 1 + if cnt < env.CACHE_PER_SYS: + self.sets[set_idx] = frames + else: + frames = self.sets[set_idx] + self._frames = frames + self._shuffle_data() + self.set_size = self._frames["coord"].shape[0] + self._iterator = 0 + self._set_count += 1 + iterator = min(self._iterator + batch_size, self.set_size) + idx = np.arange(self._iterator, iterator) + self._iterator += batch_size + return self._get_subdata(idx) + + def get_ntypes(self): + """Number of atom types in the system.""" + if self._type_map is not None: + return len(self._type_map) + else: + return max(self._atom_type) + 1 + + def get_natoms_vec(self, ntypes: int): + """Get number of atoms and number of atoms in different types. + + Args: + - ntypes: Number of types (may be larger than the actual number of types in the system). 
+ """ + natoms = len(self._atom_type) + natoms_vec = np.zeros(ntypes).astype(int) + for ii in range(ntypes): + natoms_vec[ii] = np.count_nonzero(self._atom_type == ii) + tmp = [natoms, natoms] + tmp = np.append(tmp, natoms_vec) + return tmp.astype(np.int32) + + def _load_type(self, sys_path): + if self.file is not None: + return self.file["type.raw"][:] + else: + return np.loadtxt( + os.path.join(sys_path, "type.raw"), dtype=np.int32, ndmin=1 + ) + + def _load_type_map(self, sys_path): + if self.file is not None: + tmp = self.file["type_map.raw"][:].tolist() + tmp = [item.decode("ascii") for item in tmp] + return tmp + else: + fname = os.path.join(sys_path, "type_map.raw") + if os.path.isfile(fname): + with open(fname) as fin: + content = fin.read() + return content.split() + else: + return None + + def _check_mode(self, sys_path): + return os.path.isfile(sys_path + "/real_atom_types.npy") + + def _load_type_mix(self, set_name): + type_path = set_name + "/real_atom_types.npy" + real_type = np.load(type_path).astype(np.int32).reshape([-1, self._natoms]) + return real_type + + @lru_cache(maxsize=16, copy=True) + def _load_set(self, set_name, fast=False): + if self.file is None: + path = os.path.join(set_name, "coord.npy") + if self._data_dict["coord"]["high_prec"]: + coord = np.load(path).astype(env.GLOBAL_ENER_FLOAT_PRECISION) + else: + coord = np.load(path).astype(env.GLOBAL_NP_FLOAT_PRECISION) + if coord.ndim == 1: + coord = coord.reshape([1, -1]) + assert coord.shape[1] == self._data_dict["coord"]["ndof"] * self._natoms + nframes = coord.shape[0] + if fast: + return nframes + data = {"type": np.tile(self._atom_type[self._idx_map], (nframes, 1))} + for kk in self._data_dict.keys(): + data["find_" + kk], data[kk] = self._load_data( + set_name, + kk, + nframes, + self._data_dict[kk]["ndof"], + atomic=self._data_dict[kk]["atomic"], + high_prec=self._data_dict[kk]["high_prec"], + must=self._data_dict[kk]["must"], + ) + if self.mixed_type: + # nframes x natoms + atom_type_mix = self._load_type_mix(set_name) + if self.enforce_type_map: + try: + atom_type_mix_ = self.type_idx_map[atom_type_mix].astype( + np.int32 + ) + except IndexError as e: + raise IndexError( + "some types in 'real_atom_types.npy' of set {} are not contained in {} types!".format( + set_name, self.get_ntypes() + ) + ) from e + atom_type_mix = atom_type_mix_ + real_type = atom_type_mix.reshape([nframes, self._natoms]) + data["type"] = real_type + natoms = data["type"].shape[1] + # nframes x ntypes + atom_type_nums = np.array( + [(real_type == i).sum(axis=-1) for i in range(self.get_ntypes())], + dtype=np.int32, + ).T + ghost_nums = np.array( + [(real_type == -1).sum(axis=-1)], + dtype=np.int32, + ).T + assert ( + atom_type_nums.sum(axis=-1) + ghost_nums.sum(axis=-1) == natoms + ).all(), "some types in 'real_atom_types.npy' of set {} are not contained in {} types!".format( + set_name, self.get_ntypes() + ) + data["real_natoms_vec"] = np.concatenate( + ( + np.tile( + np.array([natoms, natoms], dtype=np.int32), (nframes, 1) + ), + atom_type_nums, + ), + axis=-1, + ) + + return data + else: + data = {} + nframes = self.file[set_name]["coord.npy"].shape[0] + if fast: + return nframes + for key in ["coord", "energy", "force", "box"]: + data[key] = self.file[set_name][f"{key}.npy"][:] + if self._data_dict[key]["atomic"]: + data[key] = data[key].reshape(nframes, self._natoms, -1)[ + :, self._idx_map, : + ] + if self.mixed_type: + # nframes x natoms + atom_type_mix = self._load_type_mix(set_name) + if self.enforce_type_map: + try: + 
atom_type_mix_ = self.type_idx_map[atom_type_mix].astype( + np.int32 + ) + except IndexError as e: + raise IndexError( + "some types in 'real_atom_types.npy' of set {} are not contained in {} types!".format( + set_name, self.get_ntypes() + ) + ) from e + atom_type_mix = atom_type_mix_ + real_type = atom_type_mix.reshape([nframes, self._natoms]) + data["type"] = real_type + natoms = data["type"].shape[1] + # nframes x ntypes + atom_type_nums = np.array( + [(real_type == i).sum(axis=-1) for i in range(self.get_ntypes())], + dtype=np.int32, + ).T + ghost_nums = np.array( + [(real_type == -1).sum(axis=-1)], + dtype=np.int32, + ).T + assert ( + atom_type_nums.sum(axis=-1) + ghost_nums.sum(axis=-1) == natoms + ).all(), "some types in 'real_atom_types.npy' of set {} are not contained in {} types!".format( + set_name, self.get_ntypes() + ) + data["real_natoms_vec"] = np.concatenate( + ( + np.tile( + np.array([natoms, natoms], dtype=np.int32), (nframes, 1) + ), + atom_type_nums, + ), + axis=-1, + ) + else: + data["type"] = np.tile(self._atom_type[self._idx_map], (nframes, 1)) + return data + + def _load_data( + self, set_name, key, nframes, ndof, atomic=False, must=True, high_prec=False + ): + if atomic: + ndof *= self._natoms + path = os.path.join(set_name, key + ".npy") + # logging.info('Loading data from: %s', path) + if os.path.isfile(path): + if high_prec: + data = np.load(path).astype(env.GLOBAL_ENER_FLOAT_PRECISION) + else: + data = np.load(path).astype(env.GLOBAL_NP_FLOAT_PRECISION) + if atomic: + data = data.reshape([nframes, self._natoms, -1]) + data = data[:, self._idx_map, :] + data = data.reshape([nframes, -1]) + data = np.reshape(data, [nframes, ndof]) + return np.float32(1.0), data + elif must: + raise RuntimeError("%s not found!" % path) + else: + if high_prec: + data = np.zeros([nframes, ndof]).astype(env.GLOBAL_ENER_FLOAT_PRECISION) + else: + data = np.zeros([nframes, ndof]).astype(env.GLOBAL_NP_FLOAT_PRECISION) + return np.float32(0.0), data + + # deprecated TODO + def preprocess(self, batch): + n_frames = batch["coord"].shape[0] + for kk in self._data_dict.keys(): + if "find_" in kk: + pass + else: + batch[kk] = torch.tensor( + batch[kk], + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.PREPROCESS_DEVICE, + ) + if self._data_dict[kk]["atomic"]: + batch[kk] = batch[kk].view( + n_frames, -1, self._data_dict[kk]["ndof"] + ) + + for kk in ["type", "real_natoms_vec"]: + if kk in batch.keys(): + batch[kk] = torch.tensor( + batch[kk], dtype=torch.long, device=env.PREPROCESS_DEVICE + ) + batch["atype"] = batch.pop("type") + + keys = ["nlist", "nlist_loc", "nlist_type", "shift", "mapping"] + coord = batch["coord"] + atype = batch["atype"] + box = batch["box"] + rcut = self.rcut + sec = self.sec + assert batch["atype"].max() < len(self._type_map) + nlist, nlist_loc, nlist_type, shift, mapping = [], [], [], [], [] + + for sid in trange(n_frames, disable=env.DISABLE_TQDM): + region = Region3D(box[sid]) + nloc = atype[sid].shape[0] + _coord = normalize_coord(coord[sid], region, nloc) + coord[sid] = _coord + a, b, c, d, e = make_env_mat( + _coord, atype[sid], region, rcut, sec, type_split=self.type_split + ) + nlist.append(a) + nlist_loc.append(b) + nlist_type.append(c) + shift.append(d) + mapping.append(e) + nlist = torch.stack(nlist) + nlist_loc = torch.stack(nlist_loc) + nlist_type = torch.stack(nlist_type) + batch["nlist"] = nlist + batch["nlist_loc"] = nlist_loc + batch["nlist_type"] = nlist_type + natoms_extended = max([item.shape[0] for item in shift]) + batch["shift"] = 
torch.zeros( + (n_frames, natoms_extended, 3), + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.PREPROCESS_DEVICE, + ) + batch["mapping"] = torch.zeros( + (n_frames, natoms_extended), dtype=torch.long, device=env.PREPROCESS_DEVICE + ) + for i in range(len(shift)): + natoms_tmp = shift[i].shape[0] + batch["shift"][i, :natoms_tmp] = shift[i] + batch["mapping"][i, :natoms_tmp] = mapping[i] + return batch + + def _shuffle_data(self): + nframes = self._frames["coord"].shape[0] + idx = np.arange(nframes) + if self.shuffle: + dp_random.shuffle(idx) + self.idx_mapping = idx + + def _get_subdata(self, idx=None): + data = self._frames + idx = self.idx_mapping[idx] + new_data = {} + for ii in data: + dd = data[ii] + if "find_" in ii: + new_data[ii] = dd + else: + if idx is not None: + new_data[ii] = dd[idx] + else: + new_data[ii] = dd + return new_data + + # note: this function needs to be optimized for single frame process + def single_preprocess(self, batch, sid): + for kk in self._data_dict.keys(): + if "find_" in kk: + pass + else: + batch[kk] = torch.tensor( + batch[kk][sid], + dtype=env.GLOBAL_PT_FLOAT_PRECISION, + device=env.PREPROCESS_DEVICE, + ) + if self._data_dict[kk]["atomic"]: + batch[kk] = batch[kk].view(-1, self._data_dict[kk]["ndof"]) + for kk in ["type", "real_natoms_vec"]: + if kk in batch.keys(): + batch[kk] = torch.tensor( + batch[kk][sid], dtype=torch.long, device=env.PREPROCESS_DEVICE + ) + clean_coord = batch.pop("coord") + clean_type = batch.pop("type") + nloc = clean_type.shape[0] + rcut = self.rcut + sec = self.sec + nlist, nlist_loc, nlist_type, shift, mapping = [], [], [], [], [] + if self.pbc: + box = batch["box"] + region = Region3D(box) + else: + box = None + batch["box"] = None + region = None + if self.noise_settings is None: + batch["atype"] = clean_type + batch["coord"] = clean_coord + coord = clean_coord + atype = batch["atype"] + if self.pbc: + _coord = normalize_coord(coord, region, nloc) + + else: + _coord = coord.clone() + batch["coord"] = _coord + nlist, nlist_loc, nlist_type, shift, mapping = make_env_mat( + _coord, + atype, + region, + rcut, + sec, + pbc=self.pbc, + type_split=self.type_split, + ) + batch["nlist"] = nlist + batch["nlist_loc"] = nlist_loc + batch["nlist_type"] = nlist_type + batch["shift"] = shift + batch["mapping"] = mapping + return batch + else: + batch["clean_type"] = clean_type + if self.pbc: + _clean_coord = normalize_coord(clean_coord, region, nloc) + else: + _clean_coord = clean_coord.clone() + batch["clean_coord"] = _clean_coord + # add noise + for i in range(self.max_fail_num): + mask_num = 0 + if self.noise_mode == "fix_num": + mask_num = self.mask_num + if len(batch["clean_type"]) < mask_num: + mask_num = len(batch["clean_type"]) + elif self.noise_mode == "prob": + mask_num = int(self.mask_prob * nloc) + if mask_num == 0: + mask_num = 1 + else: + NotImplementedError(f"Unknown noise mode {self.noise_mode}!") + rng = np.random.default_rng() + coord_mask_res = rng.choice( + range(nloc), mask_num, replace=False + ).tolist() + coord_mask = np.isin(range(nloc), coord_mask_res) + if self.same_mask: + type_mask = coord_mask.copy() + else: + rng = np.random.default_rng() + type_mask_res = rng.choice( + range(nloc), mask_num, replace=False + ).tolist() + type_mask = np.isin(range(nloc), type_mask_res) + + # add noise for coord + if self.mask_coord: + noise_on_coord = 0.0 + rng = np.random.default_rng() + if self.noise_type == "trunc_normal": + noise_on_coord = np.clip( + rng.standard_normal((mask_num, 3)) * self.noise, + 
a_min=-self.noise * 2.0, + a_max=self.noise * 2.0, + ) + elif self.noise_type == "normal": + noise_on_coord = rng.standard_normal((mask_num, 3)) * self.noise + elif self.noise_type == "uniform": + noise_on_coord = rng.uniform( + low=-self.noise, high=self.noise, size=(mask_num, 3) + ) + else: + NotImplementedError(f"Unknown noise type {self.noise_type}!") + noised_coord = _clean_coord.clone().detach() + noised_coord[coord_mask] += noise_on_coord + batch["coord_mask"] = torch.tensor( + coord_mask, dtype=torch.bool, device=env.PREPROCESS_DEVICE + ) + else: + noised_coord = _clean_coord + batch["coord_mask"] = torch.tensor( + np.zeros_like(coord_mask, dtype=bool), + dtype=torch.bool, + device=env.PREPROCESS_DEVICE, + ) + + # add mask for type + if self.mask_type: + masked_type = clean_type.clone().detach() + masked_type[type_mask] = self.mask_type_idx + batch["type_mask"] = torch.tensor( + type_mask, dtype=torch.bool, device=env.PREPROCESS_DEVICE + ) + else: + masked_type = clean_type + batch["type_mask"] = torch.tensor( + np.zeros_like(type_mask, dtype=bool), + dtype=torch.bool, + device=env.PREPROCESS_DEVICE, + ) + if self.pbc: + _coord = normalize_coord(noised_coord, region, nloc) + else: + _coord = noised_coord.clone() + try: + nlist, nlist_loc, nlist_type, shift, mapping = make_env_mat( + _coord, + masked_type, + region, + rcut, + sec, + pbc=self.pbc, + type_split=self.type_split, + min_check=True, + ) + except RuntimeError as e: + if i == self.max_fail_num - 1: + RuntimeError( + f"Add noise times beyond max tries {self.max_fail_num}!" + ) + continue + batch["atype"] = masked_type + batch["coord"] = noised_coord + batch["nlist"] = nlist + batch["nlist_loc"] = nlist_loc + batch["nlist_type"] = nlist_type + batch["shift"] = shift + batch["mapping"] = mapping + return batch + + def _get_item(self, index): + for i in range( + 0, len(self._dirs) + 1 + ): # note: if different sets can be merged, prefix sum is unused to calculate + if index < self.prefix_sum[i]: + break + frames = self._load_set(self._dirs[i - 1]) + frame = self.single_preprocess(frames, index - self.prefix_sum[i - 1]) + frame["fid"] = index + return frame + + +def _make_idx_map(atom_type): + natoms = atom_type.shape[0] + idx = np.arange(natoms) + idx_map = np.lexsort((idx, atom_type)) + return idx_map + + +class DeepmdDataSetForLoader(Dataset): + def __init__( + self, + system: str, + type_map: str, + rcut, + sel, + weight=None, + type_split=True, + noise_settings=None, + shuffle=True, + ): + """Construct DeePMD-style dataset containing frames cross different systems. + + Args: + - systems: Paths to systems. + - batch_size: Max frame count in a batch. + - type_map: Atom types. 
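+            - rcut: Cut-off radius; a list of cut-offs when the descriptor is hybrid.
+            - sel: Maximum neighbor counts per type, cumulated into the `sec` sums below.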
+ """ + self._type_map = type_map + if not isinstance(rcut, list): + if isinstance(sel, int): + sel = [sel] + sec = torch.cumsum(torch.tensor(sel), dim=0) + else: + sec = [] + for sel_item in sel: + if isinstance(sel_item, int): + sel_item = [sel_item] + sec.append(torch.cumsum(torch.tensor(sel_item), dim=0)) + self._data_system = DeepmdDataSystem( + system, + rcut, + sec, + type_map=self._type_map, + type_split=type_split, + noise_settings=noise_settings, + shuffle=shuffle, + ) + self.mixed_type = self._data_system.mixed_type + self._ntypes = self._data_system.get_ntypes() + self._natoms = self._data_system._natoms + self._natoms_vec = self._data_system.get_natoms_vec(self._ntypes) + + def set_noise(self, noise_settings): + # noise_settings['noise_type'] # "trunc_normal", "normal", "uniform" + # noise_settings['noise'] # float, default 1.0 + # noise_settings['noise_mode'] # "prob", "fix_num" + # noise_settings['mask_num'] # if "fix_num", int + # noise_settings['mask_prob'] # if "prob", float + # noise_settings['same_mask'] # coord and type same mask? + self._data_system.set_noise(noise_settings) + + def __len__(self): + return self._data_system.nframes + + def __getitem__(self, index): + """Get a frame from the selected system.""" + b_data = self._data_system._get_item(index) + b_data["natoms"] = torch.tensor(self._natoms_vec, device=env.PREPROCESS_DEVICE) + return b_data + + +# deprecated TODO +class DeepmdDataSet(Dataset): + def __init__( + self, + systems: List[str], + batch_size: int, + type_map: List[str], + rcut=None, + sel=None, + weight=None, + type_split=True, + ): + """Construct DeePMD-style dataset containing frames cross different systems. + + Args: + - systems: Paths to systems. + - batch_size: Max frame count in a batch. + - type_map: Atom types. + """ + self._batch_size = batch_size + self._type_map = type_map + if sel is not None: + if isinstance(sel, int): + sel = [sel] + sec = torch.cumsum(torch.tensor(sel), dim=0) + if isinstance(systems, str): + with h5py.File(systems) as file: + systems = [os.path.join(systems, item) for item in file.keys()] + self._data_systems = [ + DeepmdDataSystem( + ii, rcut, sec, type_map=self._type_map, type_split=type_split + ) + for ii in systems + ] + # check mix_type format + error_format_msg = ( + "if one of the system is of mixed_type format, " + "then all of the systems in this dataset should be of mixed_type format!" 
+ ) + self.mixed_type = self._data_systems[0].mixed_type + for sys_item in self._data_systems[1:]: + assert sys_item.mixed_type == self.mixed_type, error_format_msg + + if weight is None: + + def weight(name, sys): + return sys.nframes + + self.probs = [ + weight(item, self._data_systems[i]) for i, item in enumerate(systems) + ] + self.probs = np.array(self.probs, dtype=float) + self.probs /= self.probs.sum() + self._ntypes = max([ii.get_ntypes() for ii in self._data_systems]) + self._natoms_vec = [ + ii.get_natoms_vec(self._ntypes) for ii in self._data_systems + ] + self.cache = [{} for _ in self._data_systems] + + @property + def nsystems(self): + return len(self._data_systems) + + def __len__(self): + return self.nsystems + + def __getitem__(self, index=None): + """Get a batch of frames from the selected system.""" + if index is None: + index = dp_random.choice(np.arange(self.nsystems), self.probs) + b_data = self._data_systems[index].get_batch(self._batch_size) + b_data["natoms"] = torch.tensor( + self._natoms_vec[index], device=env.PREPROCESS_DEVICE + ) + batch_size = b_data["coord"].shape[0] + b_data["natoms"] = b_data["natoms"].unsqueeze(0).expand(batch_size, -1) + return b_data + + # deprecated TODO + def get_training_batch(self, index=None): + """Get a batch of frames from the selected system.""" + if index is None: + index = dp_random.choice(np.arange(self.nsystems), self.probs) + b_data = self._data_systems[index].get_batch_for_train(self._batch_size) + b_data["natoms"] = torch.tensor( + self._natoms_vec[index], device=env.PREPROCESS_DEVICE + ) + batch_size = b_data["coord"].shape[0] + b_data["natoms"] = b_data["natoms"].unsqueeze(0).expand(batch_size, -1) + return b_data + + def get_batch(self, sys_idx=None): + """TF-compatible batch for testing.""" + pt_batch = self[sys_idx] + np_batch = {} + for key in ["coord", "box", "force", "energy", "virial"]: + if key in pt_batch.keys(): + np_batch[key] = pt_batch[key].cpu().numpy() + for key in ["atype", "natoms"]: + if key in pt_batch.keys(): + np_batch[key] = pt_batch[key].cpu().numpy() + batch_size = pt_batch["coord"].shape[0] + np_batch["coord"] = np_batch["coord"].reshape(batch_size, -1) + np_batch["natoms"] = np_batch["natoms"][0] + np_batch["force"] = np_batch["force"].reshape(batch_size, -1) + return np_batch, pt_batch diff --git a/deepmd/pt/utils/dp_random.py b/deepmd/pt/utils/dp_random.py new file mode 100644 index 0000000000..e81488c506 --- /dev/null +++ b/deepmd/pt/utils/dp_random.py @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from deepmd.utils.random import ( + choice, + random, + seed, + shuffle, +) + +__all__ = [ + "choice", + "random", + "seed", + "shuffle", +] diff --git a/deepmd/pt/utils/env.py b/deepmd/pt/utils/env.py new file mode 100644 index 0000000000..5b6eaf7c14 --- /dev/null +++ b/deepmd/pt/utils/env.py @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os + +import numpy as np +import torch + +PRECISION = os.environ.get("PRECISION", "float64") +GLOBAL_NP_FLOAT_PRECISION = getattr(np, PRECISION) +GLOBAL_PT_FLOAT_PRECISION = getattr(torch, PRECISION) +GLOBAL_ENER_FLOAT_PRECISION = getattr(np, PRECISION) +DISABLE_TQDM = os.environ.get("DISABLE_TQDM", False) +SAMPLER_RECORD = os.environ.get("SAMPLER_RECORD", False) +try: + # only linux + ncpus = len(os.sched_getaffinity(0)) +except AttributeError: + ncpus = os.cpu_count() +NUM_WORKERS = int(os.environ.get("NUM_WORKERS", min(8, ncpus))) +# Make sure DDP uses correct device if applicable +LOCAL_RANK = 
os.environ.get("LOCAL_RANK")
+LOCAL_RANK = int(0 if LOCAL_RANK is None else LOCAL_RANK)
+
+if os.environ.get("DEVICE") == "cpu" or not torch.cuda.is_available():
+    DEVICE = torch.device("cpu")
+else:
+    DEVICE = torch.device(f"cuda:{LOCAL_RANK}")
+
+if os.environ.get("PREPROCESS_DEVICE") == "gpu":
+    PREPROCESS_DEVICE = torch.device(f"cuda:{LOCAL_RANK}")
+else:
+    PREPROCESS_DEVICE = torch.device("cpu")
+
+JIT = False
+CACHE_PER_SYS = 5  # keep at most this many sets per system in memory
+ENERGY_BIAS_TRAINABLE = True
+
+PRECISION_DICT = {
+    "float16": torch.float16,
+    "float32": torch.float32,
+    "float64": torch.float64,
+    "half": torch.float16,
+    "single": torch.float32,
+    "double": torch.float64,
+}
+DEFAULT_PRECISION = "float64"
diff --git a/deepmd/pt/utils/finetune.py b/deepmd/pt/utils/finetune.py
new file mode 100644
index 0000000000..9d82783cc0
--- /dev/null
+++ b/deepmd/pt/utils/finetune.py
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
+
+import torch
+
+from deepmd.pt.utils import (
+    env,
+)
+
+
+def change_finetune_model_params(
+    ckpt, finetune_model, model_config, multi_task=False, model_branch=""
+):
+    """Load model_params according to the pretrained one.
+
+    Args:
+    - ckpt & finetune_model: The pretrained model.
+    - model_config: Model params read from the input json file.
+    """
+    if multi_task:
+        # TODO
+        logging.warning("finetune mode needs modification for the multi-task mode!")
+    if finetune_model is not None:
+        state_dict = torch.load(finetune_model, map_location=env.DEVICE)
+        if "model" in state_dict:
+            state_dict = state_dict["model"]
+        last_model_params = state_dict["_extra_state"]["model_params"]
+        finetune_multi_task = "model_dict" in last_model_params
+        trainable_param = {
+            "type_embedding": True,
+            "descriptor": True,
+            "fitting_net": True,
+        }
+        for net_type in trainable_param:
+            if net_type in model_config:
+                trainable_param[net_type] = model_config[net_type].get(
+                    "trainable", True
+                )
+        if not finetune_multi_task:
+            old_type_map, new_type_map = (
+                last_model_params["type_map"],
+                model_config["type_map"],
+            )
+            assert set(new_type_map).issubset(
+                old_type_map
+            ), "Only a type map that is a subset of the pretrained one is supported when finetuning or resuming."
+            model_config = last_model_params
+            logging.info(
+                "Change the model configurations according to the pretrained one..."
+            )
+            model_config["new_type_map"] = new_type_map
+        else:
+            model_config["finetune_multi_task"] = finetune_multi_task
+            model_dict_params = last_model_params["model_dict"]
+            new_fitting = False
+            if model_branch == "":
+                model_branch_chosen = next(iter(model_dict_params.keys()))
+                new_fitting = True
+                model_config["bias_shift"] = "statistic"  # re-init the fitting net
+                logging.info(
+                    "The fitting net will be re-initialized instead of using the one "
+                    "in the pretrained model! The bias_shift will be 'statistic'!"
+                )
+            else:
+                model_branch_chosen = model_branch
+            assert model_branch_chosen in model_dict_params, (
+                f"No model branch named '{model_branch_chosen}'! "
+                f"Available ones are {list(model_dict_params.keys())}."
+            )
+            old_type_map, new_type_map = (
+                model_dict_params[model_branch_chosen]["type_map"],
+                model_config["type_map"],
+            )
+            assert set(new_type_map).issubset(
+                old_type_map
+            ), "Only a type map that is a subset of the pretrained one is supported when finetuning or resuming."
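+            # e.g. a model pretrained with type_map ["O", "H", "C"] can be
+            # fine-tuned with type_map ["O", "H"], but not with ["O", "H", "N"]
+            # (illustrative example; any subset of the pretrained type map works)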
+            for key_item in ["type_map", "type_embedding", "descriptor"]:
+                if key_item in model_dict_params[model_branch_chosen]:
+                    model_config[key_item] = model_dict_params[model_branch_chosen][
+                        key_item
+                    ]
+            if not new_fitting:
+                model_config["fitting_net"] = model_dict_params[model_branch_chosen][
+                    "fitting_net"
+                ]
+            logging.info(
+                f"Change the model configurations according to the model branch "
+                f"{model_branch_chosen} in the pretrained one..."
+            )
+            model_config["new_type_map"] = new_type_map
+            model_config["model_branch_chosen"] = model_branch_chosen
+            model_config["new_fitting"] = new_fitting
+        for net_type in trainable_param:
+            if net_type in model_config:
+                model_config[net_type]["trainable"] = trainable_param[net_type]
+            else:
+                model_config[net_type] = {"trainable": trainable_param[net_type]}
+    return model_config
diff --git a/deepmd/pt/utils/learning_rate.py b/deepmd/pt/utils/learning_rate.py
new file mode 100644
index 0000000000..eca3c6ad87
--- /dev/null
+++ b/deepmd/pt/utils/learning_rate.py
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import numpy as np
+
+
+class LearningRateExp:
+    def __init__(self, start_lr, stop_lr, decay_steps, stop_steps, **kwargs):
+        """Construct an exponentially-decayed learning rate.
+
+        Args:
+        - start_lr: Initial learning rate.
+        - stop_lr: Learning rate at the last step.
+        - decay_steps: Decay the learning rate every N steps.
+        - stop_steps: The total number of training steps.
+        """
+        self.start_lr = start_lr
+        default_ds = 100 if stop_steps // 10 > 100 else stop_steps // 100 + 1
+        self.decay_steps = decay_steps
+        if self.decay_steps >= stop_steps:
+            self.decay_steps = default_ds
+        self.decay_rate = np.exp(
+            np.log(stop_lr / self.start_lr) / (stop_steps / self.decay_steps)
+        )
+        if "decay_rate" in kwargs:
+            self.decay_rate = kwargs["decay_rate"]
+        if "min_lr" in kwargs:
+            self.min_lr = kwargs["min_lr"]
+        else:
+            self.min_lr = 3e-10
+
+    def value(self, step):
+        """Get the learning rate at the given step."""
+        step_lr = self.start_lr * np.power(self.decay_rate, step // self.decay_steps)
+        if step_lr < self.min_lr:
+            step_lr = self.min_lr
+        return step_lr
diff --git a/deepmd/pt/utils/multi_task.py b/deepmd/pt/utils/multi_task.py
new file mode 100644
index 0000000000..f97a826b03
--- /dev/null
+++ b/deepmd/pt/utils/multi_task.py
@@ -0,0 +1,129 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from copy import (
+    deepcopy,
+)
+
+from deepmd.pt.model.descriptor import (
+    DescrptDPA1,
+    DescrptDPA2,
+    DescrptSeA,
+)
+from deepmd.pt.model.network.network import (
+    TypeEmbedNet,
+)
+from deepmd.pt.model.task import (
+    EnergyFittingNet,
+    EnergyFittingNetDirect,
+    FittingNetAttenLcc,
+)
+
+
+def preprocess_shared_params(model_config):
+    """Preprocess the model params for the multi-task model, and generate the links dict for further sharing.
+
+    Args:
+        model_config: Model params of the multi-task model.
+
+    Returns
+    -------
+    model_config: Preprocessed model params of the multi-task model.
+        The string names are replaced with the real params from the `shared_dict` of the model params.
+    shared_links: Dict of link info for further sharing.
+        Each item, whose key must be in `shared_dict`, is a dict with the following keys:
+        - "type": The real class type of this item.
+        - "links": List of shared settings; each sub-item is a dict with the following keys:
+            - "model_key": Model key in the `model_dict` that shares this item.
+            - "shared_type": Type of this shared item.
+            - "shared_level": Shared level (int) of this item in this model.
+              The lower the level, the more params are shared; 0 means all params in this item are shared.
+            This list is sorted by "shared_level".
+    """
+    assert "model_dict" in model_config, "only a multi-task model can use this method!"
+    supported_types = ["type_map", "type_embedding", "descriptor", "fitting_net"]
+    shared_dict = model_config.get("shared_dict", {})
+    shared_links = {}
+    type_map_keys = []
+
+    def replace_one_item(params_dict, key_type, key_in_dict, suffix="", index=None):
+        shared_type = key_type
+        shared_key = key_in_dict
+        shared_level = 0
+        if ":" in key_in_dict:
+            shared_key = key_in_dict.split(":")[0]
+            shared_level = int(key_in_dict.split(":")[1])
+        assert (
+            shared_key in shared_dict
+        ), f"The appointed {shared_type} {shared_key} is not in the shared_dict! Please check the input params."
+        if index is None:
+            params_dict[shared_type] = deepcopy(shared_dict[shared_key])
+        else:
+            params_dict[index] = deepcopy(shared_dict[shared_key])
+        if shared_type == "type_map":
+            if key_in_dict not in type_map_keys:
+                type_map_keys.append(key_in_dict)
+        else:
+            if shared_key not in shared_links:
+                class_name = get_class_name(shared_type, shared_dict[shared_key])
+                shared_links[shared_key] = {"type": class_name, "links": []}
+            link_item = {
+                "model_key": model_key,
+                "shared_type": shared_type + suffix,
+                "shared_level": shared_level,
+            }
+            shared_links[shared_key]["links"].append(link_item)
+
+    for model_key in model_config["model_dict"]:
+        model_params_item = model_config["model_dict"][model_key]
+        for item_key in model_params_item:
+            if item_key in supported_types:
+                item_params = model_params_item[item_key]
+                if isinstance(item_params, str):
+                    replace_one_item(model_params_item, item_key, item_params)
+                elif item_params.get("type", "") == "hybrid":
+                    for ii, hybrid_item in enumerate(item_params["list"]):
+                        if isinstance(hybrid_item, str):
+                            replace_one_item(
+                                model_params_item[item_key]["list"],
+                                item_key,
+                                hybrid_item,
+                                suffix=f"_hybrid_{ii}",
+                                index=ii,
+                            )
+    for shared_key in shared_links:
+        shared_links[shared_key]["links"] = sorted(
+            shared_links[shared_key]["links"], key=lambda x: x["shared_level"]
+        )
+    assert len(type_map_keys) == 1, "A multi-task model must have exactly one type_map!"
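+    # Illustrative sketch of a hypothetical multi-task config: with
+    #     "shared_dict": {"my_des": {"type": "se_e2_a", ...}},
+    #     "model_dict": {"m1": {"descriptor": "my_des", ...},
+    #                    "m2": {"descriptor": "my_des:1", ...}},
+    # both branches get a deepcopy of "my_des", and shared_links["my_des"]
+    # records that "m1" shares at level 0 (all params) while "m2" shares at
+    # level 1.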
+    return model_config, shared_links
+
+
+def get_class_name(item_key, item_params):
+    if item_key == "type_embedding":
+        return TypeEmbedNet.__name__
+    elif item_key == "descriptor":
+        item_type = item_params.get("type", "se_e2_a")
+        if item_type == "se_e2_a":
+            return DescrptSeA.__name__
+        elif item_type in ["se_atten", "dpa1"]:
+            return DescrptDPA1.__name__
+        elif item_type in ["dpa2"]:
+            return DescrptDPA2.__name__
+        # TODO: add support for other combinations
+        # elif item_type == "gaussian_lcc":
+        #     return DescrptGaussianLcc.__name__
+        # elif item_type == "hybrid":
+        #     return DescrptHybrid.__name__
+        else:
+            raise RuntimeError(f"Unknown descriptor type {item_type}")
+    elif item_key == "fitting_net":
+        item_type = item_params.get("type", "ener")
+        if item_type == "ener":
+            return EnergyFittingNet.__name__
+        elif item_type in ["direct_force", "direct_force_ener"]:
+            return EnergyFittingNetDirect.__name__
+        elif item_type == "atten_vec_lcc":
+            return FittingNetAttenLcc.__name__
+        else:
+            raise RuntimeError(f"Unknown fitting_net type {item_type}")
+    else:
+        raise RuntimeError(f"Unknown class_name type {item_key}")
diff --git a/deepmd/pt/utils/nlist.py b/deepmd/pt/utils/nlist.py
new file mode 100644
index 0000000000..23a11684a5
--- /dev/null
+++ b/deepmd/pt/utils/nlist.py
@@ -0,0 +1,431 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+from typing import (
+    Dict,
+    List,
+    Optional,
+    Union,
+)
+
+import torch
+
+from deepmd.pt.utils import (
+    env,
+)
+from deepmd.pt.utils.region import (
+    to_face_distance,
+)
+
+
+def _build_neighbor_list(
+    coord1: torch.Tensor,
+    nloc: int,
+    rcut: float,
+    nsel: int,
+    rmin: float = 1e-10,
+    cut_nearest: bool = True,
+) -> torch.Tensor:
+    """Build the neighbor list for a single frame, keeping nsel neighbors.
+
+    coord1 : [nall x 3].
+
+    ret: [nloc x nsel] stores indexes of coord1.
+    """
+    nall = coord1.shape[-1] // 3
+    coord0 = torch.split(coord1, [nloc * 3, (nall - nloc) * 3])[0]
+    # nloc x nall x 3
+    diff = coord1.view([-1, 3])[None, :, :] - coord0.view([-1, 3])[:, None, :]
+    assert list(diff.shape) == [nloc, nall, 3]
+    # nloc x nall
+    rr = torch.linalg.norm(diff, dim=-1)
+    rr, nlist = torch.sort(rr, dim=-1)
+    if cut_nearest:
+        # nloc x (nall-1)
+        rr = torch.split(rr, [1, nall - 1], dim=-1)[-1]
+        nlist = torch.split(nlist, [1, nall - 1], dim=-1)[-1]
+    # nloc x nsel
+    nnei = rr.shape[1]
+    rr = torch.split(rr, [nsel, nnei - nsel], dim=-1)[0]
+    nlist = torch.split(nlist, [nsel, nnei - nsel], dim=-1)[0]
+    nlist = nlist.masked_fill((rr > rcut), -1)
+    return nlist
+
+
+def build_neighbor_list_lower(
+    coord1: torch.Tensor,
+    atype: torch.Tensor,
+    nloc: int,
+    rcut: float,
+    sel: Union[int, List[int]],
+    distinguish_types: bool = True,
+) -> torch.Tensor:
+    """Build the neighbor list for a single frame, keeping nsel neighbors.
+
+    Parameters
+    ----------
+    coord1 : torch.Tensor
+        extended coordinates of shape [nall x 3]
+    atype : torch.Tensor
+        extended atomic types of shape [nall]
+    nloc : int
+        number of local atoms.
+    rcut : float
+        cut-off radius
+    sel : int or List[int]
+        maximal number of neighbors (of each type).
+        if distinguish_types==True, nsel should be a list and
+        its length should be equal to the number of types.
+    distinguish_types : bool
+        distinguish different types.
+
+    Returns
+    -------
+    neighbor_list : torch.Tensor
+        Neighbor list of shape [nloc, nsel], with the neighbors
+        stored in ascending order. If the number of neighbors
+        is less than nsel, the positions are masked with -1.
+        The neighbor list of an atom looks like
+        |------ nsel ------|
+        xx xx xx xx -1 -1 -1
+        if distinguish_types==True and we have two types
+        |---- nsel[0] -----| |---- nsel[1] -----|
+        xx xx xx xx -1 -1 -1 xx xx xx -1 -1 -1 -1
+
+    """
+    nall = coord1.shape[0] // 3
+    if isinstance(sel, int):
+        sel = [sel]
+    nsel = sum(sel)
+    # nloc x 3
+    coord0 = coord1[: nloc * 3]
+    # nloc x nall x 3
+    diff = coord1.view([-1, 3]).unsqueeze(0) - coord0.view([-1, 3]).unsqueeze(1)
+    assert list(diff.shape) == [nloc, nall, 3]
+    # nloc x nall
+    rr = torch.linalg.norm(diff, dim=-1)
+    rr, nlist = torch.sort(rr, dim=-1)
+    # nloc x (nall-1)
+    rr = rr[:, 1:]
+    nlist = nlist[:, 1:]
+    # nloc x nsel
+    nnei = rr.shape[1]
+    if nsel <= nnei:
+        rr = rr[:, :nsel]
+        nlist = nlist[:, :nsel]
+    else:
+        rr = torch.cat(
+            [rr, torch.ones([nloc, nsel - nnei]).to(rr.device) + rcut], dim=-1
+        )
+        nlist = torch.cat(
+            [nlist, torch.ones([nloc, nsel - nnei], dtype=torch.long).to(rr.device)],
+            dim=-1,
+        )
+    assert list(nlist.shape) == [nloc, nsel]
+    nlist = nlist.masked_fill((rr > rcut), -1)
+
+    if not distinguish_types:
+        return nlist
+    else:
+        ret_nlist = []
+        # nloc x nall
+        tmp_atype = torch.tile(atype.unsqueeze(0), [nloc, 1])
+        mask = nlist == -1
+        # nloc x s(nsel)
+        tnlist = torch.gather(
+            tmp_atype,
+            1,
+            nlist.masked_fill(mask, 0),
+        )
+        tnlist = tnlist.masked_fill(mask, -1)
+        snsel = tnlist.shape[1]
+        for ii, ss in enumerate(sel):
+            # nloc x s(nsel)
+            # to int because bool cannot be sorted on GPU
+            pick_mask = (tnlist == ii).to(torch.int32)
+            # nloc x s(nsel), stable sort, nearer neighbors first
+            pick_mask, imap = torch.sort(
+                pick_mask, dim=-1, descending=True, stable=True
+            )
+            # nloc x s(nsel)
+            inlist = torch.gather(nlist, 1, imap)
+            inlist = inlist.masked_fill(~(pick_mask.to(torch.bool)), -1)
+            # nloc x nsel[ii]
+            ret_nlist.append(torch.split(inlist, [ss, snsel - ss], dim=-1)[0])
+        return torch.concat(ret_nlist, dim=-1)
+
+
+def build_neighbor_list(
+    coord1: torch.Tensor,
+    atype: torch.Tensor,
+    nloc: int,
+    rcut: float,
+    sel: Union[int, List[int]],
+    distinguish_types: bool = True,
+) -> torch.Tensor:
+    """Build the neighbor list for the frames in a batch, keeping nsel neighbors.
+
+    Parameters
+    ----------
+    coord1 : torch.Tensor
+        extended coordinates of shape [batch_size, nall x 3]
+    atype : torch.Tensor
+        extended atomic types of shape [batch_size, nall]
+    nloc : int
+        number of local atoms.
+    rcut : float
+        cut-off radius
+    sel : int or List[int]
+        maximal number of neighbors (of each type).
+        if distinguish_types==True, nsel should be a list and
+        its length should be equal to the number of types.
+    distinguish_types : bool
+        distinguish different types.
+
+    Returns
+    -------
+    neighbor_list : torch.Tensor
+        Neighbor list of shape [batch_size, nloc, nsel], with the neighbors
+        stored in ascending order. If the number of neighbors
+        is less than nsel, the positions are masked with -1.
+        The neighbor list of an atom looks like
+        |------ nsel ------|
+        xx xx xx xx -1 -1 -1
+        if distinguish_types==True and we have two types
+        |---- nsel[0] -----| |---- nsel[1] -----|
+        xx xx xx xx -1 -1 -1 xx xx xx -1 -1 -1 -1
+
+    """
+    batch_size = coord1.shape[0]
+    coord1 = coord1.view(batch_size, -1)
+    nall = coord1.shape[1] // 3
+    if isinstance(sel, int):
+        sel = [sel]
+    nsel = sum(sel)
+    # nloc x 3
+    coord0 = coord1[:, : nloc * 3]
+    # nloc x nall x 3
+    diff = coord1.view([batch_size, -1, 3]).unsqueeze(1) - coord0.view(
+        [batch_size, -1, 3]
+    ).unsqueeze(2)
+    assert list(diff.shape) == [batch_size, nloc, nall, 3]
+    # nloc x nall
+    rr = torch.linalg.norm(diff, dim=-1)
+    rr, nlist = torch.sort(rr, dim=-1)
+    # nloc x (nall-1)
+    rr = rr[:, :, 1:]
+    nlist = nlist[:, :, 1:]
+    # nloc x nsel
+    nnei = rr.shape[2]
+    if nsel <= nnei:
+        rr = rr[:, :, :nsel]
+        nlist = nlist[:, :, :nsel]
+    else:
+        rr = torch.cat(
+            [rr, torch.ones([batch_size, nloc, nsel - nnei]).to(rr.device) + rcut],
+            dim=-1,
+        )
+        nlist = torch.cat(
+            [
+                nlist,
+                torch.ones([batch_size, nloc, nsel - nnei], dtype=torch.long).to(
+                    rr.device
+                ),
+            ],
+            dim=-1,
+        )
+    assert list(nlist.shape) == [batch_size, nloc, nsel]
+    nlist = nlist.masked_fill((rr > rcut), -1)
+
+    if not distinguish_types:
+        return nlist
+    else:
+        ret_nlist = []
+        # nloc x nall
+        tmp_atype = torch.tile(atype.unsqueeze(1), [1, nloc, 1])
+        mask = nlist == -1
+        # nloc x s(nsel)
+        tnlist = torch.gather(
+            tmp_atype,
+            2,
+            nlist.masked_fill(mask, 0),
+        )
+        tnlist = tnlist.masked_fill(mask, -1)
+        snsel = tnlist.shape[2]
+        for ii, ss in enumerate(sel):
+            # nloc x s(nsel)
+            # to int because bool cannot be sorted on GPU
+            pick_mask = (tnlist == ii).to(torch.int32)
+            # nloc x s(nsel), stable sort, nearer neighbors first
+            pick_mask, imap = torch.sort(
+                pick_mask, dim=-1, descending=True, stable=True
+            )
+            # nloc x s(nsel)
+            inlist = torch.gather(nlist, 2, imap)
+            inlist = inlist.masked_fill(~(pick_mask.to(torch.bool)), -1)
+            # nloc x nsel[ii]
+            ret_nlist.append(torch.split(inlist, [ss, snsel - ss], dim=-1)[0])
+        return torch.concat(ret_nlist, dim=-1)
+
+
+# build_neighbor_list = torch.vmap(
+#   build_neighbor_list_lower,
+#   in_dims=(0, 0, None, None, None),
+#   out_dims=(0),
+# )
+
+
+def get_multiple_nlist_key(
+    rcut: float,
+    nsel: int,
+) -> str:
+    return str(rcut) + "_" + str(nsel)
+
+
+def build_multiple_neighbor_list(
+    coord: torch.Tensor,
+    nlist: torch.Tensor,
+    rcuts: List[float],
+    nsels: List[int],
+) -> Dict[str, torch.Tensor]:
+    """Input one neighbor list, and produce multiple neighbor lists with
+    different cutoff radii and numbers of selection out of it. The
+    required rcuts and nsels should be no larger than those of the input nlist.
+
+    Parameters
+    ----------
+    coord : torch.Tensor
+        extended coordinates of shape [batch_size, nall x 3]
+    nlist : torch.Tensor
+        Neighbor list of shape [batch_size, nloc, nsel], with the neighbors
+        stored in ascending order.
+    rcuts : List[float]
+        list of cut-off radii in ascending order.
+    nsels : List[int]
+        maximal numbers of neighbors in ascending order.
+
+    Returns
+    -------
+    nlist_dict : Dict[str, torch.Tensor]
+        A dict of nlists, with keys given by get_multiple_nlist_key(rc, nsel)
+        and values being the corresponding nlists.
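+
+    Example (illustrative): for an input nlist built with rcut=6.0 and
+    nsel=100, calling with rcuts=[2.0, 4.0] and nsels=[20, 40] returns
+    {"2.0_20": ..., "4.0_40": ...}, where each value is the input list
+    truncated to the requested nsel and re-masked by the smaller cutoff.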
+
+    """
+    assert len(rcuts) == len(nsels)
+    if len(rcuts) == 0:
+        return {}
+    nb, nloc, nsel = nlist.shape
+    if nsel < nsels[-1]:
+        pad = -1 * torch.ones(
+            [nb, nloc, nsels[-1] - nsel],
+            dtype=nlist.dtype,
+            device=nlist.device,
+        )
+        # nb x nloc x nsel
+        nlist = torch.cat([nlist, pad], dim=-1)
+        nsel = nsels[-1]
+    # nb x nall x 3
+    coord1 = coord.view(nb, -1, 3)
+    nall = coord1.shape[1]
+    # nb x nloc x 3
+    coord0 = coord1[:, :nloc, :]
+    nlist_mask = nlist == -1
+    # nb x (nloc x nsel) x 3
+    index = (
+        nlist.masked_fill(nlist_mask, 0)
+        .view(nb, nloc * nsel)
+        .unsqueeze(-1)
+        .expand(-1, -1, 3)
+    )
+    # nb x nloc x nsel x 3
+    coord2 = torch.gather(coord1, dim=1, index=index).view(nb, nloc, nsel, 3)
+    # nb x nloc x nsel x 3
+    diff = coord2 - coord0[:, :, None, :]
+    # nb x nloc x nsel
+    rr = torch.linalg.norm(diff, dim=-1)
+    # masked_fill is not in-place; keep the returned tensor
+    rr = rr.masked_fill(nlist_mask, float("inf"))
+    nlist0 = nlist
+    ret = {}
+    for rc, ns in zip(rcuts[::-1], nsels[::-1]):
+        nlist0 = nlist0[:, :, :ns].masked_fill(rr[:, :, :ns] > rc, int(-1))
+        ret[get_multiple_nlist_key(rc, ns)] = nlist0
+    return ret
+
+
+def extend_coord_with_ghosts(
+    coord: torch.Tensor,
+    atype: torch.Tensor,
+    cell: Optional[torch.Tensor],
+    rcut: float,
+):
+    """Extend the coordinates of the atoms by appending periodic images.
+    The number of images is large enough to ensure all the neighbors
+    within rcut are appended.
+
+    Parameters
+    ----------
+    coord : torch.Tensor
+        original coordinates of shape [-1, nloc*3].
+    atype : torch.Tensor
+        atom type of shape [-1, nloc].
+    cell : torch.Tensor
+        simulation cell tensor of shape [-1, 9].
+
+    Returns
+    -------
+    extended_coord: torch.Tensor
+        extended coordinates of shape [-1, nall*3].
+    extended_atype: torch.Tensor
+        extended atom type of shape [-1, nall].
+    index_mapping: torch.Tensor
+        mapping from the extended index to the local index
+
+    """
+    nf, nloc = atype.shape
+    aidx = torch.tile(torch.arange(nloc).unsqueeze(0), [nf, 1])
+    if cell is None:
+        nall = nloc
+        extend_coord = coord.clone()
+        extend_atype = atype.clone()
+        extend_aidx = aidx.clone()
+    else:
+        coord = coord.view([nf, nloc, 3])
+        cell = cell.view([nf, 3, 3])
+        # nf x 3
+        to_face = to_face_distance(cell)
+        # nf x 3
+        # *2: ghost copies on + and - directions
+        # +1: central cell
+        nbuff = torch.ceil(rcut / to_face).to(torch.long)
+        # 3
+        nbuff = torch.max(nbuff, dim=0, keepdim=False).values
+        xi = torch.arange(-nbuff[0], nbuff[0] + 1, 1, device=env.DEVICE)
+        yi = torch.arange(-nbuff[1], nbuff[1] + 1, 1, device=env.DEVICE)
+        zi = torch.arange(-nbuff[2], nbuff[2] + 1, 1, device=env.DEVICE)
+        xyz = xi.view(-1, 1, 1, 1) * torch.tensor(
+            [1, 0, 0], dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+        )
+        xyz = xyz + yi.view(1, -1, 1, 1) * torch.tensor(
+            [0, 1, 0], dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+        )
+        xyz = xyz + zi.view(1, 1, -1, 1) * torch.tensor(
+            [0, 0, 1], dtype=env.GLOBAL_PT_FLOAT_PRECISION, device=env.DEVICE
+        )
+        xyz = xyz.view(-1, 3)
+        # ns x 3
+        shift_idx = xyz[torch.argsort(torch.norm(xyz, dim=1))]
+        ns, _ = shift_idx.shape
+        nall = ns * nloc
+        # nf x ns x 3
+        shift_vec = torch.einsum("sd,fdk->fsk", shift_idx, cell)
+        # nf x ns x nloc x 3
+        extend_coord = coord[:, None, :, :] + shift_vec[:, :, None, :]
+        # nf x ns x nloc
+        extend_atype = torch.tile(atype.unsqueeze(-2), [1, ns, 1])
+        # nf x ns x nloc
+        extend_aidx = torch.tile(aidx.unsqueeze(-2), [1, ns, 1])
+
+    return (
+        extend_coord.reshape([nf, nall * 3]).to(env.DEVICE),
+        extend_atype.view([nf, nall]).to(env.DEVICE),
+        extend_aidx.view([nf, nall]).to(env.DEVICE),
+    )
diff --git a/deepmd/pt/utils/plugin.py b/deepmd/pt/utils/plugin.py
new file mode 100644
index 0000000000..c24f36f574
--- /dev/null
+++ b/deepmd/pt/utils/plugin.py
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+"""Base of plugin systems."""
+from deepmd.utils.plugin import (
+    Plugin,
+    PluginVariant,
+    VariantABCMeta,
+    VariantMeta,
+)
+
+__all__ = [
+    "Plugin",
+    "VariantMeta",
+    "VariantABCMeta",
+    "PluginVariant",
+]
diff --git a/deepmd/pt/utils/preprocess.py b/deepmd/pt/utils/preprocess.py
new file mode 100644
index 0000000000..463ac112ad
--- /dev/null
+++ b/deepmd/pt/utils/preprocess.py
@@ -0,0 +1,318 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
+from typing import (
+    Union,
+)
+
+import torch
+
+from deepmd.pt.utils import (
+    env,
+)
+
+
+class Region3D:
+    def __init__(self, boxt):
+        """Construct a simulation box."""
+        boxt = boxt.reshape([3, 3])
+        self.boxt = boxt  # convert physical coordinates to internal ones
+        self.rec_boxt = torch.linalg.inv(
+            self.boxt
+        )  # convert internal coordinates to physical ones
+
+        self.volume = torch.linalg.det(self.boxt)  # compute the volume
+
+        # boxt = boxt.permute(1, 0)
+        c_yz = torch.cross(boxt[1], boxt[2])
+        self._h2yz = self.volume / torch.linalg.norm(c_yz)
+        c_zx = torch.cross(boxt[2], boxt[0])
+        self._h2zx = self.volume / torch.linalg.norm(c_zx)
+        c_xy = torch.cross(boxt[0], boxt[1])
+        self._h2xy = self.volume / torch.linalg.norm(c_xy)
+
+    def phys2inter(self, coord):
+        """Convert physical coordinates to internal ones."""
+        return coord @ self.rec_boxt
+
+    def inter2phys(self, coord):
+        """Convert internal coordinates to physical ones."""
+        return coord @ self.boxt
+
+    def get_face_distance(self):
+        """Return the face distances to each surface: YZ, ZX, XY."""
+        return torch.stack([self._h2yz, self._h2zx, self._h2xy])
+
+
+def normalize_coord(coord, region: Region3D, nloc: int):
+    """Move outside atoms back into the region by mirroring.
+
+    Args:
+    - coord: shape is [nloc*3]
+    """
+    tmp_coord = coord.clone()
+    inter_coord = torch.remainder(region.phys2inter(tmp_coord), 1.0)
+    tmp_coord = region.inter2phys(inter_coord)
+    return tmp_coord
+
+
+def compute_serial_cid(cell_offset, ncell):
+    """Compute the sequential (serial) cell ID from a 3D cell offset.
+
+    Args:
+    - cell_offset: shape is [N, 3]
+    - ncell: shape is [3]
+    """
+    cell_offset[:, 0] *= ncell[1] * ncell[2]
+    cell_offset[:, 1] *= ncell[2]
+    return cell_offset.sum(-1)
+
+
+def compute_pbc_shift(cell_offset, ncell):
+    """Tell the shift count needed to move the atom into the region."""
+    shift = torch.zeros_like(cell_offset)
+    shift = shift + (cell_offset < 0) * -(
+        torch.div(cell_offset, ncell, rounding_mode="floor")
+    )
+    shift = shift + (cell_offset >= ncell) * -(
+        torch.div((cell_offset - ncell), ncell, rounding_mode="floor") + 1
+    )
+    assert torch.all(cell_offset + shift * ncell >= 0)
+    assert torch.all(cell_offset + shift * ncell < ncell)
+    return shift
+
+
+def build_inside_clist(coord, region: Region3D, ncell):
+    """Build the cell list for atoms inside the region.
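+
+    A cell list partitions the region into `ncell` cells along each axis and
+    assigns every atom the serial id of the cell it falls in, so that neighbor
+    search only needs to visit nearby cells.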
+
+    Args:
+    - coord: shape is [nloc*3]
+    - ncell: shape is [3]
+    """
+    loc_ncell = int(torch.prod(ncell))  # num of local cells
+    nloc = coord.numel() // 3  # num of local atoms
+    inter_cell_size = 1.0 / ncell
+
+    inter_cood = region.phys2inter(coord.view(-1, 3))
+    cell_offset = torch.floor(inter_cood / inter_cell_size).to(torch.long)
+    # numerical errors brought by the conversion from phys to inter and back
+    # may lead to negative values
+    cell_offset[cell_offset < 0] = 0
+    delta = cell_offset - ncell
+    a2c = compute_serial_cid(cell_offset, ncell)  # cell id of atoms
+    arange = torch.arange(0, loc_ncell, 1, device=env.PREPROCESS_DEVICE)
+    cellid = a2c == arange.unsqueeze(-1)  # one-hot cell id
+    c2a = cellid.nonzero()
+    lst = []
+    cnt = 0
+    bincount = torch.bincount(a2c, minlength=loc_ncell)
+    for i in range(loc_ncell):
+        n = bincount[i]
+        lst.append(c2a[cnt : cnt + n, 1])
+        cnt += n
+    return a2c, lst
+
+
+def append_neighbors(coord, region: Region3D, atype, rcut: float):
+    """Make ghost atoms that are valid neighbors.
+
+    Args:
+    - coord: shape is [nloc*3]
+    - atype: shape is [nloc]
+    """
+    to_face = region.get_face_distance()
+
+    # compute the num and size of local cells
+    ncell = torch.floor(to_face / rcut).to(torch.long)
+    ncell[ncell == 0] = 1
+    cell_size = to_face / ncell
+    ngcell = (
+        torch.floor(rcut / cell_size).to(torch.long) + 1
+    )  # num of cells out of local, which contain ghost atoms
+
+    # add ghost atoms
+    a2c, c2a = build_inside_clist(coord, region, ncell)
+    xi = torch.arange(-ngcell[0], ncell[0] + ngcell[0], 1, device=env.PREPROCESS_DEVICE)
+    yi = torch.arange(-ngcell[1], ncell[1] + ngcell[1], 1, device=env.PREPROCESS_DEVICE)
+    zi = torch.arange(-ngcell[2], ncell[2] + ngcell[2], 1, device=env.PREPROCESS_DEVICE)
+    xyz = xi.view(-1, 1, 1, 1) * torch.tensor(
+        [1, 0, 0], dtype=torch.long, device=env.PREPROCESS_DEVICE
+    )
+    xyz = xyz + yi.view(1, -1, 1, 1) * torch.tensor(
+        [0, 1, 0], dtype=torch.long, device=env.PREPROCESS_DEVICE
+    )
+    xyz = xyz + zi.view(1, 1, -1, 1) * torch.tensor(
+        [0, 0, 1], dtype=torch.long, device=env.PREPROCESS_DEVICE
+    )
+    xyz = xyz.view(-1, 3)
+    mask_a = (xyz >= 0).all(dim=-1)
+    mask_b = (xyz < ncell).all(dim=-1)
+    mask = ~torch.logical_and(mask_a, mask_b)
+    xyz = xyz[mask]  # cell coord
+    shift = compute_pbc_shift(xyz, ncell)
+    coord_shift = region.inter2phys(shift.to(env.GLOBAL_PT_FLOAT_PRECISION))
+    mirrored = shift * ncell + xyz
+    cid = compute_serial_cid(mirrored, ncell)
+
+    n_atoms = coord.shape[0]
+    aid = [c2a[ci] + i * n_atoms for i, ci in enumerate(cid)]
+    aid = torch.cat(aid)
+    tmp = torch.div(aid, n_atoms, rounding_mode="trunc")
+    aid = aid % n_atoms
+    tmp_coord = coord[aid] - coord_shift[tmp]
+    tmp_atype = atype[aid]
+
+    # merge local and ghost atoms
+    merged_coord = torch.cat([coord, tmp_coord])
+    merged_coord_shift = torch.cat([torch.zeros_like(coord), coord_shift[tmp]])
+    merged_atype = torch.cat([atype, tmp_atype])
+    merged_mapping = torch.cat(
+        [torch.arange(atype.numel(), device=env.PREPROCESS_DEVICE), aid]
+    )
+    return merged_coord_shift, merged_atype, merged_mapping
+
+
+def build_neighbor_list(
+    nloc: int, coord, atype, rcut: float, sec, mapping, type_split=True, min_check=False
+):
+    """For each atom inside the region, build its neighbor list.
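+
+    Neighbors are sorted by distance; when type_split is True, the nearest
+    `sec[i] - sec[i-1]` neighbors of each type `i` within `rcut` are kept,
+    and missing entries are padded with -1.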
+
+    Args:
+    - coord: shape is [nall*3]
+    - atype: shape is [nall]
+    """
+    nall = coord.numel() // 3
+    coord = coord.float()
+    nlist = [[] for _ in range(nloc)]
+    coord_l = coord.view(-1, 1, 3)[:nloc]
+    coord_r = coord.view(1, -1, 3)
+    distance = coord_l - coord_r
+    distance = torch.linalg.norm(distance, dim=-1)
+    DISTANCE_INF = distance.max().detach() + rcut
+    distance[:nloc, :nloc] += (
+        torch.eye(nloc, dtype=torch.bool, device=env.PREPROCESS_DEVICE) * DISTANCE_INF
+    )
+    if min_check:
+        if distance.min().abs() < 1e-6:
+            raise RuntimeError("Atom dist too close!")
+    if not type_split:
+        sec = sec[-1:]
+    lst = []
+    nlist = torch.zeros((nloc, sec[-1].item()), device=env.PREPROCESS_DEVICE).long() - 1
+    nlist_loc = (
+        torch.zeros((nloc, sec[-1].item()), device=env.PREPROCESS_DEVICE).long() - 1
+    )
+    nlist_type = (
+        torch.zeros((nloc, sec[-1].item()), device=env.PREPROCESS_DEVICE).long() - 1
+    )
+    for i, nnei in enumerate(sec):
+        if i > 0:
+            nnei = nnei - sec[i - 1]
+        if not type_split:
+            tmp = distance
+        else:
+            mask = atype.unsqueeze(0) == i
+            tmp = distance + (~mask) * DISTANCE_INF
+        if tmp.shape[1] >= nnei:
+            _sorted, indices = torch.topk(tmp, nnei, dim=1, largest=False)
+        else:
+            # when nnei > nall
+            indices = torch.zeros((nloc, nnei), device=env.PREPROCESS_DEVICE).long() - 1
+            _sorted = (
+                torch.ones((nloc, nnei), device=env.PREPROCESS_DEVICE).long()
+                * DISTANCE_INF
+            )
+            _sorted_nnei, indices_nnei = torch.topk(
+                tmp, tmp.shape[1], dim=1, largest=False
+            )
+            _sorted[:, : tmp.shape[1]] = _sorted_nnei
+            indices[:, : tmp.shape[1]] = indices_nnei
+        mask = (_sorted < rcut).to(torch.long)
+        indices_loc = mapping[indices]
+        indices = indices * mask + -1 * (1 - mask)  # -1 for padding
+        indices_loc = indices_loc * mask + -1 * (1 - mask)  # -1 for padding
+        if i == 0:
+            start = 0
+        else:
+            start = sec[i - 1]
+        end = min(sec[i], start + indices.shape[1])
+        nlist[:, start:end] = indices[:, :nnei]
+        nlist_loc[:, start:end] = indices_loc[:, :nnei]
+        nlist_type[:, start:end] = atype[indices[:, :nnei]] * mask + -1 * (1 - mask)
+    return nlist, nlist_loc, nlist_type
+
+
+def compute_smooth_weight(distance, rmin: float, rmax: float):
+    """Compute the smooth weight for descriptor elements."""
+    min_mask = distance <= rmin
+    max_mask = distance >= rmax
+    mid_mask = torch.logical_not(torch.logical_or(min_mask, max_mask))
+    uu = (distance - rmin) / (rmax - rmin)
+    vv = uu * uu * uu * (-6 * uu * uu + 15 * uu - 10) + 1
+    return vv * mid_mask + min_mask
+
+
+def make_env_mat(
+    coord,
+    atype,
+    region,
+    rcut: Union[float, list],
+    sec,
+    pbc=True,
+    type_split=True,
+    min_check=False,
+):
+    """Based on atom coordinates, return the environment matrix.
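+
+    When pbc=True, ghost atoms are appended first; passing a list of cutoff
+    radii builds one neighbor list per cutoff (used by hybrid descriptors).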
+
+    Returns
+    -------
+    nlist: the neighbor list, [nloc, nnei]
+    merged_coord_shift: the shift on nall atoms, [nall, 3]
+    merged_mapping: the mapping from the nall index to the nloc index, [nall]
+    """
+    # move outer atoms into the cell
+    hybrid = isinstance(rcut, list)
+    _rcut = rcut
+    if hybrid:
+        _rcut = max(rcut)
+    if pbc:
+        merged_coord_shift, merged_atype, merged_mapping = append_neighbors(
+            coord, region, atype, _rcut
+        )
+        merged_coord = coord[merged_mapping] - merged_coord_shift
+        if merged_coord.shape[0] <= coord.shape[0]:
+            logging.warning("No ghost atom is added for the system!")
+    else:
+        merged_coord_shift = torch.zeros_like(coord)
+        merged_atype = atype.clone()
+        merged_mapping = torch.arange(atype.numel(), device=env.PREPROCESS_DEVICE)
+        merged_coord = coord.clone()
+
+    # build nlist
+    if not hybrid:
+        nlist, nlist_loc, nlist_type = build_neighbor_list(
+            coord.shape[0],
+            merged_coord,
+            merged_atype,
+            rcut,
+            sec,
+            merged_mapping,
+            type_split=type_split,
+            min_check=min_check,
+        )
+    else:
+        nlist, nlist_loc, nlist_type = [], [], []
+        for ii, single_rcut in enumerate(rcut):
+            nlist_tmp, nlist_loc_tmp, nlist_type_tmp = build_neighbor_list(
+                coord.shape[0],
+                merged_coord,
+                merged_atype,
+                single_rcut,
+                sec[ii],
+                merged_mapping,
+                type_split=type_split,
+                min_check=min_check,
+            )
+            nlist.append(nlist_tmp)
+            nlist_loc.append(nlist_loc_tmp)
+            nlist_type.append(nlist_type_tmp)
+    return nlist, nlist_loc, nlist_type, merged_coord_shift, merged_mapping
diff --git a/deepmd/pt/utils/region.py b/deepmd/pt/utils/region.py
new file mode 100644
index 0000000000..b07d2f73bf
--- /dev/null
+++ b/deepmd/pt/utils/region.py
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import torch
+
+
+def phys2inter(
+    coord: torch.Tensor,
+    cell: torch.Tensor,
+) -> torch.Tensor:
+    """Convert physical coordinates to internal (direct) coordinates.
+
+    Parameters
+    ----------
+    coord : torch.Tensor
+        physical coordinates of shape [*, na, 3].
+    cell : torch.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    inter_coord: torch.Tensor
+        the internal coordinates
+
+    """
+    rec_cell = torch.linalg.inv(cell)
+    return torch.matmul(coord, rec_cell)
+
+
+def inter2phys(
+    coord: torch.Tensor,
+    cell: torch.Tensor,
+) -> torch.Tensor:
+    """Convert internal (direct) coordinates to physical coordinates.
+
+    Parameters
+    ----------
+    coord : torch.Tensor
+        internal coordinates of shape [*, na, 3].
+    cell : torch.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    phys_coord: torch.Tensor
+        the physical coordinates
+
+    """
+    return torch.matmul(coord, cell)
+
+
+def to_face_distance(
+    cell: torch.Tensor,
+) -> torch.Tensor:
+    """Compute the to-face distances of the simulation cell.
+
+    Parameters
+    ----------
+    cell : torch.Tensor
+        simulation cell tensor of shape [*, 3, 3].
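+
+    For cell vectors a1, a2 and a3, the distance to the face spanned by
+    a2 and a3 is d1 = |det(cell)| / ||a2 x a3||, and analogously for the
+    other two faces.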
+
+    Returns
+    -------
+    dist: torch.Tensor
+        the to-face distances of shape [*, 3]
+
+    """
+    cshape = cell.shape
+    dist = b_to_face_distance(cell.view([-1, 3, 3]))
+    return dist.view(list(cshape[:-2]) + [3])  # noqa:RUF005
+
+
+def _to_face_distance(cell):
+    volume = torch.linalg.det(cell)
+    c_yz = torch.cross(cell[1], cell[2])
+    _h2yz = volume / torch.linalg.norm(c_yz)
+    c_zx = torch.cross(cell[2], cell[0])
+    _h2zx = volume / torch.linalg.norm(c_zx)
+    c_xy = torch.cross(cell[0], cell[1])
+    _h2xy = volume / torch.linalg.norm(c_xy)
+    return torch.stack([_h2yz, _h2zx, _h2xy])
+
+
+def b_to_face_distance(cell):
+    volume = torch.linalg.det(cell)
+    c_yz = torch.cross(cell[:, 1], cell[:, 2], dim=-1)
+    _h2yz = volume / torch.linalg.norm(c_yz, dim=-1)
+    c_zx = torch.cross(cell[:, 2], cell[:, 0], dim=-1)
+    _h2zx = volume / torch.linalg.norm(c_zx, dim=-1)
+    c_xy = torch.cross(cell[:, 0], cell[:, 1], dim=-1)
+    _h2xy = volume / torch.linalg.norm(c_xy, dim=-1)
+    return torch.stack([_h2yz, _h2zx, _h2xy], dim=1)
+
+
+# b_to_face_distance = torch.vmap(
+#   _to_face_distance, in_dims=(0), out_dims=(0))
+
+
+def normalize_coord(
+    coord: torch.Tensor,
+    cell: torch.Tensor,
+) -> torch.Tensor:
+    """Apply PBC according to the atomic coordinates.
+
+    Parameters
+    ----------
+    coord : torch.Tensor
+        original coordinates of shape [*, na, 3].
+    cell : torch.Tensor
+        simulation cell tensor of shape [*, 3, 3].
+
+    Returns
+    -------
+    wrapped_coord: torch.Tensor
+        wrapped coordinates of shape [*, na, 3].
+
+    """
+    icoord = phys2inter(coord, cell)
+    icoord = torch.remainder(icoord, 1.0)
+    return inter2phys(icoord, cell)
diff --git a/deepmd/pt/utils/stat.py b/deepmd/pt/utils/stat.py
new file mode 100644
index 0000000000..837a0104f9
--- /dev/null
+++ b/deepmd/pt/utils/stat.py
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import logging
+
+import numpy as np
+import torch
+from tqdm import (
+    trange,
+)
+
+from deepmd.pt.utils import (
+    env,
+)
+
+
+def make_stat_input(datasets, dataloaders, nbatches):
+    """Pack data for statistics.
+
+    Args:
+    - datasets: A list of datasets to analyze.
+    - dataloaders: The corresponding dataloaders.
+    - nbatches: Batch count for collecting stats.
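+
+    Each system contributes up to `nbatches` batches (the dataloader is
+    restarted if it is exhausted); the ragged "shift" and "mapping" tensors
+    are padded to the largest frame before concatenation.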
+
+    Returns
+    -------
+    - a list of dicts, each of which contains data from a system
+    """
+    lst = []
+    keys = [
+        "coord",
+        "force",
+        "energy",
+        "atype",
+        "box",
+        "natoms",
+        "mapping",
+        "nlist",
+        "nlist_loc",
+        "nlist_type",
+        "shift",
+    ]
+    if datasets[0].mixed_type:
+        keys.append("real_natoms_vec")
+    logging.info(f"Packing data for statistics from {len(datasets)} systems")
+    for i in trange(len(datasets), disable=env.DISABLE_TQDM):
+        sys_stat = {key: [] for key in keys}
+        iterator = iter(dataloaders[i])
+        for _ in range(nbatches):
+            try:
+                stat_data = next(iterator)
+            except StopIteration:
+                iterator = iter(dataloaders[i])
+                stat_data = next(iterator)
+            for dd in stat_data:
+                if dd in keys:
+                    sys_stat[dd].append(stat_data[dd])
+        for key in keys:
+            if key == "mapping" or key == "shift":
+                extend = max(d.shape[1] for d in sys_stat[key])
+                for jj in range(len(sys_stat[key])):
+                    frames = []
+                    item = sys_stat[key][jj]
+                    for ii in range(item.shape[0]):
+                        frames.append(item[ii])
+                    n_frames = len(item)
+                    if key == "shift":
+                        padded = torch.zeros(
+                            (n_frames, extend, 3),
+                            dtype=env.GLOBAL_PT_FLOAT_PRECISION,
+                            device=env.PREPROCESS_DEVICE,
+                        )
+                    else:
+                        padded = torch.zeros(
+                            (n_frames, extend),
+                            dtype=torch.long,
+                            device=env.PREPROCESS_DEVICE,
+                        )
+                    # use `ii` here to avoid shadowing the dataset index `i`
+                    for ii in range(len(item)):
+                        natoms_tmp = frames[ii].shape[0]
+                        padded[ii, :natoms_tmp] = frames[ii]
+                    sys_stat[key][jj] = padded
+            if not isinstance(sys_stat[key][0], list):
+                if sys_stat[key][0] is None:
+                    sys_stat[key] = None
+                else:
+                    sys_stat[key] = torch.cat(sys_stat[key], dim=0)
+            else:
+                sys_stat_list = []
+                for ii, _ in enumerate(sys_stat[key][0]):
+                    tmp_stat = [x[ii] for x in sys_stat[key]]
+                    sys_stat_list.append(torch.cat(tmp_stat, dim=0))
+                sys_stat[key] = sys_stat_list
+        lst.append(sys_stat)
+    return lst
+
+
+def compute_output_stats(energy, natoms, rcond=None):
+    """Compute the per-element energy bias by least squares.
+
+    Args:
+    - energy: Batched energy with shape [nframes, 1].
+    - natoms: Batched atom statistics with shape [self.ntypes+2].
+
+    Returns
+    -------
+    - energy_coef: Average energy per atom for each element.
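+
+    Example (illustrative): for water systems with type_map ["O", "H"], the
+    least-squares fit solves E ≈ n_O * e_O + n_H * e_H per frame, and
+    energy_coef = [e_O, e_H].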
+ """ + for i in range(len(energy)): + energy[i] = energy[i].mean(dim=0, keepdim=True) + natoms[i] = natoms[i].double().mean(dim=0, keepdim=True) + sys_ener = torch.cat(energy).cpu() + sys_tynatom = torch.cat(natoms)[:, 2:].cpu() + energy_coef, _, _, _ = np.linalg.lstsq(sys_tynatom, sys_ener, rcond) + return energy_coef diff --git a/deepmd/pt/utils/utils.py b/deepmd/pt/utils/utils.py new file mode 100644 index 0000000000..780dbf7e62 --- /dev/null +++ b/deepmd/pt/utils/utils.py @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +from typing import ( + Callable, + Optional, +) + +import torch +import torch.nn.functional as F + + +def get_activation_fn(activation: str) -> Callable: + """Returns the activation function corresponding to `activation`.""" + if activation.lower() == "relu": + return F.relu + elif activation.lower() == "gelu": + return F.gelu + elif activation.lower() == "tanh": + return torch.tanh + elif activation.lower() == "linear" or activation.lower() == "none": + return lambda x: x + else: + raise RuntimeError(f"activation function {activation} not supported") + + +class ActivationFn(torch.nn.Module): + def __init__(self, activation: Optional[str]): + super().__init__() + self.activation: str = activation if activation is not None else "linear" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Returns the tensor after applying activation function corresponding to `activation`.""" + # See jit supported types: https://pytorch.org/docs/stable/jit_language_reference.html#supported-type + + if self.activation.lower() == "relu": + return F.relu(x) + elif self.activation.lower() == "gelu": + return F.gelu(x) + elif self.activation.lower() == "tanh": + return torch.tanh(x) + elif self.activation.lower() == "linear" or self.activation.lower() == "none": + return x + else: + raise RuntimeError(f"activation function {self.activation} not supported") diff --git a/examples/water/dpa2/input_torch.json b/examples/water/dpa2/input_torch.json new file mode 100644 index 0000000000..9d783b35d5 --- /dev/null +++ b/examples/water/dpa2/input_torch.json @@ -0,0 +1,102 @@ +{ + "_comment": "that's all", + "model": { + "type_embedding": { + "neuron": [ + 8 + ], + "tebd_input_mode": "concat" + }, + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "dpa2", + "repinit_rcut": 9.0, + "repinit_rcut_smth": 8.0, + "repinit_nsel": 120, + "repformer_rcut": 4.0, + "repformer_rcut_smth": 3.5, + "repformer_nsel": 40, + "repinit_neuron": [ + 25, + 50, + 100 + ], + "repinit_axis_neuron": 12, + "repinit_activation": "tanh", + "repformer_nlayers": 12, + "repformer_g1_dim": 128, + "repformer_g2_dim": 32, + "repformer_attn2_hidden": 32, + "repformer_attn2_nhead": 4, + "repformer_attn1_hidden": 128, + "repformer_attn1_nhead": 4, + "repformer_axis_dim": 4, + "repformer_update_h2": false, + "repformer_update_g1_has_conv": true, + "repformer_update_g1_has_grrg": true, + "repformer_update_g1_has_drrd": true, + "repformer_update_g1_has_attn": true, + "repformer_update_g2_has_g1g1": true, + "repformer_update_g2_has_attn": true, + "repformer_attn2_has_gate": true, + "repformer_add_type_ebd_to_seq": false + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.0002, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 
1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "../data/data_0", + "../data/data_1", + "../data/data_2" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "../data/data_3" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "numb_steps": 1000000, + "warmup_steps": 0, + "gradient_max_norm": 5.0, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 2000, + "_comment": "that's all" + } +} diff --git a/examples/water/se_atten/input_torch.json b/examples/water/se_atten/input_torch.json new file mode 100644 index 0000000000..7da3d64164 --- /dev/null +++ b/examples/water/se_atten/input_torch.json @@ -0,0 +1,91 @@ +{ + "_comment": "that's all", + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "dpa1", + "sel": 120, + "rcut_smth": 0.5, + "rcut": 6.0, + "neuron": [ + 25, + 50, + 100 + ], + "axis_neuron": 16, + "attn": 128, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "post_ln": true, + "ffn": false, + "ffn_embed_dim": 1024, + "activation": "tanh", + "scaling_factor": 1.0, + "head_num": 1, + "normalize": true, + "temperature": 1.0 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "../data/data_0", + "../data/data_1", + "../data/data_2" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "../data/data_3" + ], + "batch_size": 1, + "numb_btch": 3, + "_comment": "that's all" + }, + "wandb_config": { + "wandb_enabled": false, + "entity": "dp_model_engineering", + "project": "DPA" + }, + "numb_steps": 1000000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 1000, + "_comment": "that's all" + } +} diff --git a/examples/water/se_e2_a/input_torch.json b/examples/water/se_e2_a/input_torch.json new file mode 100644 index 0000000000..053a721a44 --- /dev/null +++ b/examples/water/se_e2_a/input_torch.json @@ -0,0 +1,79 @@ +{ + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "data_stat_nbatch": 20, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-8, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "../data/data_0", + "../data/data_1", + "../data/data_2" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "../data/data_3" + ], + "batch_size": 1, + "numb_btch": 3, + 
"_comment": "that's all" + }, + "numb_steps": 100000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 10000, + "_comment": "that's all" + }, + "_comment": "that's all" +} diff --git a/source/install/docker/Dockerfile b/source/install/docker/Dockerfile index 26b7be9f19..793272ae6a 100644 --- a/source/install/docker/Dockerfile +++ b/source/install/docker/Dockerfile @@ -6,7 +6,7 @@ RUN python -m venv /opt/deepmd-kit ENV PATH="/opt/deepmd-kit/bin:$PATH" # Install package COPY dist /dist -RUN pip install "$(ls /dist/deepmd_kit${VARIANT}-*manylinux*_x86_64.whl)[gpu,cu${CUDA_VERSION},lmp,ipi]" \ +RUN pip install "$(ls /dist/deepmd_kit${VARIANT}-*manylinux*_x86_64.whl)[gpu,cu${CUDA_VERSION},lmp,ipi,torch]" \ && dp -h \ && lmp -h \ && dp_ipi \ diff --git a/source/tests/pt/__init__.py b/source/tests/pt/__init__.py new file mode 100644 index 0000000000..fdbdd73f79 --- /dev/null +++ b/source/tests/pt/__init__.py @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import torch + +torch.set_num_threads(1) +torch.set_num_interop_threads(1) diff --git a/source/tests/pt/models/dpa1.json b/source/tests/pt/models/dpa1.json new file mode 100644 index 0000000000..dd838ac692 --- /dev/null +++ b/source/tests/pt/models/dpa1.json @@ -0,0 +1,39 @@ +{ + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_atten", + "sel": 30, + "rcut_smth": 2.0, + "rcut": 6.0, + "neuron": [ + 2, + 4, + 8 + ], + "axis_neuron": 4, + "attn": 5, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "post_ln": true, + "ffn": false, + "ffn_embed_dim": 10, + "activation": "tanh", + "scaling_factor": 1.0, + "head_num": 1, + "normalize": true, + "temperature": 1.0 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1 + } +} diff --git a/source/tests/pt/models/dpa1.pth b/source/tests/pt/models/dpa1.pth new file mode 100644 index 0000000000000000000000000000000000000000..75acf2fa15d874dc7c63a0fe1bbc3653267b2c4b GIT binary patch literal 15469 zcmeHOd0dR!|DRGxSwgm=NRh35QR3r?X!4wxad%CmrcDj>;Bf~@0-iBeL;=bA0$WWZJ_MhS{Y-i`@>`b>~IJvocNG#i8 zPv1hfU+84VkU*4aG$f>KN#Ijl=r*oM*pA`0#m-U1dMm@p*+as53*C0Ble4`wgYN3# zw#9mzh6Ea*tZX@uXzxL^v~i##5%LjGAIo#`vcPbHA3r4Z3b4T)I}Ag*@qThTyj zI|GR?r&6fGR5_}MKN>{!q>505{Edp-EGvZZ5V`lZr@e@{g<_~%?|n&4;8hZAE`LG zTDxy?+oa-R!`R~FiAMABYoIX%zb6BY+7Bj@#7g^yYrsS?!U45UW(o1j)FsWnJyO$O4U zA#KaqeWi9KZEx-Bw#7w-oHj^@k5dQflAN=7R`x}DR3TraPx8%SAOjkjYpL@?e7qjc z<73lBK(fvMhwT?Gn<2@zfPog$(4s#1vii4t*@%yKHZmr7312qh@p3)Nl=LVwlAFpv zi)m;{pM3et+`Jwy<~5{8ttHv580c3TTGuCM{t7#<kVYj32(L`xot^q zI|j0+A$p&@na3^2n>X^4Y9I%a)RAXsU*yDfuuUZ4W(IPmA(uY6b8kXkA6@zBG?5!g z=l%~J;?iv)={y*SK|@)UU3>(;vtWuZxZn@FENp zMCvEtgGu;fB>Zs(IzdAxzo}p2v;Gi%g4rmPAehKNVO#_X;nCLQZk+O}*@@a z8cJfKWU{?_U)uL1L@7*^NubU71MkauN!-&N_B!cZj>Rgod^3Yd&k^`J0zXIK=Lq~9f&cLcaFh8At@P>M#HlT}el3~L z6O_!=jSTfaC-cvT%HMG%yW7q^93;xm>(6oc=?6dO!OuAGGYENuljg{~)V4$|cwj6lSHL<=ytQ1FGZLc~x zv=WBK-pIOmFbf}_CGDT`p&V33wy&K!uNsPk_BzVnO2(olXN(uIYH>kZMvCHwJp6|^ zo{nQ*{^C;pf2B zpJsO~cs?=Rx%KSa%qutfnYH9 z_mNhsDgkc$>lfUO!Ctph#+Gs}fs41u0IgOQY!4oIxk>ppe&w)u{n=DDbS%nhxak#! 
[base85-encoded binary payload omitted]
zQSC)Z@8lG{=r~V~k&{#T6n^VPCpg7PnL@P}$@NWp(J9UqD|4yt49V{o)%T*)oZyU1 zpxTQ}2}CbC%UR-N7S&#qB8%Y@I>+haWjfXE>Nh(5R&$;+U67end(j`64B`?vzb5Ca_9Byf!}70lq8l<%DtBD+ z8j^a^O-^u2CP?MOH6}1fxy_mG$V{reNLrKr2D!^Q?#Udgy(k3^gP{AI?14duh-x=MX7`pF4?$poss z$do|zA~SRT*D(UQ-Toqnsy$eA+9Osc);k4y$}nK@+^ znNqbEB~zjone)`F{~|Sc8>ZQKYIZrbvf58tr8iOXUX+99=alnRdyz@LVfnc@QEr(i zl{+qZ4N1Ky4=2bg6QuIt8WR|#4a6SC!Wb~2sncJ&*beyg$POb#-WYA^aDlR=y#r*x7j zReMn~C3;aUo?81~q$Y2}v<^?LE2mag>qXLw4S6rB$McDtuiA@D@(s(c&xsnyM5)|y z$!kdJMGZMYBbgwT57(H$Af+*9Y9ceK_9AIb`WvJv=V&H#sP>{1I1GZCbFvmPS#h-w z3BO6QB~NjdQ&jIJzkEqV&=#W`GM4%H{YNP%fz>dndAWU{KMTox|K@Q;o9 zaE87zgKD!+mZN77oynckc*r#W@(qXBK8G7VCr{4TPv*1vR}0g>J%M_0s{S%noxh_p z$Q!^p2g;nPCp`UCLwEA#xr6>qZt~7d2lLz^a;|DuK;7hAe(TVooIooRsCETY0>dSs zoXJOKO6}H@rQly3={UnMnIW~$*qFf}W;mxBAycWIn&oBbd(=oyGD;?~Pvu6@>q#at zNE*%A#>i~yP0EUnRm!gHb|JlS*FS?>d%$o-yzd!Ja@XBtNv^la>cVTgA>e@3Dlnr69U6BW^tz3 zGL!nVVa$YQV-9DSD>JA+8>S2fG4nXpe3?r9*^sD`Z)>`MlPr`;)SrzMNDPt|akj-W zTLC<2DZR>%F5%z1RQ}#VfB0U!n{65Y?&WFvZY#QifA>o1yNCJov1sR<#XPoE51Y#S z@h(67*UmXZMl&;We3$9}3YZl#8#Kh-!`oH7ZqTQu@;1PlmQ5SxGwb%-|4g(0H~!xh z_}>-y-xc`Z75Lv3_}>-ye}4s(zFy7tU{&R>8!P{*_4Rrw^>wSJ4Qr*)*MDpDf0z&} zd`vV-#+laec=DkKA`mf+jWM&O1=LVG^?9p|HmKfo&Np+ z(6f})yn^8otoE4!W)nW%CB@HPEb+GcO_t!j^-JKHYh>)pn+<<_d`x{(+%Pw=7%n zhFc$zwLNk#)$O^?mR^nRe((8f=CtbLqPpv&lFl9CZu^_4T)$20zb%hIvhv@b&rgNS z>ZE-AZ~-0v`U2jLDctwV#q}g)-uNz+GahA&_VxCOJa&a;AM-3nc-6{in|Ll!$lknyMLE-AR{B3qW__A(?7cbfqZ7D>%71Ft(KhAlh?A)0zrK#EYCqrAplBqydg<1pVNPd= zcHHrz%|b4-vK{*WEbnoU*cUAxX=Q$ygjj4JmBaijnS7+-v6_7kuz5AFC0K4`WJB+V zXR^FHP1b#Bd8$|N5mK^>&7lQ1FS1A8F|+>$0DA#XJk?I@%!}YWNo9h>qP-IE* zQfBqbnw812?b$sRlUp-vN~`VUUCRbDp48k#rnt`Co3+#juFtLUp!t>Y*((33eZB=! z{(NnkHq7(e=L;(QFZF(@T%SwTcVGSw^=PC90pa?tx^!fkCJ9%uGu=iyvk0WSafK7d;p6H zU$nW8bp*4wdeqFe&UNNF>!)wbggEvq>{zk-8@IBn-&sh3)-%brR?DZh-*t?X?2vBw z$!q*5DjpE|19*~5)1#i{N=f`RnIB=!LsE5qqP}!C19G5f4mnX|?&vl;lx;DOGhmMCy zgTNb>Lq=a`*2V7cFZFUMdHgyzJr#Y3OgsIsa8NrPdzx)t@zrb3la)UUcFJFVANx9B zUbnfcFR(mqDqlFXdoN43F3fL3uS=}N@{n08Ge(f2u?OrT2H#^ftN0Jwy(f`1%;BHC zSg-A5$Em{^AJ)Ce?@RLT>xBC0%*>Sk)b7ha?BDkPW&iHjF3;mGg+kc6Img#GuYQ!B z`7u+Qxm^Sytj3zL&(D*=7oS=c{~S-oTd;+pLH?xH$Xd=fUW;mqO3&1wNG z@$&TYcM{{tjss^BD&9;W{i9-MXK~v{hT4=Rfw#7k7BBMJ48FLU4Qa5iaNYGAS=(L- zwh8?HAvU8_C+$R z!`TvD2cIR44z6n-*XImrGWTTD*ViIh%pl(S_nj@xH}wx%Y`@YvwQU&z|ct2~1aSTk-YRSc|#DSw&a- zc-($9i|Iy@Z zxt~ow@IHT5&300z+?Pvt51%8|Lf({)9(SAFa*tnCdGS$VUt;O36H8CC0-cU-YQ16% z`P}qW;P_h?*awf##V#L@Vw#<^%ZIHzMl3D*gy)O9%!=m-@6aLpNfxrINcg>rJIUa~ zo=fZ9I?X!1p5__scY@vD*=D@Msk6jui+8ax^RJP)qjC>;9&(yZwh#JosL*-Ra7)CG z%igiXXLiP8O)KqV-}-;8pLbzAS-R!T?mbT<*o@3qCapT1Ky0_B%hPUC0P*@&cAh8h z;~7(L^q%uAiutx%)x>3P0Q(wZ)~w2{;iSjoj=qOGo@2pw`yS@+e}ZXF6wIEp{0;Us zB140bVORL`doCv9rTQ80{NlgAczzxJ%kvwOX{7tXjW@~J4OPa}nior!_lumG;Bk$a zyDz>QaQZ0w8TX`R{GJF_y*VnZB_qzUxiQ;9$1R8?>lS=z zIC9PwR(0X@Gj6*N61$O2tfsC%PHv7adqX$j04tFv@Apj6k*r+6&y!CI-)9wVd(^y9 zC5F|@KR9N`vjePIvk|$YEO#-F8wEzkO^;x^M#dGHVK#v*`shCFQS4!6_q5R?Z^sB` zP8+1RuNcefg@)DI+{%qS%X0VNizZiz!;^YHn$1~Emd;rg*yHpCa`?lVxjlx&FmKO` zE39r^Alr{TZDvcO+4Y*frJfIsV~5M;npr1yEBR)~kozoJ);Z6{m&LmO=w zah{d3_%bSfRUFHn+2Z5n`ukbcb#HrA-M*QP&XV)&`p*gM`pnU_e2$!9Zl!x?+v#+g zwDP<9s&@Y!lUB31cqR6Jk@5jY@94DI{@AlgIHHe&kHz32X z_s3a<-WTe3q0PzLt~(lB2umal8doVdx7|{*Z(Y#7GX;X##canWTlR<~0}}tqf3A2e zJ8!pcT9y9(WNw@GKQdl8!#Yh}JmbXT?QBk9)37Y|kxc39oEQ8DoEkMY>M^G5YpH{`^FZ*dJaW@|FMoLci00(f=s$q}Q9wYe?9v!ky=|JiuD# z(v>)zF`T{g@113#$4Yj1TCp4XG6$2lwjI8HOpIV*YbyjgoE*%~)EznHWoRsU8f`sb z-~CCPux%J*LTBP(wvW4F&MwY=qWR{ct~YuCvZvT)LoGMlxlSWKs{xn5oh zA{EM?KQt!uK0@r4vvsGVS+T42N$9VYZ2sfWjk9i!A=$SUdDUjaa#DFkwmP4~_Okh{ 
zmIoa;;l_@1`B5uj?0K>&SMAZ`b^c^x$@UFySS@1y`R0$zm?fO}t*qI8#G}Pz+th(W z8n4(v(#LLIlW68k?pg1NXg2dWI~E(;x#B-zEV|6&hn*ImAxqwR4x78|C}}+5eBX7! zC&?lA_E{Ud$B=LNLrcZ%2p|p}N{p$wX$-07;Te@B%8y*|IuJXq??&=9@zA*XSJpDW zh4~hxD|&=AuH^XbO{@KE^OMmx23|Nv8oMR_%w7H%X|(+Px}{m%Silimi^9i_uol-3 zH0)k0icMa-zKp%yQQ~&!)4N?2Hn423o)la(X6tzQol{lJ&4DzOy9TV!hYL$)Y2MjqnJm5 zHuk-*93hqm&vZFqxshnEE$wtH+X5DuXW6y7r=rP;=40&>+~<;V<%T?~G%t$v-qi3* z%S{JamM-V>t+7F|-yI=NFZRD${9!*EvS~zqr>Z;2hdP$GgS&?l zmvv+BExNRi%x+~LHla!o+p@Reo-Ln+)LW6>KVIf=sxD@Tj)k{$8b{W+pTU* zt}JI;?9Y1o<8vK3>SwXJ8DdEK>yyf6ez%|bm8-JAee6==JZ<)!R{4FIjb-bvr`nz( z`RscwJH2KN37i;EwpWIu?B2ZZH!^(OLY!OXEaBO25qmduO2Qq_)1-mpi2j|oM6w}W za;~`aY6V-Er@*S+6A!Rii$>aZ`gxcguK!?ofy{^5rngOV%{dy)96URR>^Q%bWv@GU z>8l|}*rl#F%dBj^m{osy>RR-d!>r-=PJUN%2a`FvjZU9i4rit9yFVRuViW1~Z1q&< zPutnL$@yEYJhGNd-C~<>?XXDp^4`&bc@IW17W=)#;9dTtwytlJMi#+j*yQrX)^$6| z-gNctl<$2g3t!zPM_GqhcIkA+t8W^uVC5^9YT5DoM$&a-^EH8|BgvWZUZ-vc2a@1p zox-1d*ulKpq#u3!;Q^LAK3|)Ii;l4ZFCP7(lQ%N+_wPDwYZc3`<#&B()h3eF?3KRR z5}SR*C&&6qk4o-hG2eoA__}T(yB{vASG?*D;#%cWt9+-z$n7CTFV=a8=lbHsh;o-> zSjBW#n=YxkjFdjr&A$ab%I3x1xY8+fBU|q~q+-)qJ6OLo=OP>upQkD}RdB2_F!&drK61U8wPcnBXHU(JAs&*vxRU_TI9^ zLn}ux+oE}E9QtP+Yj-=~SHiXc_A~0M)A7p3$=eEzbPKyOHvUf8ejb~4uv>#P&Dxin z!P*U&yd`4sIAXtWb>R#}Ls;X1{^u&>3?*kXU8-~4Ycne}Eq@l8%-;;wxkU2hqQF7NnpR*|Eu_1T8?UOkIs5%WXN4m%r58hhOKez{=*S$(== z`L=Bjk)?50e%j5PO!n+AQ9jORH8Ee6apI4$OGvut?^F8@I>j30sg zk3Ya}Y+IM!?$RPsH&4Uj*-8hHgEs4Klq(Ry`fvJGBm4P%#OtX0-q$TQv(U?N&-!Od zV4dphC_8II5bGS+HYWS@jbw7$&9iHT;JFPdytYgKovg#VCBEm^M-p4>iMNaGpTpLC ze!cL=h5(kYa?cKPi$xHR!}oeSblAq&iSPpp&TL>?x~yzgtu?NfpWnK>jn9)KO}8gL zJQ2+TUTHrxnjcOs4BqM6`*Jv$Jj*)KKT80cTW)LA_$mox;q?9%-Z>Ajo@C*)t?Rau zmy;r$*Ov)q4X5YNS#0NVR()mMB>xwO$U|K>uad0-iDifN0nzPZ*o?a?2bXJeo;yozQg zYuu)wk9~<=h^(I~Su!dm| zVgnNQvg5k7?&F+-$@P*I=Nx`Dm6h?Gdu58(elmMO^9Ii|gtGlpKhIp$p=9}|cE{%&oI>8s zXt-?nbU#ugCePyZRsC4ky6)R*SFsR zrv@+<_s{rV4Z3$Ft6VGf{?Pa=3xCyV)UK6bq)E)X!dbkIu!DoDIBa-4j=4Pu$a>D@ z6nU_?UCV{7mXqcq>pRZyjV4~RKg@b#zK!{Q8#(^|tP5o5`747~nMac96OQ;e51+@1 zw>?qnSe8g)@%F-4VttG?taaGcJ|UDXZA^l{p9o^nc6r_f)Z0yL#;giE`rensH+}tZ zV;*l(CEvBy3t~?a&4A8d0&5%~H@0MH(k<#38R<5*Tc*O%>hDDOxo4rnF8he;Dt}*F-W5;Erf2RR+ zKb_b?zCIi1yV3k8S>APh^{p>9k%vz*E`8Tj%W~h(e69Pp5H{{~-cdn&XR<;GHa{}P zMUurMnC7NeD7*1~sdwKwOGpj>;sZ;ck7A|#Uis%P;?Ekqn;AW@;0|)ft@F~04^Fbz z;b*hntr|>r)IZ$l)ZJYq-PC7C59SYMqiZa>&$`3wJIHWXCfHuG!o>f{bn6 zyL{gD`^bbji>gMw^=D;2jpk8weilW>{8?FW5P}xA?Byvx%Qj3oph`9W%T9fV{BXP{GIbX zjbzs8vQ_988%es=De3ub&2n;oLz_npmPE4KduN)Nf0@J*ZkDjLS+I>{UAm?5&6m4~ zrsXj2w~tnmCr$-&Rm{4LJlS1#t%v6+HfhHT^SRgd5KSdlzmZjDlGy>@H~Qh+k!M5k zp?BP;Gw+3q9UeYELv~c^O5NWFl6QV%T<3obA(ivIyij{k2)Px|+Ut16ePn#~S7izm zImSNK%#vesWDx0oe$4iGZ4l{H<%QkehM{cMnex?o&K_q00T~G4l8`Ay=sl3zwX?X4c(&^IAS++l8$nK(dGWLvwFYj)(vCtk5_BCgGvx3B* z&^d1ZjB(t@c71)Zd)n61?DLY>wd1GlV|m6*-tS}U&nll0>p%U5aF ze)ecx;;JCWrL1GwLytzB+CfS;%=gcb0a4`sfDVsFmpVx1cUa)~7=L~murS`|%0++T z^(Jgwll~!O_G-_!t@bP#3)vH@bDX@o(Gd9 zXKpv_YPpcz_jET~JtT~!Z!)#S+^3t#imjJR9_T%n74CXtf4T~>Y;lQUSGTP_%BBu= zTjt}noy2r1={f7%5z-^zLHYf|Pm#CxX{D0wS1{}N1s~VG*vSIQJB$vxdx(^1+vvjN z%hBZg$=xLit@C9?2R+|5`Dp+-@F>)JRozIYyLU5PwKKsiuJExRElW|-y61pfbsI&J zxMvBQUp_m;62FDe5_5u?*E4_jiBSRMd*_oY`#A+NpMEz_c6D7t*!Sga@6=nu)~4Uo zu3D~8_RK9{cK(V{q*u1YTCK;dC3Wnc|5~zf7AwN`m1vY}KbyU}^YddZ0$B8*9`%;z zKSK(2|CRkr-d!a3jhV}155hZN1e_}r}a0j%=mPZ>|o3?%vTbj+M*NF-S} zVrSOTmWP$1&Y~D>{dD!B*F%6Hf=C$HKY?yO~O}`j7 z=Tins5)MCU(9SA?Jw7yS#NfykY{#IP)pgsqkZqR_b@hlp&z^f&<(xhL0MYho^+31s z01F*cC*)D#K<;0h&2%JZ{XFn5@ZX>Pi$DGR8b3Dt4?n-oIC?~Xo9lO&*%R07<*#2L zrS8`Pa_Nv;^_qhtxz2MFLN@*V&ws4&bK>8B{xzX+h)3cUv-+P4 zMZ8!!;|=RmCT#!4DR>ATKAb=F(DMf=QTJY(DNXK?PnjQex`;nd#SATZ 
zb5EHBu50+5q0xWu|5)Jy{{7Fdy{84%nRojlySQg+>1FoU$h$XzpQi3!O16!;5)gE* zA6fF?r#55bE3A8)Oba(QJjfbOif(wV(;Rl9=8G}$*KU!1Hv<;Mow&+eN^bV4SpOv3 zUMVixaoa=Ev){++6K|X($M)av@A@T@?R{H%K>mLch~p3MAI`atu(6s}{Wj0OM7H}|F`-nMOy zDrd;}Ml(9ie;h%c=U%+*lFxotqQ%l-hhh$qMtQSW-^9X5eW%ivr3+ml5DwzV#Pl=F=ADI*y-`N0zwW;a8Py>`~Rnj|(4;W4p4r4EWjm9J^Lw zVd&_EC)ubu^?vlg&llQnY*PE$rYo%HftFcoT=F9YM|q8?KJpw}wB}fzUO%_8>48;S z_8aq%+0F=<^|R|$G9=%XtTS>Zl1(#y?Hg79APKpbd*PHx2icqoUWIZriYMoTn+0BN ze}NQyckbQUrKiZNFLr0+n=fQdqg+DX<=xFT^@{9~zT83fw!OveF&#G(FP~Z6d$zku zZ1Y?{v~FG``&N8-!LdDre?0MK&Ov|w^G7TET>9^yzp1m`PuRANC*h5>&MUiIC0Wxy z{>L}$AvqEjFyx@kSvIVi>(!kNqsaR$XBsU%97pMDEwU3qGMui{M_@JKl7Ow%Ipu!P1k8rIP2T+v}ewdVQfa9 zH9xvUo@Yxp_3OSj{XzDvk*3&(`_ZiY_YKYBhD5MNp0}U4Pq@Q!-R{+)M9(;8-m6pW z>_+ha-98O{6BEE1{0uwZI5>!eo^5sV+m9>ka-%*Qt7J?dZx8Qj^TRxfjIijF??+@j znczLB@`v&p*rH>R&#fQ(k(eBTK4XvECzY-&-hSu!dG@vPi5>gb$FsyHue*M}bATz& zRb5@+fBF3IbN#>n{K|ROEj{%373T1<@#L_lciHP#89L2*5l`~9?fkA>*;}mA>`Ns& z54=gP?i^I9tL1IB-Im!mbBiG7{RVmGnsJbPtT))s>3a-Yor8S5fWH^CVey2xG8MyE z*}N@}`g$!U^F~pdn~gTJ@_Da3vU6NTg4^9GU8l@wvUQQpwLJWg!jDV#Sm=3{jPmW7 z{m`qu>}VhVQ)};SXOBA;AJu--Ib!`{qyMy#$H|aeovL;?eTq!!b26mcgGI#0dqZXW zM|X&K?!ZB(YhNbu?XMTVzx@K~Gq_V|mHFtKjm~tdHs}JGJHGkw+e_m}`S@a43P0M; z_t~x6*VcNCJy!WoZJ*7N;@=~&!q4l?^?#4VriJYqmtQWdeSu7;-9F!DGlu8Rx5g`o z5UaiC4)nUmZWL?$p+}y3EVkOdP5JXCl5(dOIF)`opER&b+}7u5ILSDgt$GoR`-n|j zUH$QSw#?aKeCWyx3TEXy#f*M>J!c4st?H2_izH6_&xU3=t7tHeYn!Tb?krWKlnN5zyJGFF71B#W;i{81zPoF zdG4QMC%@W0@v?yLK6!aS@8$_4v|z?7JL+#EmqLfdZQpZ>eBYPl@{e+t8UFC=adTS) z%TlY_+to+Uv1b`;mnzWn4%?bHs=$PHacsk{el<03BUs>rPqjnl#Id>Cvz%&^H=5Nb z#df!}+QNFLPwc*<(Rq@;K);v!^PVNmy*CYw@?OX?=1ZvS`*J=ZIozLYKXs1%6EObT zxRbY8!!;#`9Su0ao;4~{uB*>|c6F?4l`3n_vJs1p47fPuHotzyuUudJAFdyMj`#1c zUqXZZ$Ie|$7m@AW`iwIcej-=1dG4${=LRVdc4Ky~jvvVD!!rw*%@lu6c@DCVmNw6tX1~ND^0CGb zEFwun)vF#Mb!U>D)1!Q{{D|P6Pq`rN-oIbJb&BU_vHsNKfB*Yi);K5se*FvGEIRPQ zn~N;WomgFpJWGC*D&$)(<^;(dvFFJKkL%2B`;dHxCY&MT&i$-J-tHr3M)_Q8-C#Yt zaUwiV*T)x01{c5eg;xiVk?+2a@VU5y)bRSpZu~ia^5tIK5mG6IND~p22pM^%K}beOvRBbk%1X)J*?aH3oi!ZeHJ;DMdd2V;;KK;>N`tyKEZFAnNzxq$?JcyN@v3!b zC)~BR&mG}RNWPj9cO72x7tUT5%7ALOwM_Hi5h#e-+*K&}7SB)WG^-zN2RlJJXE(_% zNRO(Arz0iEm1*XFR-MsW1I(*Rgvtb@BYD3G>$Uhv@X!?c%a}5R6&sbm_bSEWZLN6W z$f-WyHs-l5Pt^lE`G;Rzj%dM*>8ti8$NozYdwGG6pPry$TrUfo)0>BVQx(L3Uj$H0f^=G-{nraJWMT zrxhxxiHuH+h`P?-vfcreLxcW5x0K?=t4sUxp0whj7tXKDo~EMeqnL>3>3YyC5?BNcf%BPuVTG4}>j8E1B7{aV_%vY)I80cJngTA8J9UZDj@0A&)spI3Gkx@@c+YS1GwEg6iM|Tq#c01Yf%n`U^@|iE%cFn-T&#O2Z{Tj-Q zZAR&M2z|MGu*;cCZE&$O)z99r4LK#<#nXd^AmuL0zH6>6pdr|9=-bl~*3jh6!{V4v}OuU_=fa80_!+=+HGA0#Gf z>VTq7H|yumB-G^4>zUux2D29Ijz7)QkwftGI(2$3mPHcen4eAHizt0!> zjmTIV_^{RTbP-gik+y~IyoKLHV?&>g zZM)~*ivtAyblzZTPY?3+k>d`#^x%dB zlg`S)s~hj_*&I6YOSsDC8y3Sj0i5r56cQb(GHZXJ{Uk&l;dt!%JssCP)c#0!HshvX zoWefm0+iUHOkw%C5(GH5HF0)D0IjWUDaBk1+`Qv)TI%5_Rv3?-+B`Z0Lp5ho;^{g- zvuA^aTZOxQIdH8f{27d#-d&YC-m)(2lyPdylu&fZz&V+khWPS(ET#7$s zY7^1o`!|}0x#h5@FFgIgXg>z+qdI@>a4Jp-$|pJU_ka}hC#qA_LvYWyV9TYl5nO)% z!n1ZXAG!m1yT#lmP~sw`kPX2D|8k1b(JS!8tqiBi`9=vGa6w(zTlfvUe(ZW--_2CG z?Y1@JbRf~^T9`JaaT0v{h0d(*bKP*>J1gO#&j>EkZ>&?A=VL_19g*iHuR(^dKAnEX z9b}~S&Yxragw=H)Sq?}w;K8@Or)!sjAk9j{_cCh;&gOovZhSC|>N1(D%nfhhiO@pq zohx}*S`+WfIaq_SYq`JdeHxHIYpGJAYZ8MhZ^$R+kgzCdyI>Z5FZkx1n|6KN@=sl{ zqD8;sKi6*w{j2^ve(aBxUC#`9e^)Kh(_aJf6RPC*G=1pL{Kz&U?E{>=a5eu4^$)zS ztx>Rbst(5-m7QL3PeWrVm7Fu9??2<87w6;3=L5B>N0Wubr7f3CLF1ozse zp+t4bPJez9`1!~3tg%+YiiM`Nmwgtz=N+k?q$@+}V+SyqaIl#~_}Z(Ay#3R7Utq?9wIJyn)q(A+<4~4s)!o zC4J$2fH#k?j|g1v1%t)89Z_RFSVsR##BHJiZauSeHD%}kYKF71+Amr#MJ{VF$B@vg zOcsudbyvc{{HPblT3aw}=EV0c6oL5a3FGO71Nj&>V8=2#QHq;8Ne^~P7Xr)c>JWzb z5766j|wy2Qn5QX|wavmcev%R`op{ 
zke-I8YTh_Sh9x3B*C8e!~}OJKBFcW+>8Jj5n_E zo^~Twg6X&K*Rm2Lp(DbKW{55iFFGsvDz6oSbD-Cxo0Ji*^YC|A&^DoH-6AjZW;x8& zsGpyqsey`zOlzZ#K@d1-b-$;e8k70iXk%p~-9&Tj4=X+=;H{wl2WS`p0A^z7`p zRtBr3_ZGIF2*6vC8;=iFdc}0E8#qD@zKf0EGc9-$co52!H&G#@Lk%>%F4vr%x8b z7xT*30&@wF!u6U(U8fAKIFHEo#F5d#Nv!As8wo_!$Az6Va&^V|fX_&4{lp`tQRE&~5!B9Klot91~RDbte<>dQSgW*X9<~8n?dLe5?@N zrC;%wtHvU$JCAA5nSAg!d{NvxoehT!-{%L{JK; zKyfmq?D69Q*zwGiv)a26S&Z~=YB2_3l*l!a*2FCIJ>2EZ!jTT#nFT{uIwcU1s%12o zLc;SeFD^JbJVIKBN0e{V9AW3_d0OrLDIoQ@YPFHB8o6(4-`*(fMGR9H=sr+_GWj2O zNxO$ZDE0G*GconB!_w!&IgWDVrz<%`cc&76Th{z~8QBGaJ|d=+2%+A%7Fr(t1FI=mWp3~U*w zhhJqjg58-($mwy!xV)(u;-xQK?@)I~uf-vTPW@7-lwH=K5Z`|c7wbrXhUZn2K>}%bN9=U{ znd4Cqca=Naz^V_62KRa&dC-RL21!9H1&-LS$0+07UxjCBBdeY=Jc4x=O(v~d2_W*! zG5+AmGT_X}Y56%vg0m+ao&+3jgMH#>a%Pxfp!AjICYNe8=*#jS{5V#GO63fq`a@*g z9YE${9BV|Y9b302-Y0|cO2%w@D;efaCUZ2&C4eVYVk0j%2{U7zZWent;X9kXogYtR zVZ*_1id|pxAfQ2H2j6M~7~EMclIS~%|6!2Q>du5Xh20v2c>rbaa@GYrQ zn~!O*La+PD$}0r2^d-kS7!uJT`23Z?AEltGX5V5~R18`LEIlSiGBKff+DM|O2!9G* z(k<^vgvv_(C;MM#gZ`OaanY>_Xf*34$&mE`pASTsnLcU);|L~?Tf};noU&nMnXSbB zX!lAww>G#zQJj|@O~z|!y+_~gF9DW*+N?;89I&zaAg1&;AN1k6oNZ47SU)XbS~%zm zvmN?x%kQ+pr_*oKp1&zYhd+_--77`d)1Wo*<8}s~ui-4|!~!t%A*mPEN1$t&H}DfgS@UaLSq?wSn6I>1W$N=zhV(?#%Qw`&LZo zm6FtQuNJ0(PuJkF5mquXZnM5LbG-muOiRAqJm7*mgU(vjGg`t+eKqGG&vHDtli4r+ zWF3Ss+0Yy%_TP6-A_|!en!bML>2<{J)1#7N54CXF_oYF|-Av@a=}Z6FX%Hn_^_7OFEAV4(>L&Zb z3w$6KZ_4Lc3mVFLO;5hK;I!e+eXo|&fqH4omzp&NeA*Nz&s|Q#QJ*i#?9yrA{=(0z z*QgD;CH#0KhhE|qt$EXqTPavSBr?F%Ur6Yk41cce=>eXlqzBh1igEJ>z4$YmVsMeu zb*vCZSZFyKVzp`vPmgUAQdP3UC%)aq(*ou2snK9<(*#`(NICRX&x8KeK7}`dxZ z`(K|b0e8jK*>vRs(Csk!WwdPo?eZv!(?V0B$s)nD_+~zi^G?oh9*u_L1Ik+G9EtPh zbCu1-ZN2EdAXligkcwP4jfR{?>cR8Y>wPfZha(odHpODEZh}A zJr$3XR$=bSvAtL{rfcQ8-i+VAtQq^ekbqasEI$pO!TV>I)=M0gu}?jD#de4cbu*Iq!dh_048OeHiO{>s40Fi%=K>( z2;ZWT;Pj@6QY0eUtVF7U`c{Y`F}bl30`Q)W%e_l{CzCE*?5@p zNC7Mr&#}~==tb(tmTUC%J8E?4;?$oX+h74WykJnz5!fRHqsUE_|KFiq0&vLj8YbbI#U*tF< zL#Y_sW>p;CVTunIP;A6Th9aS{^Qq|8zNzD3oq?C+O6-oNbpgFu_~Ld#?{l%x%y6l{ z3k^Oc8jKxnhTW~cJ9QU}@oAx~gll9S=sC`KkIc4!)6=7O+>49g&$Qr=;Ob6T^z6tO zwk?J!rJ?%0@{QpCCBnTUx)4ijuDlQ}7wvfPWzd9#Mm^GmKO#1mVISt@DE%3r*yak7hy7lT9ERkF0uU&XZ zACkH4D8B70LWXVIt};AuhxW$Tp#_}2;I}1#!AeU2h^Y#WA60&Lom*QH$N9c#hXj?gabG!U4(xnT9 zXOjJ9E|Kx-70y)2SrXb#8qC%Sm*WvbnXrupQe%M&#y_Qi^M;zr11o-%bpgt;u!?G$B+Ce z**@UiirW^tuZJM#rrL8|r8bE4L?;G=5m03|DBx(H1cRVW&74^>95l31J^qUB60`bE6x@9G+;HhK-{@^*;A6(w)Fl&vLhAa1dW-kEPlP3}IP$ z(j!%sK`>0%<+60O7N0db9Z>HW1h#LVv+Jp&aQpS{3yr1`Fd00@`D5qAKR8yZs?zj- z&c6!%NBpbiiR~5w-R~gm_TRe-OSI=Lvocmf+u0@q z>R<4W|1JNzVJLc?{38H^H8U62TYDhp45MS`=TP8H?0Q|H)(?}tC+I}7Do}#r$cgu? 
zwXn}VLiG8Icc_yPd#})}59hi&3Ue2$Ze7IDNo+3^?|>49LU< zqBR}&>7>{YJj8cax!SBAD3vct{TPh`$-<8;?R~>|Khldl5&s$+=RdV(H`d{;ALdGJ zhhBrAxYeP_Q)5VVOl9%d{c$kVG=-Aq_1HbBT=+Sr6wk}YCmeJd24i>2zaP%FKzL7U zpK*}GKhNd3p@qi(%>VtL^q&NM@GtbAw8!E-x18)mr&F#WB3{Cv7w5wy6CMFAfqZds z*2OU9BJfEsKNy?du`qIYl!5f~sKg&}+Jvs#`KGm#4W2*oJD1X{0qQf->;n{X;il0Q zgXPi+sC4VCQK+cL={M^oqs#dOZ+2_w7vamCbmpcS8L2|yE$Iddwspuo^Gt&;vK|ed z%LzvDr9fwP$h@pc9bBmG6uecwHXO>&y30Ns*DKUyyofAQz1Um?#caT4A>QY-z;3ez;i?@^b`Dp~!WMc+gLT z(%r4U9@=K%{FDCYt4A{6k@`UAtHnep&VRder)VSo?7x{Ivs?%@D@Nj{vNNG1q`@L1 zrWo_XIf6X<+ac=gr^`wM5qM78P}D`V5`~$MZZVmwf~efL1x#h_@Ys3V?ahLA2esDMj83U$?KOcVkuMCC?9Dp&&ByRj^(>1>c_2ta2T>kM@*00a3I0aDhXwx%5Ci zHgbfS7h)Nbz85qe{T_&SlMCx#(S%~Pmon*INd@#(T#Ef85s8tZb?z))jd-#5^JNvn z-|2rUt?Oh*IZnygg(vW4;SKi@<-AKxU`IC_*`ytcu|qwMA0-+=XmZqkygLt`DM*zY znY1JOFYWl(1THSD?E1d4y#gb1p1u>}tboW{Cf|D>wqj9#-@Bt6aCyfnrKn#Ev_-UcT70a8qaD=^bOAYp-%o4U#Uu&-x>io>%{IZs z?qwfkqcZsYY5%J&lsVwg>|v$8uL4f<+%&#vX$>Qxk@n71MQFYCw&%N9GIFs@51Ej1 z@%blZeYegAu&VeJQ*@>c#yf2`kN>H~%cITjDZ?JYu3ei|7OurWrQ4%%@J~6&75&Nk z`JfN?YK*$x;7!GAS`Q=ZcE!MHMIB*Zav4aiSjCmGmJ$8v{ucV|dQcqkt9Cq^1wX_c zT&}H=A#*Q%00&e-(1%!_>6=9usXU`MZcv1`E$HI87K!|gh?u&?Rm4NCd+S)GiT!Cn zd2oia1=yr~r3D|f0+s#z*E!A%u-dAZvd_2~fAJ{jNV=9ocY0K?3PTDyhJCst^rH=o zXD{*T@l}G{9!t&`?k>FNVY;<5A{zJ1%#!L(q=VDh`L2Bdtq^~>?-t{&Ueq#GVT=g$ z!qXzDYlpA|Z3pS?4`+MAjG*V4|W$_@??G z7K1Lofgi~&6nPmxNWXpMgGR%&j%O26VSDDOx|*kmx6Hb(D^J(p=XsgkvdvXk{vf@^ z;2{ZPlVS`H-)O?-JMTL9_V)8xOQ9OrZ9mVnza$r~ z@;?5t5EzPWcWDO(PgXY4^$VsLNWgcjn#VYODy;Ge!HKVj2DhH9#wgZa646_ctAH7w=L2v zm2gCZq|#lkSTYH}jbBX`HcCfcwc)Z~6@{4Z7PPA9Lxz>?%QJfZt+=sa{&7sS6pB(d zTJBwI0_$Y6^EHQ~pzTji{BN!X9O}ycOZ$QhcXGboe5#lVewz(d#)?@$X_7e`d^s7N zr_X%;^{g5+Ru+#x4EBJHy2;~%?Zo#779WjS3$-}1wqY*5NP_OLlFKcl-XOY@o%$6`C1|S8 zb*C}C#EA{dGmq(<;erR{Jb!W}Dv^T3P4e~d;8RmIKf`t?KEl+~F zz~5g#Owh<(S(0^_@q21l(x(@i@`4 z6OP|DKlu4+4=`_D?2-PRix#&&ZLCd(!;U{3Rh~3eP(WcoQS`JP-qmg%v12NLdBf~3 z^@%iypUlvp)6a**%$I!R(n?%NvE|SdZGbA{59ccge#HA*GAWe6h2E;3C@nMS$5-p6 zJEp`#;g|-ui1nokc>i%IOWUm$E3M4d#2AZ^O@Mb}=W!3z%JSnQ9jd^k1S(*1X#)Sq zg#w-QY7}5^r+*Y^ff1zR5*EpKKww=>)G)yoyJ9%6ld%2VoaDk=R_~>1N!{yS6W1pl;-prN;w2PLn*WDoI z-SM#Akoso4y|OnpuNYy!^vs`DEh8Xv%4J#!6Mb5a>W-kUW|+HBA=H#b;NgN*zd56t zU|Y+>)8~mgy?KZ1VHUPReB{2^QYvSOhwq%uiLmC>cHw<(>|5z3}Dr>ND!9o13FXl zKI?!4w0~p9^N~6nHp`-JXnu+T?W#$gvV{T&kox>swz2>Y?YLh?sa=gUF|(Fm6dTas zW#HhnZ4%CXtgkxSl>i!wy$&*J8)Tv@(S>!(h&gK^G_eNkGgBEs zzObi?xoUmb&%-`DN9b4`$thkjn)SGik=DQYRt2P-hJ_-#A{c&i@1#si5p+<~pH%6~ zfL%I~@|FbZ`c3S$%qCY7yvc9*9!TuZ8nNcmVx$U4V;15pO$$T{)y(8bDMey8n&l@DyzuG`zH?X3*xGS`|86^ zac@Y-^*BtuhA$hx7D_ShihT-QLWVX>M}pwMzK85RRpm&V&>^wBr5U1U#2P=o^?<1^ z@uz#zTk&z2ytx`jHBQZ5R-hr~?Fw7H`GKElIC$%Tg_Lm-hS_WiY;17{8?jh)qbbF$ zJ84&*J}2^$gcforzxhKT?eW00nX$H1WjEpH`)+Xw^$3hDOFtVsNqBuSR+#oC35)fc#iJquz+bfB5f9P7Qalgoiten# zROe%LIm|V9C%`;xPKE@`eg}FS8&c40|F^gK^;hB2@t@v}h#gw0q z21xCiAPt@^fheJGzUyiMz;H2o;=ynRNcQ#=q^%d?8NTgrB1&6OXKM0%-$5Tti~d{| zR#Jj#BeV`=u~z)c^jNEPt_H(Tep^a>Q4h!5@8mV!X+n)i&9p=Efsja6ql*n_#8dir ze@?5BVGoazXa;p5=;}8WQ!N*PuI@fLAA(N`H5%wtb*%+*IF=2W)MD9VvPD+sbIjXO zsJ}zI5&9K$MDyeuP(}Id?V8^`xTuM2UyX~A+1$SFEx8d+bq%^z+)KhE6S3024`oC9 zxw}7ZJRswdX?~;0ks7#r?k>-{$LZL?S{A&#;!pVFwLiogC8E!kU+jG$)kr-U`C8Jt z60IuMy$r8EgGiQ3O4Qfu@T*QJpK@$EPLwHF9P=!JrH|YnT z)z6jUr=h#m=0L)|U#_!WQSnyg#X3U7LaF3#u}o&vHGOdOs2C0 z2?OR0&+j?NH`$iyB~b#$*XGzIRXyM=CEKSUh7RQ8=K9vmQ42-a(rh0oj^OOqO%i6T z#4W#)-6X8I-bPV7JUg~gcA z{r?>Q6#iHF!HTl7Gf$kGk?YcYI?YTcO26^Z(I;@4%rDv883_rI51&mykgASiGS}Y>*dfAt7zn$KqlGBwuX& zWKq+B=l4h2@|ccdpXt@N@yi-TOY&Vq|c~!v2Hpc%O5RF<&O)R{B&ACW#J=`Q##BlGF*8 z!d5u@jN0&Azz%_nheqMFD_<5ls0g#JD*j10*$X_Y(m5rPgl^-)*Nwt;;+)E*T!|Gy 
zWZ^Gd_PE=I?^l&M3ce0P&Q#2ueD)T|+RyNyD4~MjpVY5POOWtEarn+<;aZ$f>=IW&&WX&9ti<hAvdupMfcI7#Z$Gq7~Z+Ib-; z3FZt=r=+F#VbzhgR_Cu1`16;&S;p~b5Rmye;>Iz7B0dj?yj41o$=OaXjcx!W|qe&0eEIja1?b83HGET6$BPMdTGq^v!E6M1c75E2ADi3ed8lO+V>R9r|-y z?^*Wf!sb0aVggKa*z0nIzCpPe4VH>N2nh6J)A2pO3{Lc6=m;W(|aF$(E!hH~L`~9r4bnHdSVvjMmwqESlsVEB}@*`ep zK-!epTNtFVpll`du@N?JF2&@I!o3u!-@l1mJ{j$FiERQcAeWcWZaxr#1Cxh>T9lJ9 zOpk49>_!bvoa`s>5$T0FD<-Sb@i`FUnA)I!OoDZe+a*dt)!4M3N41M<;h%XJ#wq*j zKgZ972_W?U-oM1t7cv>frO1K}nv~BFHj( zZC0K;1mfIfKX&cx$B_QsVNdxaJQFlLNLL>Mg}j5uxUC+deUp?~nrQ-_22YN!T)A+3 zos})owwch8QwKOaCc|&)cFOMx{je{dE!A{?FJ|r5=}{RR!L4z}Ta?{z;MD;^20lXYzK<5AEQW3H*43=@?ZY1Umkuh z|Hc1D=z#z4|G)5aEGI3`jDp1YJ2|e@Ua)#~nc>XZ9FDx%)8u^eCw?m6H#&0e!$0%> zBAxHTf6hM$|F`+kgk+d_{r~CzVxjo&^Sgx!p6-A5KmFJ5|IeTQ-hU-b@NED4@1I^v zUS48J#kIdfcTxyHk!#6yHFxO&)Yi(YT<2|v$iTG(?T9wi4lA~~ELwyU;#7*#M6QvM zb|>=3y+Xw?XUujPgk+YR4xuW9NmO8DF>qzTM{r~n0Zg+t_eKgYQM=M(S>hx-?^6Ac0o{_G4oN$ z0;HN|UUjufgS!t#q61m{2poGrkcu}7f1jwP45}G{b3p4{)Z2>*mmeK7w`stD;KEQ* z-#*y?%l%Z)?IGB>rFloXeHbsEIoiadJ&c#rQ;z=*YX!TX$E;>ET5zdIp-aKz1AdYj z@PM%~+~9qG;0sd^T=#RFjc9&{DLr?^9Ez&(;A@zSZU^2r7=ZL}=8G9R<2V_&;U{j=3uRhc)U&yYL1(m-JD$`Ej{Vfv&OcAb{zJ)E^iLp+ z?D1At-(QRJw~RMl%8g^DkJu+U`DTo06`DS(dJp8^=pJb%@LrR{-y=9Y$gnHqdlExR z4D9w0FqZpOh%cENYyyl49qE_$LMuX-t#h9&V7B%28-?kXsHUhHxX6(DU%5gB= z^w8Azevm!gdQFYc!IeCJEGMp7f)te{io>*8!*M+qvDV>XobwIlQ>RG0B4lanD2)ml~79ZFD zIH%}ejs|1$7X-`mK{rU!_Qpp-NB>P{>jBY0wE1%=@#ORn9-4eaT}JdV9YM2}xKRly zX;rD-X<2Y8y2)bbS~q+mrLrHdEX7ME+vrlXh`hq(3qB(O6QKLD)k!|A0GRdbHNB&& z@TGEpud{v5fywO0^SN9eAt#?d(E+J3RHayP%-b52f{&{mVP*iG85-yB(n?ZGLLt z&z#r;!#Rf^`fWT$m(U`7!Z#kO~;qk{&TK;R@Y$s&;3`z|0@5Np~KCcX*ZGYuFHHXsFmPN z0x2(UynGFr^P_g`Q^k0{N%eWr{Q*4tcPx=ov>GZ*^%{@<=*BBq1LkLm{M~8_xldp- z06C3(T~`U8YCoskw)=_2SUKu?*vT~k&G(fTf7#xSJJqu)T7%xeSdPJndd~!W@=@|o zU2Ml~X0rwpG6=2i9E&z=h;yaBk>m3P7){mPX>*y#Nw{S!Z|(U84?W0nJ2KONep?P@ zc}cWlt0U=;di@97cE)yP&bAl*)o9-F$HwA9KlK}_Eq&-DRM-B*WEh@(&KqVAZa_CX z!1u9P@Fa-FUr1>XDcsw~hQ#u)bzAD$+^qXBbWw9hFZn$*May~Im@9yaj;Jx){zRmG z6e^#)zYmUW+Qq-P*Mjfki@){uO~4WId}_aOIPR*`+&fC#GqNy?xye6>+yGy$`*VH` zXfvGO^Z9!(ES{-&C0y1FrN7R;*`b#LOEnhmcWFxD?op@PZvmK`bYhiDQbwB-ySlC*YLiL5vMON3B$dd}ln6T#D zd`z_v6IGKRtB2;pRn|xv3d#=L5_6ZoW2O^suhMotNGQcE`%ID#G#BD7QJyz>2Q6_= z@h+dKl#zetU#d^#-v8Nu^?%~;iL&co@b^*%l1tk&7lE~de}TiZ9)=HItauas8wdWb z?GEkvjY|FU49G`${_0(Ap<6_*0~M3$!q$KlRG~78u>LspPu(rL5>9)bC}IBB`~Tj5 zb>?67?}BX(>-TIOiFy2mGQNPwFA{jew8vx_zwd08vMHFykCz|sjoL#-^4`CAKw=Oa zWbHX~qMES(%5d53H?#kYf12sqm47k*f7QR%8~t7`&gup&YYinSB0uzi+K$T{61}L) zr1GuuUte)Nj)5XM)AgL~xIJF5pKL7z z+iAW9ue*mr-um4G7xU}z4%^0^@RVZwb};3Ti+LA%OGnWiyPS$*!u^bIUgU#tcTTzv zZ4j1wY)pF2bzxwEdwRr7DV|CaHE((7hB~JP3J*#e;@q0742@_yMn1mt@btS5-1AY} z^jk$ZSQ1g{6pl5pByqShpHzi@W4f`jpLH`)gJ?SZAcCQvK3=2DUQ8#1a+3tqU zTOKGJbtUIme>Z0Clv=y{Edlt9?)n}0S%zwH4&5rxtARK92Wg$B5UT3N>}v-~;dARf z8T+(Q=v|I1*S_J7R)ak8EYEAuoRBvalfI>lKyrSqOq@qTYsaU}UV$U~skpFKvW>d@EE5o+I~PO$B6PYl!0B z-%DhuS5lO|yhR?)Ql%|h9Iu3NksK@TvjsSwytUHGCk{OYX~{ftIXE51E@gQj6t`UA zs`^gk(AFnknUpGt!LK}gcRsugK+gKov691)Aale~t+vq#8%O8O4dX(gojt;UqbeRA zWoq`z6*Pd?iPscv{J9XBl{Ks%R17v2ldIu>2tF%k;^(8TRD>52@(Mo*Us3&`^xCu4 zpz8TLFTyMjg~gO63Y+TTl<2!WS>rl*@VZ=7{8KBsgtoFgjHyJPaMO4L>0;;^dHHHr zT?71NOxk~{pd520C=QFK)Q+zYrq&MYSKM6`r2kGq@@WuMEO{@_ZOkf=(iX*&jMJZOx~XQ`7uma1QojDDfVF#4DYvh@n4q3(lW*#yNb{O*!J|A;CQ^SD;iLWh09 z(NH}kwLbw|nBQM#f71dCRb3Nj=*eJa_9~$LLMqA{+Kx67ImTMH$KGw7szU3}if=WE zdkuyv-t(n4G~m+qXNu!{Bd||*^_Q4MGn@>3X?|jg=>$iZq=Jx0GMDJRnD6dbQwkpKBBBJbTlyOSc)4^Vg|)mI{$p_4<}qe$Aj+ zK(le9p&8soIdp#q*1$eHHU8M>R6PHRZGhLN16{K7*BP?vKz7h4J&IIL?6aaic0Ihpyap^%FHW9atq0ZZi+cXgNnxKmqR6P`5~2~ 
z#8n1hMD=ZN3F4yTbIoTC4N&cpcy%vd0MgQGRmaa0_iDxS<(p5G!dw+YA&s#UzLk;| zJk9YOcGz@DcT8pB$>_Dk3AHMq*}ET=T76);wuvXJ&jx>oi=9L35@1&wsb3Z$L9OE( zt%3ATC~~l&Th2@cS^!D9FxM423B)Z zWLQxD+dFI%4uqAS0Kv-$FU=U}ogyW$Gt=ge=Xes+b zB<5&27G{RWy(9D$x2Kud%syG;<^3x0;iu~HCT-9bCaOk!tkGQZj5Qu9EcnyovVu{b zjqT{iMn4d|9Fu$fO$Sa~vJ_f$YKEQ7OZ^-~{yJf=U(h~V1c9uUjDo(kguml{$-A{k ze67G~_OiPKM)N4HdNVZRDJ~g{UzJ&~<)=f~V5keIXuD}>HMQegne-kLjWj%<{gX%j zkr(Pg&7Z1+l_*2IA@HcW2!kH3@1yrFh1g^ajTm@>Z7Dz8#b@i0xXneSpQtZL!Y;vT zFFo;vRsln(R}*;My%rbW(t(V25)ys)a?nR^sIR-K7$#AKEt<%oxDxv?eAnF$SQFVj zNGV$jA9j9Lk9gV++h5LBnOD`K!h?+TMWUWjRNKE#E~5|w7?X2<6M6>qxzNAoY>R*^ zk&|O3rv^j<&N`J&H)7|hpF-&$@^Sst;Om8)2oTC%R20AC2T#wg{Cz?Z4e___oYg&E zfatqzJKF++ps)C;#P6VNj63JRE+$`u!CILHR|ShunNmE>)H?qcLUtbeuyIbEE4$8*|rX5lKpR&PpJ8$K- zKV0}cdjT%JipN|@YyX$}#JxB4y1Jhka*6smtGVCb7}2iCaPn^(SO!>;(+un3fGzj= z9m_;6;?a_GrtziVpr8};gT5ANH9sEQE?aiaDo79|DT)dR27(v}Dk>l#S#pj7l5=V@8|WtIoU=p&AW1~=p82Ng-XAklQ}bDM zseKOLb%fgLx54kZ>xV> zHoTC!@GWK_1JXwCkx!B{QMFGmglaAx`0sr(W4`5yBe$0V+jX_E8#)1{%BgP=5L z@Qft~uWFcWeQ_iR1KNW)uV-C@4_;@nrKgr;Ic8}u9uBXYjgg?Ff=O3{5Z>1De7W0;GuoYoSFhh$fF{dhh`E?)7ia(AYZ!#-Sje@PW z>*J&yI>9kq@kvGuh47OZP)F^LMWx0nEg9Dm*!9ubpwrP0vP@3;zNV{$yJqJf9s3-K zZqIoP8;VHSx_iLvni+}61Gg1C`;~(kU6WKrrRDh9?bDph>oUlampLcxpNmG#^SeHo zTZ7zUa6qay1>~Lis-C~g#K|J_wQRk3ESD)gZb+;np%3_PEXZbKdz91nD?jpKbx3zU z?p+CfZM%KIo9Hhy?Uk<+i7N$v?(r0Q&V0CeZ-SO0kxFp6qCeJDR^VCJq{+w3h2T1o z5gz`W;JYnf_@b^`geAYv@F+hfcq2En;x!51?W;q((>Cgcqhg&0qo=1F zaY!$@P5&twQi7SAwwV^A%8#5|rO#6!#r6%AqG~=Yj|v%yP<(M4i&xVoMOSp*FeQ8P zdKQH8NM{BakU%CjNakjO7o;BDyBckN9sKy3bP@Di**}QEY78WH^43h^eFsT!x41Ic90@d?kFemz-W!d`8xQ z4ow}>H*8-%$61r-0{*+%mS4~PlgqsCL&=}aU;c=HrT=v>>E6LqOdO`Er<^6j=;aqo z!D-!iJu<1t-oK9EWD6Y5XUTw`w+#7jMn=&2qe0(&>lXaw+GV*VlLEi+*5ifTG`P*T zJbc&nBkV6*ko00HggY1Mm6dC{V0y_vc2uPnlA8H9k;Cio^s}FP{O^^(<>p(eQBDN! 
zhkyP0g{5J%*pu|)1!D!ioaSE3${>Mn`s#%WLg#(O@#0cXMHN_Xzw`Lm=@yi$9+$iY zPB2q--_mc#v{w`wgTA1CinY~>{XFEf1W08H zjU(^!L)lASb7*CJxTo;i@L%uG=^azm-`8(X{agJuPs{wAB%vqGnnVZfG^htAYR;G) z>jF{d(D?qvS|Udy;pWa_?}wDI+S~8$)Pbtf zcp6)xU#Z9NaGLT5l-sHsTKcmR*0WcsCML8ZUuJc&{F^fPRQL4mf^QTq*_7Y29`uG7 zovO%U(`=O3D$#r(H615|qBykqQ_ykN|K%cG6smm{dC??R4N3*;oi&XnaA;p?sf~0A ze59VaTs)PJEFOv7ukTb4y`okx&zV+(aO@hlbbJjKo{8E}a<2rPlHxZ!?Cgi_$Qb9`w_8=jXFRIM(L!*JsaIg_Vh`8I^i^4q!pt{4eo^qp+;NDaj&F5zkJO|Tx{oO5aqa==> zNPdIyl`_w`OPLUKR&m7oawcwwoZl!X)dY`o@`pQ#oQrt(CB}=^^{}AwL9Mi1E1iun&YT=hQ@wxx_ zQ)N#`Xbte}dLV2fL~sX$Uh>EtC;I5S-_@7US7YIq{nsB@|I>eEp&+RL2lwxf^0#~U z{?fg^DG=w+P0v=}B<5h9(m_h0jc~NNcG)u_7rt{WC~qP39uIau{d&(9Ouo6D^vI+Q z%aZ3P?AiT5t7`ws>SixA$FKA0P;ZB88{9VPUaA7}$$s~$?M0w#3+qL~y3z6M^6t33 zIk@%0WbpuzTR*82M$O%ni%(=jU)uzhqtj(2o94(!xWal&y8QDH%pJY{xaVpRWOSF@p2-!%Zs6`&v5GhyKX&eL+V4{rB2Kms=q2;`;-k>!V@z z!~4z+s*T8amYz>l@CBwj!U9`a$=KMw_@4C=1^8yxk)%Ux@Zv)khGXg#NMpZc^X7PB zZoiuDXTZ`6uOr4wQZkACirN$Zed)5=5 z%s*nQcwQ;>Klq`K1=p?TY6Y=PZ|uGcIe_z^@?OT%ouK5iI~VA+_Eryfi0q@p&kC| zhkvjPnkRlyZ^8FYR6qJ^NzhB*>47PPzOK`@O(drjd}rS1J*v+ke651hkIWK5Z~x>e z%At7N&#xq~%r*-7F47k!I$NJ)%<<)|NZ*?r~VNk zLWlg{pFb`)>YVB)vmjWl(6re(8z;G6sh?fwhV|*E9uK4~15a62ROvMz_{aSX8@X2T z_xC?S|CXOkNUp@s{}2C~5OGfUSO3^QU;n@V{d51C5V6ny*VjKkx#44YMkjQ9>Q1Z4 zjRCi;Fl;y-jT-_)jrPL;TrT_Fk($$rPmSmdDp|;Q>An0ia=3Yw(J10X8+M$y z@+yY#hwW^$Ym%Y&#yLyHEgSX?KrQJ9ZOv6;{@&HRJmM4p->IQ&-R%}Qv**FxS?xCX zyg9?_^!+q++POX?m+*~O7krkkm@9(^e$O?YnmFM*#|LLPR%=iqYsZ)%Ya4{$JV3qt zYyy|Q@pZfIZiS<%S5lS>2O)oxVD8|rWS~75`%|Va7Aa*$BKHV>Im^0nUbnJl%#Kzl zxOK7+PR5&KYDF!C*HJE|ysHP!J3G^M5WbrDLo7XyqXPbVF0*bmng0F#gAj3U`1}0f z>r+Bi)GTdSlg{1nleZimR~OAcl_~))`%{%0G@5~tEUDh_&zlsiaU~I6gYL8wqSf)vBJzZCWKZf2Kr4#po_uKI+x3;9ho}Zsf zygjGDZgYM6=njHYH~NJmOtp!ayBj^VBlcmslWFCFV%=zG{&K=5sQ}J#C8~rnm*ZDG z>2pc@Tamp^v*yT?b|li{t4-t{==1T*70hf0`{(a-xD-3ksg&BjudM+0_h+#VE61W4 z-=pR4$6`SKtf<1Bu155J=o-KMS}j;NeEgNtQ3%7|8I3t16Hy`LSjBP#{FI(eyC@b6 z_x3u9ey}!zCr#a{*)#>H;oNYsWzh~&v>1fR(gnbKsLJV-WF+{Wmkm1H)q!0~tCCL? zvVc#Wp~JCO1GHR*ym+tXfX*$IpG&IEgil3;IcFGghhE)=MbuWRLONg6O3Q3$5r(Vcy(-0Kuo0;^+_ui7fx3|>p^=# zgI{4_S#7lA+0hDoea2TLN{ft94wk-i#Qceid{OuJWErTOBKvNfbx$8LwSo%WNcqA)NPrIiRt|smzm;0 zLd)q~XLSzNDV_eZeZ>bYP5O?p>??(5vQiU88>%6LDdBU~R0qsdFS;JOREV;9o2>iS zn$bm}{gSRm5h@e0_dff6V5Ez9Am3LE6vmuw8kZWtS2Ra7A-WnXSR)(mjaS1CmZa6Q zk20V}#7RzDwE_)YwEVx@HlSrfox6BkH5{Sxu9I)Ah3l8aZI|DWi5$gw**G_%Pw=Pg zwgYj4kYM+U@!g4PXdLFfoXpV*X5aUZG5Yu5g}m!epY! 
zU}5XXY?xF(y2|zynjUNYYu_;TyRG8y``3j2h=0s_@K?n7CBmO8acuJu(|d6Dn3Gu3 zlXlQm6SAPz??saQ*R_co#JTzMRreb0SgiMWzTQbL3-4^>qW*lO9_e##&DUIR$Mdc_ zj;BK!;L9V2ohF9~{)UkW^FnVYMis^Xx@*^fe6OkIEUbv{3H9BElqfHd48r`xfybb4 z7!oE^UxB)pPwU9&eT3NuJ$pYvHIzO!4thaT1C{m$3Gbc8koxxA4ig8?XTK)Sl z;fuZZ_Q180HmKck<&x^76nMWRMw;_(1lsM#>3_M@gH)s#^-0l2(CL`@V9Z&9e43>G zuF(+O>s%`JV>^-0AwPe5U9=i$VwwDob9sPvN5w7X@IH8=q`pkL-U&;QK`IA5iU{4a zb*#cG2AOn5heUqn!PN%0OEUED;IXixU}$azzGb*PfB(@ChIbxczM!24-|ExVvF)vG)8yCNK2fe|SvUEe(y}%g4%LP>ZSFX-Q7QXX#pWQTim{CeyM9 z26g|ne&j02FaCG`+JDOb2>sFg!0qN*l9T&)3@A=~(WSNm8$`NaD#HpC=(C$!p6|pk zRm*e*{W6g7e;Old)QV?z|8iTH?}gkU&btRMH~w>9mhb8&M=(X+i#S0ThiBo(h^Cb*V_6ro=JjJ&i@J``T(r#sx8 zgSnAEsk2{x9?pEHKiVABoy zaP99EpehhRs_CPka^t`<1HpZCTH?|i=*-7M(41=mxj-BaL_jP9E!53}lpU*amEQi0 z-&Z%xq(Ak;j_@UIIqpzSs?7o8efPp1svs&pYE$~O`7t`$bv=1s9SVC-rtKwhJAu#xjA)o#WqLC3 z(TP5Cmq-x`DQ8S5OxJD6MXR?N^^^-vXV zfHTtaYoCOJfKTbdC*3Yr45W(NQ$ld?6pxwEb@Nhiv8mxBoG?T67TXv1`RZ}xw3c(_ zN*=CV7K=OY@E8L>N<`DWO@RHIN|(0o%>mUu>Pw~uK5%%3mv76)3~;zLdy-sGiH&{Z zRq5p#u=Cq|#Y35V$lo4WIz@1V_~stDZ^vp7k~7I6QpT86xNBJJrX_y==q}1EPsSr| zO6wyu>fxICNK(ePdaGkxGl~~VUv!qs#hOgzi|!@}wUdQsPJbzY7jt%7#A&lp z`~`y#GkX%~ew+W&kzJ2`<9CGp?`Oe@st0-?lnCK!xi8jx6FPUQLsoQuCES+Tw%3L? z6DJy}Pi{O|iz)JqfehPok-70mLTb1v_}X-w4--iPzUwbo-jt`InCzA>N{=1TV_3Sc z{(U7#F!W2MKCZ$Y>uT$s=Z!RN*cGJ2G5y#dnB!+&o{G$bmlt)q0IUn`evnd4yNhVKQg-Eb}!-Q*6Qao%H`N9~mzXW)dFS388ibn&Ym0h|h z4^%e!AKhDtAjNS-Hlw2e2fX8yl6Jdelie|qQr%RT7!fRyBXSK{)ZEhz2g2dxb|=ol zfhrWXw()#>H5`0XlmZ@JPDLJjCb8#>UfA%#?s^kP1flzn>iTp9LKpe#IjhNca7)Ur zF_JikH@g15rQ8;VwSFpJj(u@~9bXPzKR2EYIsUg7UkTnu=1pQYjOw*`esAar1@ANf z!LI^JiBb5ySU4qU46*F(*fP7xdr+mRHSrTJz$(QiUM-GD$l)Q0nGjrdx#_Dxo_7$A zu23~{JVz*zDEe5g8-X1tH9a}ekU_U0I7<7%ig63q|KyCUS@NG52V#0F|jgm zSbCu(C>TlX>r7AIObG^#8jG;@aK!M5pv0rvCEy?Z?3ABP8OB`QC!{u0hVLHieOUD- z0}Ly#Wt{Jbguz(8 z#54@NXm|4AhB`d)>AvpKvOsu8tIp;sOz6nw93S`<0)fl%{f0%c3e5YRuP91zCmm#6 zUyjxibN*N#pSKPlz~~XLf=pNip3?n5yTOYLE52N3m4B3ivacfjp=3vJ@%riHWS<2M zmDX($L{47qRF>73abGO2mb@1!PeIj*jQj;kCg|9Xzg%4`MkNIg6NbVv>|y0_RCpBv zd)HNSEv*K_?%C_1hlMKO+dA1Z*Ie^JY~Mx$W1=T$F;FUmzAO!o-(mb_8(9V?me<`3 zwkZZVF7~686O9iyhxGPzp@HnVz1mjKqF=n@#>BA8^P#Cb(ZC55E51HZqLG=MN*UB;t$QS6QHif&*=x_F*1}!V>@JB!9G^Y zqLz;9{p@*_9LjK47r8*-7YW}9R|YIwR$wSoRnk+1M7$PxC)~lQ4ir3t;|z~vW8{Xy z^U{T<;KPH6mn=4bcac)A~Dtnw)%d{B=<+xNOC-X6KSno?aT@&zaNxt8i;DulK znWIY{y@LST@+|L)EV$kpCGJD`R%dpf{aNrd8=t0nsi)S)V!}oSpVh`llz+CqQ6j|; zi`B&D#t8pv;r*QlnuRN&W6!s(4~Tt)=3YJ4r#$8OEh?3~Rz`u)*L!7VSqq@n;D)P%V!cS>y^w5>f3l#S`y(Qb{<8B@g zQS*7?IX>9h!*jzM^N&aGZQ+Q4$*}CB1utFUp7xGN*C&4PHt@)A$tzXx#PjZ%e4R?% zD0fTaaRV8~2)+eN zjkM9eEU-=CAs1s%iOu+=+4tKZ z<#y-$hwxyce?drGp*;)p+pZ+n1bc#rL;QE&O$lgwZ{LMygdgR^tW|J>Y%K0>={xv! 
z)dsii*{kx4;Oa5R)$80R<=_m{DPK{KWGLA?R=Bj74}+ruyBPQ6!tJT{-D{__vF$Q{ zp&olBWU8j>XVw(}|CjLHGaG{-Q|GmJtzkLFp7>fNqTU2OhDKw%Tx*fzsBzE1eH2WZ zPYq!wXB43#3P{p!rpW)gYrYycluUYtu-9qWH~=OyecMbjCpK zqDc}~og2MB?w*Acdwcia(ToMj0MWFuUnF2*N!<|t6XE9Z-|s!P6(FAFd+_Zu31VzC zT2d{e3H^ymSwOi8ZvA@x{u{47Hu_z;GTGe%f=+n0v>+Y?BHWm3_tc_Y@fL}#bxE+B z`f7AztQnT>aentsArlU4-ek;oy%bsmzPIslIb-rF_xg^KETp3|ds=PPjBKg*L?u5w z1mQvIW)g8;TRtBDl5egQT9p)k9?_(rt=ou;=V^i?p6-3M^OqHJDvW(N@jM!aq?dBD zLbD)0(r5L=wvra#LnGKwe!%?(U2y+HIaWCXNUO;Lc>B#!Od#uA8$WFO!>oP@2J zvIAEL;$u_Dc+LU!`a$p{F zC;{fu{j7y7C79ak8)Wy1ICskYlr`mwfy0J}rN{TZM5W^vgZ%Fa9^#ByCs{BZSf6~p zpB@$nH;&5me4|#2++{ArpPcPA!i_$5PXl&9Q+ zTMUZ1ck(vo<-_5sg2DS|DPY<$65Lx_2ETUXKZeN)pto0$@0E!F)_Tcnp{*34&-v=_ z^0E}%*t;V)aK+=4%kjCxy%li2bH8D-5SidBFKy5eB>YF0V)uX0_5z>A77?DEHE^SU z;+w^+6?{=}G+($KkH?++M54>9aQ!K{EXH@`_){ixJbjfuDB8Un_*?q;5sk6v8L_q&EKHyDad%xMNT;Bn!h{6&G7O`GKVQo#=(5tq>)Z zzpf@M0B)^B$p#unU;}f@bIM6a;GaEp;sdXQ4r1_EzC=D9(6tF_RKbxLVCo-v@1FB=vyji+N9$NKQ|bf zC5brU!jQ`NW`gG;%^Gi@5E74fALKpR$w2VQ))FN=iN2_hE;J)U#OLE?h;c^L7?`za^KZOThAAw;qOI>T;QIr6wODDdzvh2uhZAW3zW+<;kMifa z177nf%?Y?C?$XN7uu)9EdiR*b#d+Mn{j%(pYZbs?M0=V^a1Lcgo>=ls3<0~i9Z$$W zJC1rDkZ=uN`0M?rvKi$4!TjZq^0$nVO9hREy{IRBN`k4i9LcX5c9ZW^!V%*$d{jC0 z$hLSXiZVdt-Oi=$F&C%6(WZt^X5L-kL0=O@jAd~hMZVPaB*B>!N^KY7Dgf6}_qVb} zEku8}K$W1xljuvd>z`a;Qo^XmuHFZ)m+lhcZI zYw7@~Cf%!MnD>DtUxKV#7L8KEC#J!jg1<@!CS$*LAX~;*WbRx8PJYl{(T*8LOPZwi zpd%fB&9N9`y+Z!G{`sHuV`2>VH}vEE3%>=Av3)_`qX+2Eo){zMR&D`VZ3~!Tu1ja0 zH4e1B=WIH zN^Ofbp&x}*ly`;vlOJ;Ep0C~?y#Bw{UmwnG-e6JN2{AGsqlH{^!FfxdME$oGwB=Dv zH8yNUd6N4vx|wo_(AN*Dl?leZ+ZqqnvbQ43IYlweE;7D{i9>7+O)!3X;*=I=45(6c zbc4MUG5XNGkyLXcmq_+KaLXeV*QHFq8NGmbC-Wtz{I*!^t(ZDgR#=6YDPoaL%u&`q zpy)hzB0>1pmrgZeQDrNHmvg*h>%{+1D1l8d3$=o&(7+;?lqz3HxBJz zp6aeeZ>zd*xmvxL@6ngRN#uY2p$jk8Tk8FN{qgi4`Bx9$WshL{-VDAkC7CVAZ{VA$ zr15wG!RfD~ahk0U12(4{iNWd!Q+;&uZbW{m_H_e0JM0!P_=+3)ZO zpj@1PdrdtXJ~_6&vZ4NfjLEEz%rl#jJLk}25A#R(Se&))TUsyNOkm~O_@x>?q$-n& zXp4xRNx|*G@2k=IFjY76N)yNlHz>(ScEVmGpZS!q9Bf>SNiVt`0{C@KE^FoT7;q+||JI?E2D(v*3iDYCdOD0r=06e~C5!NFm@4Pq$_%`heeA)RpmsR0 zb*kY$eK+C5uB{c?QUg6T&9d9>wZX-r19gun|Ga-`Eh3?RaR2_uzx!ZTaq#oCd}xj_ z@_+|Ju*-YDJjDyq&}yMDz_lK`#JD;;xQXZeyMNQ!A7qToHFA~}tU+??kq}AZJR|&~ zaH9WJH(JAv*oluL@ad7<`olz@mRi`0QNAC|m@~7S#OmyaC*yxE%+Mjxb4WM85YmQ6 zV$uyx9jU^Ha|N3%uM_(q*Y^S?x(&c~;78CJsTJmSnZ)G~IZ^%ZA*B)-U8s^J!9RIv z2p4xvWE<=XhFfM#M+T+pL3blff%#|&UVrL-Jfpn;6nZr_7twShbtcuBQmSFtQ)Ht2 z@H4?R{uVu!Yn}Gj_k4oKK>gqMpPeT3`v2!YD+xb!E^)0Fd(SA=t~8|q{{~C`(NnEp z;I!#^l~xq$=x=}IGz@B3j*yRPN7|2eWx2)^SE7K6qi8GJyXTj%ybl6>2$45=>(=dGG0fz ziT(uXA2)2e2VigbvGwv(wFH+iX$KSkFnV}xYRCLOOsHCSC@rZ4w@#hcu1^^S$ySvV zaY9!ZvQ)igJ=pNqb=g|1#Pr|SzyIX_IZf!G|GWREaOg+F&(CC--hE{MR^18+`dxdz zttbOkx|br8rrSYpC_FLd;0V!EY|iRw-wgqu9x$r0S0ZoXp$4)=A2_B{@d|NwA?@aa zvRuKV$kvx}xTm)PVm@RzAI@w=8>&VTxqaOz`Pw~`ldl}3e~D`CwH<~tcHAx;1RqWy zEcIj(ISYB*pU!ZtR%4YHYh5&vizyw7YCD}p^aRA|3_ml?f;hgDvQC06m{fE#&LxuY z0lOSxIa5@QL2qA?ScqKXKfZ^>dWPcv;QA38i2wcieduPBHs>8i4x^H4CWBSsU;n@V z{d51+>3`e5a<}%zt~(vIc)?rNZ6o*-z37>_BJ%ISqMkHdF#Fw}vlXV6D( zjeM;AB~9@lSHO?L6lPf>uf5i>_etFPVchDm^#ljOPx6*{Ej}yLi8@-}MKqHei2Uj| z4QqNL4aj!3`>s|Kbj_LebGOGp!(h59D?=p;?!0B3OWTjb^o2gQF0GLG{w9B$K^^Sc z>GtSsehcWQ#~lkEh(Q*YoVM+ZJ-{X}YuIG5Pj67mWJvcht*>ePyMC{wGO(UF)P0GPaJ=6Fubq&skiD+B) z57$2y z$mN*lF_pVU(+=@ddw}CWFl@B8t@?PT0cgcu&nl>vL9p%m&>j^Zcse>EbAzf3Z1m`=!V5Oo{YBvlu~-Zui*L+doU6CUW~Efsk6Vgv3ps_HJCi^Pu% z`!xFrKEa2Z)N7v4J3;^0PUY`6lJK5rUhdeZR`BXqkL_|F0s(=a8%PC1AZy5T=D}z) zB%KrQuk5c!e|LQ{y@NZ9tB657XTX(<7}Ipa zYaL$ce%TgCT1#S<+Lj41!)({p-O3@O?}E^fUJka>@vW>oSdE64=^pu5gkq1Eg`1sR z5Z+s<%Re692i*2YX0#mgV11(4Bh6t#ZK z()$5xs%g1)i4eRck+{sv4}|W1C3)*c~BAb9axCFpjWwm@*b 
zo&WQ54w$HXleF^L60FB7gAeC+!gjB(75;|Ja70l0=td$Zgl9CuChAMEag*YAjfi$E z-*t{8dbSvIzfV-1ogaXb+t;U#a@FDBxrTccS9W@zW2A)(c zg>Mlq*ZIv_u}0r{mZYDBmo&e&T|MFiMWQa}o^WJ<@Owk5B=;V?=PfhwilGFzY>c?} z*`^!qrFa+*KK}rR^7DDi@3uk8S>=6~)rhy%c<(b)QWNf0b$`PADUsmG88=jGR-?q* zd@}uvJAk&%n{_ij;BRHRS~Qu1!D@ESB6W?}Q2SOcv$qvR2dU1+Omv~)vp^lGYhCCl zI<*B$ThUX;EUUn^5#|_5e`a-8BTts1W$j`K*2)sV7@7{Gxh`B9&{Yj$$$?W^SIU8p z-R<`4{hi<7G+&Nks4G$kw-e0?;_?%Eys2+7RV&L-n_f z9o#WaXPX-!fi?Fw{#lzqqzUmqqetrn&MIkrCUv>sN~AI?k_+%Xjkj8Fcp`F=sW!jZ z;)~`EHMWPC6G4p3&X`T`^Xjez8xN>?;ub&I_m&Zve?p&@@Ad# z0_+tq?;b5JPxRWv&vdMxvPuPg-x;@T^)ArS49hQN%7N>{M>0F~N|4&WLSLQmS<=u7 z8AuI}A4m>Z|1eNy&=EHLOu%)jd)AYe0nkiOPU)o-dYPJ`4uoJ#W zjSGCywwXk3L%QnrxW^E9hDbzBXGTNQI_KZ+)Me;Qw>(Kd6@Z(g9u}?s?#40g=NCil z<8hqvvf0n}WN7!|y=gK}^kOIv^bG13;k`E%X$6gB=-H@mnW>dM(>9;act{x+V3IbzPto73=Ur5t$14qC*BSnwjPP`NNB;|g%X~;clse{)t0$>wGvJ}oUjkrMD#G&Co|fYknmKq{u7O8#NKvp z&YI1=aNwoUKGQ8^xY731UW`oi9?4bT77x$G+k@v-J-FI%V`63Bqvu2}Bh-&^@mD1_ zkMVK#@KA6;#Vl^9#NrF*JgGKL{(^QXRN8eo9&7D7Sa~JNRD1OF+_nw5e8@HnotIbK8-VppO-uA|4 ztO?pKq%Zc{{`1^)EL`k@B(Vkk$M65{e-r+r^Sj8fOWxttLS%RxCd|`BLWVgdY4%w% zX1<-iHE@#X^^*~wS3VVlOpHJH=VssIn;P-(xVzbKl4Rz(**OO^tmXSeE{7BF#|f}~boXXEt(S2~fwVqook^}|M%*hlPg za6kJb6?{Y{?YECsVq|pz`xTk7!Q`dSx9^z<6BCi?vLAj5g5#N>SvRnV1foo}%t8O<3Uw;*#e93OYv>RCwe zYmA?M5`LM2)346|^b;$9sQy;BM;iB`bDEYWcqieH<6_rbHYDa5D-;gpjI*|_^l7JO4N`^h+;f!wEjB^~Y)Jzyv6 z=Ikgns9aC3+P_Nhx*TNEZM`#4I^x+oAB9}lcRezI+c+MpYfRRO5qhdTbEl%DKp8C3 z?G%1b^wvP}^Y{yrk?8!vda|vo5(H4_(6**}H2mc LE<<3fzt6KkgY_j6)ZvHA`NS zaQSf2_|75<6lk*OvlmriFe}YFheZ+|r1v?~f_8XRE7MH1s|3SK(oQj_YF1Ppvz zr(NF$d)HdjOxML>dhn(PPP4Z7Ubg3K$7~jSP5(@_s7&mC?(9$vep3i%Z&I93-$?+T ztst@~orGUnZ`^uGCj9%Mdv>4IBKje==p?yovC!xDB^A~qrDpPXdh^9^yw`FAtv?<$cj^+WV%Njf^W zywPI~EQC7G#;!+c@9=o2Kq$9QDHdJTFpn#DMBea+`n%E#AjNEIayNS%Tr$#fD3#6w zGVfsIVyqWN3NZ z40z08rY27KW9xnz8N9L#z%S=lTlD5z!D>cRS%%7Z3$Cc_|+fWfy+0_*OQ@ODN~{a{Zj z$R?{z1!NKX1A2A-l_U{xBPDv#in(H=FRi|II|U*G6c+dhKk0CQhPM5oQgC6+s_Ysj zLA-QmYePj27T-=kPk*5jT30Xp-1MUY8_Y@>Pnt%e($P;odc^a@G((l;(d-E3MNSMHF+qFwkhuohm4EBt1RGDq8vV`GF*TzT(zbH#8- zrqlIhH}+KGv<{lL7C%;A$(m)YEVV&%DHE;Z?Z;{jQ(?XA?Lcv!$fw|cY=W|fUTE`-NJC&$PQF1E+GbdcSfi+CR9cIWJg z@hgCLQO~UAUS^_teB0NL%&+(Z568@eJ2c!#9+>evcp}a-xI5fet zUrpy3fasI@I#Ij)o{Znbu8XT#wc))vKXsb-2#1S{HN1r@(DgKZtUp@`=K2?!?cQ97 z)*`Ysza9i3?3C5$*`5h!I5+Khzs?f}H;&Vt&`SW9@hY49`wH-RQq86ZFEZfcQ`0Jm ziW)pOevmd#H48g>m#w#Vr=niUw(+~}1TT@CHDPR40+q_veP7lUfwOrmN$(RGDz`Is zvTVvhlSb)asznlX@Jid>&?h(meKMmwE~=PrGIamud@Z4%cOa<7%3;epr<3J;S3 zFYbIeyD1vq>l&|8mrTWuB1Nq>i_v-G`uVKh=<#T6#h(4wX%IMVwP%!N7N$D-J^{6wz zb2yUV{tU4MQ^opZfiZ_$n=frV(s;N^@A{gH_1~I5+virI=vX%OXh83=Dh2gu5zL?p;63nXnef5E0Htu)*LM!DK1>?8IOnUs&(9U|x!k6tO zsAY0|)Q_0Ye~tA1v?kwzq>x6#$K#RUDo3W;L-!X~lRtRstDPO& zQG!RW3$JP3%>gD$muEL;5>e)XO*ykgAq46TU%NGwi@&S3Pe)&)5WV$d-nWSJC}YIt z*W!HyZ`MWQ~7wwo`K*zMQ!z208&*`~|2gFAien&A|8O zj;QdFY&^c{e3taK5`wR=W^j?bO6-gSi`hu67c=OXrd)_b(<@ zz)z+b`-eoMDal>@Kx$wJy~r130_9TeEZO7v>}xaI%op}`x-?c9%^ z9>Ewu%6`A)O%=C%o!^Sd;o(I*vTpxZ~l&47krSUn#gktX~P6 zLhSVmQA=L=>Z@zXcS2sKOXHn@}LAVq`zI@vb#@65M?pd^_Es7A)Rxzd)Cn4J?cWjcUs2V8>CJ zoapX`7i5YGj@(VeT~QIbGVk(%xv`w}LR}67Tx8tVt>lTk{gLqp=t;(Kk$p!m~FYT?A9GnoXXu8jo2YJnVEY1@5f2+y-*?_Jx*logpMW-MG zuWkD+ZQD?S1ZdpV`KL89&TQ1kCitZ_BH?ci3N;b^o5dWJjtQtK)>JI08;3gUp3G*Z zrK5W4gOA%~)1V?uwd1p60+1drUY+fG5Az{r`+FazqaRzVu7_b6G@n)SzxOi*`$W9V zBvvxP%Tegg#1t_f`N8#wbbtiW{h#X9B;P|e{q4I>W$~!zJosw5{R32NZRDb;6v60y z>rbkFS?J#Qk>lyZGMtdwc%(w|ITY{_A7E_~*vJ^l#dt^H9l8wG@tkseJI14YVM`)5 z8=q?+*b=zz%`_uLDg`cYzNY6?5eNRe_^R=ID;j&ewEMX^9_)0)lZs|N(Ze#T>C+UV zuC^z$+Lc-yyf3(!voa2N`SYvE6QP)|@cl|{LoQ4&j~njNDnYwxT{RE>YIxqSVSBV8 
z2n_2eCgXYpcRx-eYMwI_DoJL7m(??&U}B^9D31wpntH_^^N#_Afd(JPGiC6YmZtob z4&k$$)5@)WO~%6kQuyXt1fCu!xpC360#$h*SBsQpfXZkBEjFh?jaeRDl13J;bJ&w7 z+nENPF;Nl6&2v#^cazag&i5GWM%%;5lYvp)ncltx7yh`#_#(%8A8@KWbWmzA3H0|) zubr|$c_%$Ay#X#Da^6Au#>*pcb~&fO(X1<^?dYuszvjG^v?Bi;72cvn*JJn>4ptj@pIM*g_q*Mp{k2N7VC z{n;V2j&$tk5k4BE5eY*#6Pm2{l|cXXoR2CO-@*(>;yV5pHelLCyKg_gFCJssrtsxk z4ytlkh_yr)LSyLRslDxau-{h4dG{GPXyag~)^$I&`LfAjJlbQ7sPZn-V>V>*;P$KRu4zLmR@RiSy+~3F^oyb}|}Y z?adOs9F0lQ$~#iNrlE*wiTEIq-)>tRVm-zZh65_?bMhOKk;fr2T>ND&uFZ<*&3=qS zUDg2FnT9m@EVJsT)gAKJ{N?_plHGsb|0euL`StArVsDDOUjRjjHt)ov1>}G6vLRV$ z9)}(ct86doMas9!n`Yk5;`eartzlHZ(2^u)qtZEr0||386u+gv=9k77zcBy7{N<1G zyR<`ZorhJ@z@JL(@nTvt{z_F?N#sg`w3}M{IL{1&W*U!l%h6^uNTjb}x;6nfzo@md zgnFaKNgqiqmKK=mF?dC0BKT6?@;BQ`2<~W-y2uUnZu}u=Tht}#htrD(3wlO|p!x}i zR)1tZI1E(ksXLIMCo6&8Ev_EtpI>%fy50-+)FE_hH^$(|^3!VO*lwbCesa}a=rcS@ zIgncG5P=ItiVZi&WZ++?tt{Z0gEil6qLqDn@tp9-P#v4Dzn+5)?vC_-U%wXqTl%%l zcL!dIOh2mTikwuxI|A=|PY>srOk#Y{{~_+ZqN>`qFkOO(Cf$d9Lfg(OU$>ig!l1VCqV`%d%AMfUNqXFZbxGUTKFsyw^390pLw;S*K6?D- z8%m-cS&*Ley0r*SR3FdhBdY}mffCU_m-=AfFwZg1)J9ZWA4@ik9D+x@tRmjIgq~#F zL^^NK9aC>y6MKSPkj}V~EgcpE)bg(s9d32P$9*cYaaF@W6pUo#1PdXAl;z_B`veN! zw{g{po5$^)zUBOcE@MJM{|~ut8s0C`va9}+{nxlW)4ARE5Bh)qNI%QxS#!DFVF?@f zUo@Z5?nf2(q82i%0YcBzRDL(4li*jst?O6rz^`|+M+y~+@FdIcD|<%>KRxM-x$j~V zm;%;!gt{6*D!#H`TfYoh?u{Bc5PmzZ^lfFL;)K6wcGt8*c|VNq)a^)EU%*<@r+#s_ zrtw3mN3yc5EvgHtGyGQeA5+kpCbEmS@ID!?xviQ1)A zLozazz-uJBsMSsIs5Pr1zfJZ*)~Q3+mMMFH@;AAx(D5kP6jO{zY0m_ApndX*)@YYHMbMICkjx{Y0q2#*;W`}WF2-o zRF5*gEP~{n#lZc#%UMW*s7sh%7iNm8MW44YWFcOI`+rRN$+;1ImU|opN_w6cJ@G+8 zUZep(+2(#ay+GuzGv(6Giwz4h)2c5u`dgBwhZZEt%AU-{lN${vODU8GXvoIv1 z{aL+q2r96o&7I#dhSIDp=T1A+|Mi?**}DAlzw6KFNof8pr;3;^RR5Dx<(N4%aXO|F zcSoE)Vro{8jfc+7l`(eUa#g_|kLP)iKHn8CPUM|_SIs`EG&Br@(yi>w-*ZqCI<5NafCnK!0ia0>cj&* z+r@L*YC-vA-;&w#7daqT_yqc^4r?_bX^FFtC)zkmOW{3HE> zJg327si)oWS)n;LAt4`hSQ=~nb^7tG;bGd#yDPx5U1$DnVht$ya8ND1Cvx?Y+^?o@ zt3tMc(@|_UBe2hev^iaf;M~#b-WD{;^+ z%m3J16qL9)rI0gB0QteH;xW2J_$2u}bBA*?o-|HmdGl)aulsuADC^7f$CbJh(;l3I61J-apln3Hyb_JH0fEA@Tiu*O7w*kdYdg^B`^-DM{!F z=vgtY2ef})c}L`}#)fd%akoQhGIb!WUMIZg*Gyz~??764lgY+U)vzg;|E)%#7Moe# z+`Y`y0ox*K2m9*TMD^XGmEH9eKzuAG1AsC$PtD z!HkWw8MI zDbFTug!E(N@y*F+XU8x^@XD7I+A+BD`4*D_k=w465Eguhy%HlH2`Mo;x1k%=YZFiA zOrR$-a6YQ54=G|)JD%KVgp%VNtkE11&_|$sE88pa5>(n}yJqRst70V}p_-l=lbk#TMAu!FFjcxgEx8?N|D(-U0Kg?92OZcSBJd z*_}t7J@9bgl#rZUHvHk|`(-Ja1G*7DW9c6YaR14#j+s?-Z?47#l#a_ zi3e^}9vn@e*f1i)Hdh8u&qt4VM|WXeFiB~gOC_GnVLX&K?uhv|ujk`Qdf{`)eXSMa zayUJFF6pgcABu|cUeYwkLt9suPhz6Epzih4Ol`FiBAkvZjt1nzRYN7Y60Tyb3u@b4 zPxu33sD>Q2i4y*rGtMHk4t0>GS81XuQWGM`L`;IvG#EYqAu~iO%&Omn|nY;rAUtjdyq0x+&R}8l7@5R9t@8AM^2nNMO z-l`qOFM;`^dEpt)CZxKhDX~iAWb9iCb+(l)LqUJJKGm5L5bsd$J6CB0qxU8s&nM>L zHtMX&kW*a{QKYa~!yXl>ce%z*c>OwKh8YPW zxz}gI6CavTqs6XClPJ&f6TYF#+>MRAh$p#&(J(RkL~s0FG_SL0#v9^6_Dzx78u z7b2yL-9A+KK$u8(rpZ_bIO|Dlo~a9mqk9tTlrtkRfbqQRM=N{ebQ+w=lc>dMo31E- zk4iKvn(uZNC34G5PAOLJs{z(APU8`sLXa5excF168JY=Ulh14>tXmFi3^qP+T!j3*XLB&#{ZPEeLRRZUY-=qOe zOOZp3Vh_09%xRi>o(504CAYY9T7i3;$9B=?9FPuIyY$4Y6<3)(Yu~K*Ls*{Hj;O#q zI5>XrZlq>A?3=D&D<2*}mbnJ`UgCaOO_O*|Cs%}8$D|oI2~Og3r{8@2&ztcLweXHz z+j5a^vtizx-wqNPt3-q=y1~9xPset5A95#LqxjsBg!yqfpUoDUk%Jl36wTj&)Nzs! zubsmXnO~XzTXqOsBtFg=j@1Dp$?@$H-u0Nac5&b3rY=;Md46NPx*mRICfsn~>qImA zAnNlhJ!sa&Fy1WIjrqs@G*y`W;d0DIwvp-Y@_~4FbphKJcz5`X#lQS-vXg58k}p#r zD3&TGilh(u-+12OxiJnp2g6d*_KriQZjjKh3Xv~euAIL)Tn-swKZ{&Ct8lBOvY2Ie zH+cN{8k9=ZQN#Bxz595i3A3J|^$W=%y#AAGp?#wp@?IQgYxy{VuGgD~yNEt!?2iut ze8*bgr{d!?ojW6-D~jokEmb+Nvo7D187hVYJ9y}J5c{u%+a*>_;(34Fe?=Zbiho~! 
zCi0K!7pAo-dR#%1u=uIEAUAy-rB(!O8Ym`!KVMzcQGEoNXqi^8-tI@A5QXrGg;fYh z^IIg@wTxG|?p+uO>%xEO=a>3f>@E?zsDHiw@AYRQ|LFWqU#nTu=9-Epa$FF4Iuw@`Ej(1TF3wdGLsn*5~c-sG- zYaF@=jbukSwO}R1`yN{Ad@ww}ly5i?fcL`~wCMKNAp`4JdEk07?)|o1$Dzz z^%DO2o0%_X4b^fWl5Bk>raKFVx&@<8P2>RYcA<(J&IFIt*|O^X+cKb!y7fxII|}yF ze4iE0C;BEC7swZ!Q_z;{z{w1Ge`ravd!O{F4n6NLbqAH!!MLg&!(>7>Fv&TvDUrrt zmt@S6h*%{$v^d+PM^wV|{g&z$@pU+6wd?YLP$KY3uAP!wt_Qi;$n0s0B%oOJC=0I1 zz?l5Q*3E|Lv`rK>b8~6iH!y)uIwlU(PR29*#tHBXU|9 zsTyeHU3sia?}Nv^dPq#_tI%b0j|DZUB{W^D5!)~%)^VDSx*wEX82=-sry)HQjt_Re zw7r%B``nG}7YCb=Y<3NHzfQn=KPXh(t0RF-&8ApfA_nGJMPTS*680nyAJ}!N1xpsK z&+5lkz**)`J;sDCeET`KH%%55U?Ip8b~W@BY;&Z#?|0S|cjc$PR7pb9t?7;hH$vycIpHuI6a~E*jMiQ}5d=5*VOa~YpHM6M<3ztL z5=2j1b5k4D!N>WW-w8yHs+327n>HkvIsu27io_tqVRD;rej+Y_~i2E+X%uL^|61-J7rg(ljLHf=g8P+}(7{`!r zd5e$O52#2#Jwg2jT~^PjKiCru5*B;Zgk4|Z^{qzk*-HiBBalje>L*cO(5zVcvLh5O z9?aU?mlFz#tjw!CPcyKw;Ec^2Uj;s>*H~HBU%cqee|z8k(mE7giKb)8uSGu>YyOC1 zDR^bd=G07-5Axh3w_ra>)HlV24vMnHK>bo5zD#LFlVi1hR$Qfok0OI%7m*93cQyon zkfx)&$rTqC@kV^8v)BJqUL!8revWz<5C_9rf_mGD{ZZt)>g6^>wwN}!b(2ar@=^+J2`flQ5In5On`&CV`JN!n z5OCk?X)-pt?byfdn1?%s4~@1Ty9b-S+iN5iy?{j2Xz8jM(FYy7Ar=Na`>BP+elQRmR^T0%UjA>jo z3-iVYPa2OGV5?oiQl3r@Z1v=~ebNp%1Ui@ny54Tiz+f$(o5kuSSoLvc&u6PNe90*%mL`&g&np66D4PU} zb$w^&LsAD6zj;dX?(tHTJM2o6Lsf_2fmZo#GDKc6`5l9kgif*J$Z4Y-lUl;a4VDtc zc_1iXC&QWVg%3?D9{64-@*hj;sFr^EA%*)ZSsp@XT`4hp`QC*l@G!LviXF;>8MbC6 zp~jc^YUmmJuq<&NFwuA|mX!wiPj>%4ajgiry&0PW3qlaK_I{GTMD*2mNx3g8dV(Ws z;1z1@(w9h`*=NZL&KXX@BoHDJoBJZbklN zCK0|jG1Iyi3aW`Xm&|_P3wFR{9HrwzBvU&+f#72uHrp|j^hf>E{PykbsR6wHx^%N-{4!0e;n+k(c@ zaE!*5<_d2G#Du;UDm1CUp9{1s&YnJCuJ=OaouCCS2koTnB>Fe~B9w~d^hMZ`v3f?S zw;9CvPR(pOSAy2Ak2mfgD#eNuXH!nO6#-|06YGQ1#nAYzD(eD$81Q`Uv5b!h0sE=i zXbrI{5PE;P?ZNW~jLe#f&ploQ9wTQ@xV=h)mFt_fj4N3XzFVi~=vXjX3VAIpJoH3{ z@2OmujB;`3igWjy+ysnyR#eKU0&PU|vR?LD0Wdut~74$h{-oJW`5qX`>4o4O==hY}khgDQj>9BqH^(sfa8SL_f(dVPd@RRR=Za$G$x+m~tUeVGch>@%K zMK+%T2Pu7jwilH^>^9x}0zx;NO-o}e)jn( zvzm9Y&V%a@nJ7c53owfJbe7}IK2YxBeRfy65XBwkq-H-=;IES&_Qy+;QE}|N_l?pF zTviX&JD^9rx8?afBMW6ByQCQHFd_|yyA$U2m}OP{mJ zhe#9s&cKP-M|F+4^mu5O;I+J)vh5zJv!@YxoLsop zVxK`<gaLQfci*OlD9GYIc8EhK zAIQ9imdi2&A!#6wi-q7j@`_O{%h%_k1(q_9t5v|^`IE!a-BrkR((|xL_hY!YsIHc1 z)`ETUeG8IO1@L~og!?pF;gTE459@$5$UJ02bBa$3BNvng)qU?0ysa_nJLAz1w?BMN zEHx9OPO#t9n7@y1#tF~Zqch;(hq-s>>6-B@O<1}F!LNjmCEwI^iRb&*b5jf?t+4%r zQ8($+7I4uYFdU>IxLaq3)(^j^1Z_2KZqD^&xN(cE<;jz5n06A~tfwx(V6u*)+>?30 zK}(&+kemrYd&2nagsOoj8wHjVNzOStw>k6$Gnhu1%u17A;xdR?q@8I|OSF3yD z@`?56IRj5=9;k1-MRVS(3bG$YUP-^(h(8S1dv9yTLje=B?4BbXDDyJ&z{9aZq{u&h z#B(bj*u7E)Vqa#0pVc&^FSVi5>2&`f0V1c}oaw861b83!97$%Dp`tEE!N z{y`>6G%5kh=^rg{X~g4|Xp0j<2P$By!}9C-fqbAoC>Jo|5e{+M+2VY+Do{_^>mpqr z;ZIsC6Mf881;RR!QbOH%=qbc@F`TXdjxq^QxwJlq(+>h;H&5r|8otae&?v&$Jx8xE zl|-ZdS?$aEeLOJMBqg%lAsPl#C+4{BzQqBZE&el_gzr|PkTm&K5j38DsgvqfheM-T zWQE$^U|uMD^qW~SN?R*cekb~9+Uj})i$U*RPT>qq6$S-56w(k} zX2ZmkKa*MEFq-emJZnhg%rf#sWmgmDRib_omnrw`zw6KblmDM6U;G3Af69L9Za;%z zOeqz!C&;!)t}VybV%!KrXZEtPy40b!?RBS>UI*Cv5H2HdVgxEx&ZMsYB+j>db-S;* z^ufuGqk%t6UqKUBR1D?DJXG`_)^2911jaufSl>Z|{JpTC(TqR-8vj3U zixI}*I9?<6yh87G=ig|jPv$fXL_CRBxK>q;R} zzVQ!<%p{VNm{X)5bAbJ$mJ0N)DHwan+}v`$9!(1<3xr4-AuY;QljU~{9Dk*ilbk^K z2J#eTmNOcVL`f>5`Ro)3?wmB@Y;43E`bn-&mV3~8Bcp2PCb17|Wl_$4Qv<0MQ{rKd zJK*~&H8wSkf!IFxeiwcsw|)LV-j3>a^sAu%^R2KBZnAVmzjmsCU#DA^RhpeW_61f`vyj+dWLN0G-D z-Y9&jhfg{Fatp*caL2j?+t$u_EUKhB+w!0R%n!Z2O3p~=`)-#!KG)R`-=4OzVonD< zER)kpR_+6lTeo=ZRkZj^VM5Q@loXD;qU-uSeA$%enu>dlz7)j@~i{n-= z1V+>tR>*h4)d$tN3Pk@hK)}}Uo8<@$C?NZj)6*!`C;jw^d>zK_VH!=^SquKx1YX{w z>xDz%?^0;D)xgD_jjY3pognvas&>UfujK7+L#%niR4oUaGbUXb%JE9(w_`Nk-gy8z-tIDWWxKHC?%aXYmZ8gSW 
z%YO2`i4a9@N!1Wgk0;fIS8O7xk;UMxRIT(op!qbzeN?ju+qq^H$#goP_u6wimd_PH zDpitNqEdzYrc<*5#reQ!@rI)3W-D&GsxYdYt_8)9-xdw*2_JxY7Js=wG(3H#Q6zMi z@cT-K9bqiUL#3Oab~MdNf%&xHcBAQ9uKXjXMYp}TZqk9$&(^8Rif5&0-E;GEV| zTuOpDw^+YcqAw9_QGKF=q63R>#+z)=<%8s@V>Pk{-Y^(iJNe1H8P}}fx%-7oxcKPa zQ!~yEBzeWmcVoN@2i^48*WVRlM#f^Dwq+Z>%&?GV*3ZK|8awuB=Vzmy)?KUqj93g5 z`}N^s_W*jgo}sxy@Yc)^eKT!HNW+|Kl$FPM8?k+%^E^3oI~w_#Dl!XI0Cm*W>3x$` zI8IwIAIALxbAyivXr+1Kf$;N`icO3 z*(l^Tr(mhcCjOuNTj%t{TVZzA@k8pEH^@&qpISXe+z+b8kJAgR(eDCEz4fSteEPe* z>?V`2{FO_%J*Wrw2)Nt($PoUnO`dxdJGJIRwNuC8Fh=*$i;x!@H9t*O6bY7WDq)*57EPhs{5BZV8iyiL=vG_ zab`AVvGnT3Qs&}|f^1p1zAx8@+M@%0Gi&d-6yAZhJ1P!nJxhf9rTS^X_p0DZ+7IgVB$EVdj$~P7luo0T~NcPt+4Y&8DaMNRi$ zyU&kYmc&NvK9)rtXqpGvoxJ7l)-AAk@l52f5aDxtcFMnVAPTeHM)psgynnD56D)^r^gXM?Lu}u7Yq|}<{45{5 z4=jf3du_k2aS-|mN8T-0(<(UqRPwj>^%`jJP2suXl!Z}d(F_J>>hMKk*P@SV2R=*_ z+Wu{I3P`k_qK-`s!-CTD#4NRP9P;EPRqCn!>;6k@vsnK7{Zr9@4)@$NDNgC>M(I(pDij7xtZjN`bnk%Ncp;AseYpeciGH%w5>Nl z`^M#S-+kIJ%tapcGfy246$#r;Dz>B_%trJRVL)S{K6yY#e+kWxq z9cX+^(~--Tr~||pC=TgV;!_f@KjR;|(Dd_7kNnye{Klzxq`{{R*!He+Tn_C4QB|ci zj37Al@(&d3rU+f?pFH~!E??klGWrv9xduzU$Vn~o`(Ue4e`v+GgI5Wk3`59CLs;_qI_}u9R(kq-n zw+o5ukBTc$5xVn8?m?0}f7)?ud~2SjtQY$^0}~$-dgt$}o00`7Ik1OeLy_LG5ci*Z z_WRV!|RWd-7%vQ5#%*dW1NCyA>K0C2JPq(Y251(l;ydjS-isNx}fOy1h<{F{wh;Uhy&2 zs1Brep0@90?SivyS`i0ZyO6KFt%A~^8;<2!YW=Wn0_{hU1+$hVR3LLd#z-s&`%I`reF zh}Jc-j!wLDt1#%{@orq(K>2MPE;y$>BcfcI4}1K5!##=haYOkd3kgd${LB{JuFcwn zdJC;1i@H_FEKi-Zg*(SqW){Vg-8EDO}%Q*^4JqZ)mLh z_hGxj*J7Jn1H|X{x!@oe2|Q!DdN0(!O3)nS+)rum@z-}u(yFFfkn$e zsaWK2d#kIR=!cK}&T%%YAb2~T5LPr8zyq%{$=#p}_m2EPBZV#$aQK9u!O$ ztttIbgKZ`;n&PwF$a_TVN6X7L6zYo&rFV?SES~W?mRqS1$i24ND-i`1Ta=Hoi9Wz% zf%g?QFJe(gHMPl5D<8cMwK<5V=Ah7NPX*PwK1kNF-@j|63;bx}tVpj9;Dbkgyu~f8 z&>}Q9ec*;K9$?#*ae<@_81^fCa_lI=Kh$ULkc6e-pkH45kqW{mvaK%EJlvC5CC%QL zpY;Xl6NANKQ_i6Lnevj^y(ScvpK_dfl7z?7e{2m!Hz2o2L;9|$PI!7Il+B={9g^Qm z7!~F=!mhq(8G*%C_^NS^&FMiCq}iNgniOusIPJ$|ayckWI?Pa zQYwcHDTw`Rkk|PW#JcdUMQBl_hYfgz%EHe%R)Kw!N34xm9}J!{WG&ZD#bMWSAusA? 
z2z*UNbNgdA@;%u8*z!>?#w-iGOExaY(rl}bK_5GSx7@?4|6Cj9JIPu!jI^O)EeV6U zTptEgyEZoT7GsBwYW)uWHa!1*=J1b7Ga|3B=+Kof9T;8OS-E)23ocWcow_<$k15l2 z4{3?@RO8a|&nvk-P$+eE>6%G1+V=10>|Lw|Hu_PzN56Y8y_8jOg{}hLC@N3t7WCm{ zkgM){V%<~VXQo}h--VZ&$z{(Q^`Vc?N=n-PZuGT$BY(!K0-_n1ZnJZh|5?AapXB1dtH%-56S4nk2l@{w_5bs7m4a*J3_T0RRmeq~dF0DT4Ri!D-!~Vp2g=z; zrV2eoe|_;~_ljIT+Pi(0p)h*u__mMg?T~*e&nl9u>96s);QfT1j+BJt zU$6f>9`q!9|8_ix@+kNJ`G72T*w%jkRt-GoZ}Ueg7r@yD(h^$gGFa-*kfijT1J&%# zClUll*S2k~pZq{EG&GV7dpJe`sZQ)`{>eBfJC|;`Y?6ryXNO)2)Qlj-bgY=sX%A40 z7A&(>k43k4q)P|B`JzXw-;v||UHD?4sdjyJ0HuP?I2vO%-ftnhdHs9`9Qs||z0Pt1d(EsY_9{T+CmIVWGo z()zFO&1Ck^j{ojo`49OSfWfGTjC-ERQ7o2AWX%j0AIpQXf=`X-kHm zs(|G@>qX_B2+(4XVXe2G0(z~_YTxb+NheTlKI)GH4hG- z>6l|U3E#1a%I%?rj=$btaE5-!fA{~;lhFU$c|nv3_x!K(f_Ul?KmTv~fBw_;i89B3 zy#D`q7yr9I|1~~Y7nTbQasv;EO zt%*N;pbD3N6;}>@2*rud&V9{<&N}y{x`1wM1zr!JZ;_R6$Ju+24hXw7K*DIi1Bq{4 zILRHXA-!`9vL08`GO^?kdXRUHH;n!8L(zxG(3O1LO4xP7s;dJz)|8$IPrSz&@fCq$ z@*$)=B=uWKi8!xHxE1C8sRE^Ef3DLM)uFn`3TwYiGpfEN>9}T7i9y*XKZ&(=pj3Qe z+pdLN5M0?IIr)&tQIN~XNNuYB>%N@qT#Ekh_3Qu8KNKX6@Bg5GIKW@F!Qozs$22;r zJW1>D!MV6czZR!pkD0 zj7ugvnt{-lJawsk(}-XYmNWifsUN>{6hC_wY6pkC4P4HAD+GSM7ji!|Q;EONBKmE6 z7pTbA=;Wn$;^p&aH72hWVzZ9ReaFda!YA426#jVxT3mMw&6pK{ztaa>7Mm&H@?%K8 zeWU~`KdAB^sTu^AizYNzq&uN}|85zMbsONn{-m6_FAe2a0!YqI)X*{K_&h)d>?CmW5v4#4-2L% z-Ii{``@b(8nI`Iss#bi02XijGXI*Bct+@G6=KRv>OI1-a@6EpRSv=0A@s*O3fh83_1|J%}R?rO*TVdC}2em*^TKC68{>5OlmeO$|iNKpaesI_< zB^KL!Symst?ghsk>OSL@16age9j!al4YsW<0dcn~QFU#R;zmLRZ2zMcEy&&h_a#m7 zy#%30>vB%wxln_u9zv1APXpl69s1UI=_xF~_uL($3xI9RD7B8g1%10!L!@R)kc})F zDC|rAn!jr=9|-^V`t^UBzyFN?Kl1jlR*Yv2RGyMF)@n?LY7>slf( z7yhyy`AN(x9>pj8?+Cx%-J_zGH!I+7*B467TO)+kJnUzsT>&=Fu;+cWPJq}CdrtcL zCE%^`CWA2AE+Q{Cb<62tIX<(b^-ogCfwtcuETvhE4&(Bp13pRM_``hFhcpZB%CyEz z@eSixDAT>K6;I&khUm%ktsWdp5iO^?MfmQ82XE=ky}&<*a%0TLdckvlg16OZDa4h? 
zVN6mOpnChiy#21DsjNax=iJRzw>1W+o_i^;Wfd|xrrt6o2dG|{_smg856K8=4Dcf*^M_TpB*XlZ zk^@w~d@HMjSCC;T3{ZW+BXLR2zDU(e7jOo|xKZDb$fw0vs31V~qPPjSIK#h4VSwtP z-+xofTP(O(VSws-i}{u)3{YKfv5>|9)q~5LhB(tA_3$lKa)9cM-Lg0E3bKO=15~e^ z-+vrGk+R-Z7@&GXb;2y=&|j%Az_GO(M;G!FDO+QJ>R+9?lIt8UQgv%1&jE<r={r5uJYe z`?gMrNx>5KastRPzj{T>*_=f9KjR2+WBCU~?>IFm5y5>N0XyPuL5cAFog=^ucL_>F zNI`%Ybze5Db|jYw-+rC~ytr2K{6NQXiLeiF1ZWXAYQRxPafz^Oj(`G;t#}~8FftWz2i8-JjYXj-%HBBdz3#P8TA|i zet*%US64oqr41Ybeh*I{JCq}WjT`}fS8u(U#2csP0!M(~dgBBxas>E&^G4EFj-Ar) zW{!Xf(L2k!L_?&2-&x06C37u&mpKCbu3gpGO6ja`IRgCNm2sh)(plFy0{q@wvw5mB zn(`dX-^BYNXBD+us= zid|yyNey)2DZuZ!$?FZ?IEI%a!0)&k^AFp+2LY^Mud6Bb)TwjHd_~ zitVK%dS-+Hku54j4^9BT%jfJ|!#m4L;0W+LIPc9HJcZehBf#&}U!)%3ej7;~0e)Le z%UXWUCC9>_%n{(XdtrEqXjTy5w|i}JiLm`wpF@Ai!_;lDBe40%<%2 z`0ZYGQzDFXjsU;i3nxm1{WwQ}-|n>wCBn?)2=Lpze1Jq4SsVdgxbNUE5q365fZy($ z*Gq);EJuLf?)!jCzia#y0U_LX=$0Yu=QsiUcHc}|A_BkU2=Lo|KVyk7p63Yg+kJ~( ziLfVf1o-W~o2^7-`+`M@F#o_2;J5plLWu~@(^b literal 0 HcmV?d00001 diff --git a/source/tests/pt/models/dpa2_hyb.json b/source/tests/pt/models/dpa2_hyb.json new file mode 100644 index 0000000000..b5d53b0246 --- /dev/null +++ b/source/tests/pt/models/dpa2_hyb.json @@ -0,0 +1,69 @@ +{ + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "hybrid", + "hybrid_mode": "sequential", + "list": [ + { + "type": "se_atten", + "sel": 30, + "rcut_smth": 2.0, + "rcut": 6.0, + "neuron": [ + 2, + 4, + 8 + ], + "axis_neuron": 4, + "attn": 5, + "attn_layer": 0, + "attn_dotr": true, + "attn_mask": false, + "post_ln": true, + "ffn": false, + "ffn_embed_dim": 10, + "activation": "tanh", + "scaling_factor": 1.0, + "head_num": 1, + "normalize": true, + "temperature": 1.0 + }, + { + "type": "se_uni", + "sel": 10, + "rcut_smth": 0.5, + "rcut": 4.0, + "nlayers": 12, + "g1_dim": 8, + "g2_dim": 5, + "attn2_hidden": 3, + "attn2_nhead": 1, + "attn1_hidden": 5, + "attn1_nhead": 1, + "axis_dim": 4, + "update_h2": false, + "update_g1_has_conv": true, + "update_g1_has_grrg": true, + "update_g1_has_drrd": true, + "update_g1_has_attn": true, + "update_g2_has_g1g1": true, + "update_g2_has_attn": true, + "attn2_has_gate": true, + "add_type_ebd_to_seq": false, + "smooth": true + } + ] + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1 + } +} diff --git a/source/tests/pt/models/dpa2_tebd.pth b/source/tests/pt/models/dpa2_tebd.pth new file mode 100644 index 0000000000000000000000000000000000000000..3d4fc5511c93036a18be1b290fcdecc482215bf6 GIT binary patch literal 1085 zcmWIWW@cev;NW1u0AdV047vF!sX6iGshQ~+CB^zFi6x181=%@nP8Rmh+jRLB@105mx@Hz_qGB{MHw4`Nm!Q*uduQF4Y} zd}&E$PBB+}QEF0YW==|cNornkeo=gx5mzCzhDHQCP;W6%Wny}2AqzwcmrH(WQch|x zM3k$LHG&bS$e@rdf*D9B7nByVdrOoQas+xacr$x*v=wrCb9ghh6>@cEfZW4fQpgij z$Xi@n$QQv0)DWMWT9OFzSRp@HUO*!PD9TVMSX(Frw!SR2s2J$ALSb*lVz3UlUkXKP z3q>73%-(Y`P z?Y_N0-0}TyYI%RGUVnUlUGB=6m&4xKzj|P0d}!wz`|9Kr2MOK>`?tipFM7!OWB;uA z(`2Tk*4yct0hJWn7;su)ND1z^u1=w=}MPy|JPBrvI9nxSui zZUV9kMNv$c0CX*?3D9r}@MdGvfhv__)`e>V=2.14.0 +deepmd-kit>=2.2.7 +dpdata +ase +coverage +pytest diff --git a/source/tests/pt/test_LKF.py b/source/tests/pt/test_LKF.py new file mode 100644 index 0000000000..33aeac7f4f --- /dev/null +++ b/source/tests/pt/test_LKF.py @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +from deepmd.pt.entrypoints.main import ( + main, +) + + +class TestLKF(unittest.TestCase): + def test_lkf(self): + with open(str(Path(__file__).parent / "water/lkf.json")) as fin: + content = fin.read() + self.config = 
json.loads(content) + self.config["training"]["training_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/data_0") + ] + self.config["training"]["validation_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/data_0") + ] + self.input_json = "test_lkf.json" + with open(self.input_json, "w") as fp: + json.dump(self.config, fp, indent=4) + main(["train", self.input_json]) + + def tearDown(self): + os.remove(self.input_json) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_autodiff.py b/source/tests/pt/test_autodiff.py new file mode 100644 index 0000000000..4f303a8bb3 --- /dev/null +++ b/source/tests/pt/test_autodiff.py @@ -0,0 +1,190 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import numpy as np +import torch + +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +dtype = torch.float64 + +from .test_permutation import ( + eval_model, + make_sample, + model_dpa1, + model_dpa2, + model_se_e2_a, +) + + +# from deepmd-kit repo +def finite_difference(f, x, delta=1e-6): + in_shape = x.shape + y0 = f(x) + out_shape = y0.shape + res = np.empty(out_shape + in_shape) + for idx in np.ndindex(*in_shape): + diff = np.zeros(in_shape) + diff[idx] += delta + y1p = f(x + diff) + y1n = f(x - diff) + res[(Ellipsis, *idx)] = (y1p - y1n) / (2 * delta) + return res + + +def stretch_box(old_coord, old_box, new_box): + ocoord = old_coord.reshape(-1, 3) + obox = old_box.reshape(3, 3) + nbox = new_box.reshape(3, 3) + ncoord = ocoord @ np.linalg.inv(obox) @ nbox + return ncoord.reshape(old_coord.shape) + + +class ForceTest: + def test( + self, + ): + places = 8 + delta = 1e-5 + natoms = 5 + cell = torch.rand([3, 3], dtype=dtype) + cell = (cell + cell.T) + 5.0 * torch.eye(3) + coord = torch.rand([natoms, 3], dtype=dtype) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]) + # assumes input to be numpy tensor + coord = coord.numpy() + + def np_infer( + coord, + ): + e0, f0, v0 = eval_model( + self.model, torch.tensor(coord).unsqueeze(0), cell.unsqueeze(0), atype + ) + ret = { + "energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + # detach + ret = {kk: ret[kk].detach().cpu().numpy() for kk in ret} + return ret + + def ff(_coord): + return np_infer(_coord)["energy"] + + fdf = -finite_difference(ff, coord, delta=delta).squeeze() + rff = np_infer(coord)["force"] + np.testing.assert_almost_equal(fdf, rff, decimal=places) + + +class VirialTest: + def test( + self, + ): + places = 8 + delta = 1e-4 + natoms = 5 + cell = torch.rand([3, 3], dtype=dtype) + cell = (cell) + 5.0 * torch.eye(3) + coord = torch.rand([natoms, 3], dtype=dtype) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]) + # assumes input to be numpy tensor + coord = coord.numpy() + cell = cell.numpy() + + def np_infer( + new_cell, + ): + e0, f0, v0 = eval_model( + self.model, + torch.tensor(stretch_box(coord, cell, new_cell)).unsqueeze(0), + torch.tensor(new_cell).unsqueeze(0), + atype, + ) + ret = { + "energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + # detach + ret = {kk: ret[kk].detach().cpu().numpy() for kk in ret} + return ret + + def ff(bb): + return np_infer(bb)["energy"] + + fdv = -( + finite_difference(ff, cell, delta=delta).transpose(0, 2, 1) @ cell + ).squeeze() + rfv = np_infer(cell)["virial"] + np.testing.assert_almost_equal(fdv, rfv, decimal=places) + + +class 
TestEnergyModelSeAForce(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + sampled = make_sample(model_params) + self.type_split = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelSeAVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + sampled = make_sample(model_params) + self.type_split = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA1Force(unittest.TestCase, ForceTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA1Virial(unittest.TestCase, VirialTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA2Force(unittest.TestCase, ForceTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPAUniVirial(unittest.TestCase, VirialTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) diff --git a/source/tests/pt/test_calculator.py b/source/tests/pt/test_calculator.py new file mode 100644 index 0000000000..e8382b22b8 --- /dev/null +++ b/source/tests/pt/test_calculator.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import torch + +from deepmd.pt.entrypoints.main import ( + get_trainer, +) +from deepmd.pt.utils.ase_calc import ( + DPCalculator, +) + +dtype = torch.float64 + + +class TestCalculator(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/single") + ] + self.input_json = "test_dp_test.json" + with open(self.input_json, "w") as fp: + json.dump(self.config, fp, indent=4) + + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + + input_dict, label_dict, _ = trainer.get_data(is_train=False) + _, _, more_loss = trainer.wrapper(**input_dict, label=label_dict, cur_lr=1.0) + + self.calculator = DPCalculator("model.pt") + + def test_calculator(self): + from ase import ( + Atoms, + ) + + natoms = 5 + cell = 
torch.eye(3, dtype=dtype) * 10 + coord = torch.rand([natoms, 3], dtype=dtype) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]) + atomic_numbers = [1, 1, 1, 8, 8] + idx_perm = [1, 0, 4, 3, 2] + + prec = 1e-10 + low_prec = 1e-4 + + ase_atoms0 = Atoms( + numbers=atomic_numbers, + positions=coord, + # positions=[tuple(item) for item in coordinate], + cell=cell, + calculator=self.calculator, + ) + e0, f0 = ase_atoms0.get_potential_energy(), ase_atoms0.get_forces() + s0, v0 = ( + ase_atoms0.get_stress(voigt=True), + -ase_atoms0.get_stress(voigt=False) * ase_atoms0.get_volume(), + ) + + ase_atoms1 = Atoms( + numbers=[atomic_numbers[i] for i in idx_perm], + positions=coord[idx_perm, :], + # positions=[tuple(item) for item in coordinate], + cell=cell, + calculator=self.calculator, + ) + e1, f1 = ase_atoms1.get_potential_energy(), ase_atoms1.get_forces() + s1, v1 = ( + ase_atoms1.get_stress(voigt=True), + -ase_atoms1.get_stress(voigt=False) * ase_atoms1.get_volume(), + ) + + assert isinstance(e0, float) + assert f0.shape == (natoms, 3) + assert v0.shape == (3, 3) + torch.testing.assert_close(e0, e1, rtol=low_prec, atol=prec) + torch.testing.assert_close(f0[idx_perm, :], f1, rtol=low_prec, atol=prec) + torch.testing.assert_close(s0, s1, rtol=low_prec, atol=prec) + torch.testing.assert_close(v0, v1, rtol=low_prec, atol=prec) diff --git a/source/tests/pt/test_deeppot.py b/source/tests/pt/test_deeppot.py new file mode 100644 index 0000000000..7f3ecf7d1b --- /dev/null +++ b/source/tests/pt/test_deeppot.py @@ -0,0 +1,81 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pt.entrypoints.main import ( + get_trainer, +) +from deepmd.pt.infer.deep_eval import ( + DeepPot, +) + + +class TestDeepPot(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + self.config["training"]["training_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/single") + ] + self.config["training"]["validation_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/single") + ] + self.input_json = "test_dp_test.json" + with open(self.input_json, "w") as fp: + json.dump(self.config, fp, indent=4) + + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + + input_dict, label_dict, _ = trainer.get_data(is_train=False) + trainer.wrapper(**input_dict, label=label_dict, cur_lr=1.0) + self.model = "model.pt" + + def test_dp_test(self): + dp = DeepPot(str(self.model)) + cell = np.array( + [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + ).reshape(1, 3, 3) + coord = np.array( + [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + ).reshape(1, -1, 3) + atype = np.array([0, 0, 0, 1, 1]).reshape(1, -1) + + 
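+        # smoke test: with atomic=True, DeepPot.eval also returns the
+        # per-atom energies and per-atom virials, unpacked as ae and av below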
e, f, v, ae, av = dp.eval(coord, cell, atype, atomic=True) diff --git a/source/tests/pt/test_descriptor.py b/source/tests/pt/test_descriptor.py new file mode 100644 index 0000000000..da38cf007f --- /dev/null +++ b/source/tests/pt/test_descriptor.py @@ -0,0 +1,166 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import os +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf +import torch + +tf.disable_eager_execution() + +import json +from pathlib import ( + Path, +) + +from deepmd.pt.model.descriptor import ( + prod_env_mat_se_a, +) +from deepmd.pt.utils import ( + dp_random, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSet, +) +from deepmd.pt.utils.env import ( + DEVICE, + GLOBAL_NP_FLOAT_PRECISION, + GLOBAL_PT_FLOAT_PRECISION, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.env import ( + op_module, +) + +CUR_DIR = os.path.dirname(__file__) + + +def base_se_a(rcut, rcut_smth, sel, batch, mean, stddev): + g = tf.Graph() + with g.as_default(): + coord = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + box = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + atype = tf.placeholder(tf.int32, [None, None]) + natoms_vec = tf.placeholder(tf.int32, [None]) + default_mesh = tf.placeholder(tf.int32, [None]) + stat_descrpt, descrpt_deriv, rij, nlist = op_module.prod_env_mat_a( + coord, + atype, + natoms_vec, + box, + default_mesh, + tf.constant(mean), + tf.constant(stddev), + rcut_a=-1.0, + rcut_r=rcut, + rcut_r_smth=rcut_smth, + sel_a=sel, + sel_r=[0 for i in sel], + ) + + net_deriv_reshape = tf.ones_like(stat_descrpt) + force = op_module.prod_force_se_a( + net_deriv_reshape, + descrpt_deriv, + nlist, + natoms_vec, + n_a_sel=sum(sel), + n_r_sel=0, + ) + + with tf.Session(graph=g) as sess: + y = sess.run( + [stat_descrpt, force, nlist], + feed_dict={ + coord: batch["coord"], + box: batch["box"], + natoms_vec: batch["natoms"], + atype: batch["atype"], + default_mesh: np.array([0, 0, 0, 2, 2, 2]), + }, + ) + tf.reset_default_graph() + return y + + +class TestSeA(unittest.TestCase): + def setUp(self): + dp_random.seed(20) + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.bsz = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + ds = DeepmdDataSet( + self.systems, self.bsz, model_config["type_map"], self.rcut, self.sel + ) + self.np_batch, self.pt_batch = ds.get_batch() + self.sec = np.cumsum(self.sel) + self.ntypes = len(self.sel) + self.nnei = sum(self.sel) + + def test_consistency(self): + avg_zero = torch.zeros( + [self.ntypes, self.nnei * 4], dtype=GLOBAL_PT_FLOAT_PRECISION + ) + std_ones = torch.ones( + [self.ntypes, self.nnei * 4], dtype=GLOBAL_PT_FLOAT_PRECISION + ) + base_d, base_force, nlist = base_se_a( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + batch=self.np_batch, + mean=avg_zero, + stddev=std_ones, + ) + + pt_coord = self.pt_batch["coord"] + pt_coord.requires_grad_(True) + index = 
self.pt_batch["mapping"].unsqueeze(-1).expand(-1, -1, 3) + extended_coord = torch.gather(pt_coord, dim=1, index=index) + extended_coord = extended_coord - self.pt_batch["shift"] + my_d, _, _ = prod_env_mat_se_a( + extended_coord.to(DEVICE), + self.pt_batch["nlist"], + self.pt_batch["atype"], + avg_zero.reshape([-1, self.nnei, 4]).to(DEVICE), + std_ones.reshape([-1, self.nnei, 4]).to(DEVICE), + self.rcut, + self.rcut_smth, + ) + my_d.sum().backward() + bsz = pt_coord.shape[0] + my_force = pt_coord.grad.view(bsz, -1, 3).cpu().detach().numpy() + base_force = base_force.reshape(bsz, -1, 3) + base_d = base_d.reshape(bsz, -1, self.nnei, 4) + my_d = my_d.view(bsz, -1, self.nnei, 4).cpu().detach().numpy() + nlist = nlist.reshape(bsz, -1, self.nnei) + + mapping = self.pt_batch["mapping"].cpu() + my_nlist = self.pt_batch["nlist"].view(bsz, -1).cpu() + mask = my_nlist == -1 + my_nlist = my_nlist * ~mask + my_nlist = torch.gather(mapping, dim=-1, index=my_nlist) + my_nlist = my_nlist * ~mask - mask.long() + my_nlist = my_nlist.cpu().view(bsz, -1, self.nnei).numpy() + self.assertTrue(np.allclose(nlist, my_nlist)) + self.assertTrue(np.allclose(np.mean(base_d, axis=2), np.mean(my_d, axis=2))) + self.assertTrue(np.allclose(np.std(base_d, axis=2), np.std(my_d, axis=2))) + # descriptors may be different when there are multiple neighbors in the same distance + self.assertTrue(np.allclose(base_force, -my_force)) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_descriptor_dpa1.py b/source/tests/pt/test_descriptor_dpa1.py new file mode 100644 index 0000000000..689fa7e49c --- /dev/null +++ b/source/tests/pt/test_descriptor_dpa1.py @@ -0,0 +1,367 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import torch + +from deepmd.pt.model.descriptor import ( + DescrptBlockSeAtten, + DescrptDPA1, +) +from deepmd.pt.model.network.network import ( + TypeEmbedNet, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.nlist import ( + build_neighbor_list, + extend_coord_with_ghosts, +) +from deepmd.pt.utils.region import ( + normalize_coord, +) + +dtype = torch.float64 +torch.set_default_dtype(dtype) + +CUR_DIR = os.path.dirname(__file__) + + +class TestDPA1(unittest.TestCase): + def setUp(self): + cell = [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + self.cell = torch.Tensor(cell).view(1, 3, 3).to(env.DEVICE) + coord = [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + self.coord = torch.Tensor(coord).view(1, -1, 3).to(env.DEVICE) + self.atype = torch.IntTensor([0, 0, 0, 1, 1]).view(1, -1).to(env.DEVICE) + self.ref_d = torch.Tensor( + [ + 8.382518544113587780e-03, + -3.390120566088597812e-03, + 6.145981571114964362e-03, + -4.880300873973819273e-03, + -3.390120566088597812e-03, + 1.372540996564941464e-03, + -2.484163690574096341e-03, + 1.972313058658722688e-03, + 6.145981571114964362e-03, + -2.484163690574096341e-03, + 
4.507748738021747671e-03, + -3.579717194906019764e-03, + -4.880300873973819273e-03, + 1.972313058658722688e-03, + -3.579717194906019764e-03, + 2.842794615687799838e-03, + 6.733043802494966066e-04, + -2.721540313345096771e-04, + 4.936158526085561134e-04, + -3.919743287822345223e-04, + -1.311123004527576900e-02, + 5.301179352601203924e-03, + -9.614612349318877454e-03, + 7.634884975521277241e-03, + 8.877088452901006621e-03, + -3.590945566653638409e-03, + 6.508042782015627942e-03, + -5.167671664327699171e-03, + -2.697241463040870365e-03, + 1.091350446825975137e-03, + -1.976895708961905022e-03, + 1.569671412121975348e-03, + 8.645131636261189911e-03, + -3.557395265621639355e-03, + 6.298048561552698106e-03, + -4.999272007935521948e-03, + -3.557395265621639355e-03, + 1.467866637220284964e-03, + -2.587004431651147504e-03, + 2.052752235601402672e-03, + 6.298048561552698106e-03, + -2.587004431651147504e-03, + 4.594085551315935101e-03, + -3.647656549789176847e-03, + -4.999272007935521948e-03, + 2.052752235601402672e-03, + -3.647656549789176847e-03, + 2.896359275520481256e-03, + 6.689620176492027878e-04, + -2.753606422414641049e-04, + 4.864958810186969444e-04, + -3.860599754167503119e-04, + -1.349238259226558101e-02, + 5.547478630961994242e-03, + -9.835472300819447095e-03, + 7.808197926069362048e-03, + 9.220744348752592245e-03, + -3.795799103392961601e-03, + 6.716516319358462918e-03, + -5.331265718473574867e-03, + -2.783836698392940304e-03, + 1.147461939123531121e-03, + -2.025013030986024063e-03, + 1.606944814423778541e-03, + 9.280385723343491378e-03, + -3.515852178447095942e-03, + 7.085282215778941628e-03, + -5.675852414643783178e-03, + -3.515852178447095942e-03, + 1.337760635271160884e-03, + -2.679428786337713451e-03, + 2.145400621815936413e-03, + 7.085282215778941628e-03, + -2.679428786337713451e-03, + 5.414439648102228192e-03, + -4.338426468139268931e-03, + -5.675852414643783178e-03, + 2.145400621815936413e-03, + -4.338426468139268931e-03, + 3.476467482674507146e-03, + 7.166961981167455130e-04, + -2.697932188839837972e-04, + 5.474643906631899504e-04, + -4.386556623669893621e-04, + -1.480434821331240956e-02, + 5.604647062899507579e-03, + -1.130745349141585449e-02, + 9.059113563516829268e-03, + 9.758791063112262978e-03, + -3.701477720487638626e-03, + 7.448215522796466058e-03, + -5.966057584545172120e-03, + -2.845102393948158344e-03, + 1.078743584169829543e-03, + -2.170093031447992756e-03, + 1.738010461687942770e-03, + 9.867599071916231118e-03, + -3.811041717688905522e-03, + 7.121877634386481262e-03, + -5.703120290113914553e-03, + -3.811041717688905522e-03, + 1.474046183772771213e-03, + -2.747386907428428938e-03, + 2.199711055637492037e-03, + 7.121877634386481262e-03, + -2.747386907428428938e-03, + 5.145050639440944609e-03, + -4.120642824501622239e-03, + -5.703120290113914553e-03, + 2.199711055637492037e-03, + -4.120642824501622239e-03, + 3.300262321758350853e-03, + 1.370499995344566383e-03, + -5.313041843655797901e-04, + 9.860110343046961986e-04, + -7.892505817954784597e-04, + -1.507686316307561489e-02, + 5.818961290579217904e-03, + -1.088774506142304276e-02, + 8.719460408506790952e-03, + 9.764630842803939323e-03, + -3.770134041110058572e-03, + 7.049438389985595785e-03, + -5.645302934019884485e-03, + -3.533582373572779437e-03, + 1.367148320603491559e-03, + -2.546602904764623705e-03, + 2.038882844528267305e-03, + 7.448297038731285964e-03, + -2.924276815200288742e-03, + 5.355960540523636154e-03, + -4.280386435083473329e-03, + -2.924276815200288742e-03, + 1.150311064893848757e-03, + 
-2.100635980860638373e-03, + 1.678427895009850001e-03, + 5.355960540523636154e-03, + -2.100635980860638373e-03, + 3.853607053247790071e-03, + -3.080076301871465493e-03, + -4.280386435083473329e-03, + 1.678427895009850001e-03, + -3.080076301871465493e-03, + 2.461876613756722523e-03, + 9.730712866459405395e-04, + -3.821759579990726546e-04, + 6.994242056622360787e-04, + -5.589662297882965055e-04, + -1.138916742131982317e-02, + 4.469391132927387489e-03, + -8.192016282448397885e-03, + 6.547234460517113892e-03, + 7.460070829043288082e-03, + -2.929867802018087421e-03, + 5.363646855497249989e-03, + -4.286347242903034739e-03, + -2.643569023340565718e-03, + 1.038826463247002245e-03, + -1.899910089750410976e-03, + 1.518237240362583541e-03, + ] + ).to(env.DEVICE) + with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: + self.model_json = json.load(fp) + self.file_model_param = Path(CUR_DIR) / "models" / "dpa1.pth" + self.file_type_embed = Path(CUR_DIR) / "models" / "dpa2_tebd.pth" + + def test_descriptor_block(self): + # torch.manual_seed(0) + model_dpa1 = self.model_json + dparams = model_dpa1["descriptor"] + ntypes = len(model_dpa1["type_map"]) + assert "se_atten" == dparams.pop("type") + dparams["ntypes"] = ntypes + des = DescrptBlockSeAtten( + **dparams, + ) + des.load_state_dict(torch.load(self.file_model_param)) + rcut = dparams["rcut"] + nsel = dparams["sel"] + coord = self.coord + atype = self.atype + box = self.cell + nf, nloc = coord.shape[:2] + coord_normalized = normalize_coord(coord, box.reshape(-1, 3, 3)) + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box, rcut + ) + # single nlist + nlist = build_neighbor_list( + extended_coord, extended_atype, nloc, rcut, nsel, distinguish_types=False + ) + # handel type_embedding + type_embedding = TypeEmbedNet(ntypes, 8) + type_embedding.load_state_dict(torch.load(self.file_type_embed)) + + ## to save model parameters + # torch.save(des.state_dict(), 'model_weights.pth') + # torch.save(type_embedding.state_dict(), 'model_weights.pth') + descriptor, env_mat, diff, rot_mat, sw = des( + nlist, + extended_coord, + extended_atype, + type_embedding(extended_atype), + mapping=None, + ) + # np.savetxt('tmp.out', descriptor.detach().numpy().reshape(1,-1), delimiter=",") + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntype()) + torch.testing.assert_close( + descriptor.view(-1), self.ref_d, atol=1e-10, rtol=1e-10 + ) + + def test_descriptor(self): + with open(Path(CUR_DIR) / "models" / "dpa1.json") as fp: + self.model_json = json.load(fp) + model_dpa2 = self.model_json + ntypes = len(model_dpa2["type_map"]) + dparams = model_dpa2["descriptor"] + dparams["ntypes"] = ntypes + assert dparams.pop("type") == "se_atten" + dparams["concat_output_tebd"] = False + des = DescrptDPA1( + **dparams, + ) + target_dict = des.state_dict() + source_dict = torch.load(self.file_model_param) + type_embd_dict = torch.load(self.file_type_embed) + target_dict = translate_se_atten_and_type_embd_dicts_to_dpa1( + target_dict, + source_dict, + type_embd_dict, + ) + des.load_state_dict(target_dict) + + coord = self.coord + atype = self.atype + box = self.cell + nf, nloc = coord.shape[:2] + coord_normalized = normalize_coord(coord, box.reshape(-1, 3, 3)) + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box, des.get_rcut() + ) + nlist = build_neighbor_list( + 
extended_coord, + extended_atype, + nloc, + des.get_rcut(), + des.get_nsel(), + distinguish_types=False, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntype()) + torch.testing.assert_close( + descriptor.view(-1), self.ref_d, atol=1e-10, rtol=1e-10 + ) + + dparams["concat_output_tebd"] = True + des = DescrptDPA1( + **dparams, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + + +def translate_se_atten_and_type_embd_dicts_to_dpa1( + target_dict, + source_dict, + type_embd_dict, +): + all_keys = list(target_dict.keys()) + record = [False for ii in all_keys] + for kk, vv in source_dict.items(): + tk = "se_atten." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = vv + assert len(type_embd_dict.keys()) == 1 + kk = next(iter(type_embd_dict.keys())) + tk = "type_embedding." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = type_embd_dict[kk] + assert all(record) + return target_dict diff --git a/source/tests/pt/test_descriptor_dpa2.py b/source/tests/pt/test_descriptor_dpa2.py new file mode 100644 index 0000000000..45c95961fe --- /dev/null +++ b/source/tests/pt/test_descriptor_dpa2.py @@ -0,0 +1,264 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import torch + +from deepmd.pt.model.descriptor import ( + DescrptBlockHybrid, + DescrptDPA2, +) +from deepmd.pt.model.network.network import ( + TypeEmbedNet, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.nlist import ( + build_neighbor_list, + extend_coord_with_ghosts, +) +from deepmd.pt.utils.region import ( + normalize_coord, +) + +dtype = torch.float64 +torch.set_default_dtype(dtype) + +CUR_DIR = os.path.dirname(__file__) + + +class TestDPA2(unittest.TestCase): + def setUp(self): + cell = [ + 5.122106549439247480e00, + 4.016537340154059388e-01, + 6.951654033828678081e-01, + 4.016537340154059388e-01, + 6.112136112297989143e00, + 8.178091365465004481e-01, + 6.951654033828678081e-01, + 8.178091365465004481e-01, + 6.159552512682983760e00, + ] + self.cell = torch.Tensor(cell).view(1, 3, 3).to(env.DEVICE) + coord = [ + 2.978060152121375648e00, + 3.588469695887098077e00, + 2.792459820604495491e00, + 3.895592322591093115e00, + 2.712091020667753760e00, + 1.366836847133650501e00, + 9.955616170888935690e-01, + 4.121324820711413039e00, + 1.817239061889086571e00, + 3.553661462345699906e00, + 5.313046969500791583e00, + 6.635182659098815883e00, + 6.088601018589653080e00, + 6.575011420004332585e00, + 6.825240650611076099e00, + ] + self.coord = torch.Tensor(coord).view(1, -1, 3).to(env.DEVICE) + self.atype = torch.IntTensor([0, 0, 0, 1, 1]).view(1, -1).to(env.DEVICE) + self.ref_d = torch.Tensor( + [ + 8.435412613327306630e-01, + -4.717109614540972440e-01, + -1.812643456954206256e00, + -2.315248767961955167e-01, + -7.112973006771171613e-01, + -4.162041919507591392e-01, + -1.505159810095323181e00, + -1.191652416985768403e-01, + 8.439214937875325617e-01, + -4.712976890460106594e-01, + -1.812605149396642856e00, + -2.307222236291133766e-01, + -7.115427800870099961e-01, + -4.164729253167227530e-01, + -1.505483119125936797e00, + -1.191288524278367872e-01, + 8.286420823261241297e-01, 
+ -4.535033763979030574e-01, + -1.787877160970498425e00, + -1.961763875645104460e-01, + -7.475459187804838201e-01, + -5.231446874663764346e-01, + -1.488399984491664219e00, + -3.974117581747104583e-02, + 8.283793431613817315e-01, + -4.551551577556525729e-01, + -1.789253136645859943e00, + -1.977673627726055372e-01, + -7.448826048241211639e-01, + -5.161350182531234676e-01, + -1.487589463573479209e00, + -4.377376017839779143e-02, + 8.295404560710329944e-01, + -4.492219258475603216e-01, + -1.784484611185287450e00, + -1.901182059718481143e-01, + -7.537407667483000395e-01, + -5.384371277650709109e-01, + -1.490368056268364549e00, + -3.073744832541754762e-02, + ] + ).to(env.DEVICE) + with open(Path(CUR_DIR) / "models" / "dpa2_hyb.json") as fp: + self.model_json = json.load(fp) + self.file_model_param = Path(CUR_DIR) / "models" / "dpa2.pth" + self.file_type_embed = Path(CUR_DIR) / "models" / "dpa2_tebd.pth" + + def test_descriptor_hyb(self): + # torch.manual_seed(0) + model_hybrid_dpa2 = self.model_json + dparams = model_hybrid_dpa2["descriptor"] + ntypes = len(model_hybrid_dpa2["type_map"]) + dlist = dparams.pop("list") + des = DescrptBlockHybrid( + dlist, + ntypes, + hybrid_mode=dparams["hybrid_mode"], + ) + model_dict = torch.load(self.file_model_param) + # the repformer's type_embd is removed + model_dict.pop("descriptor_list.1.type_embd.embedding.weight") + des.load_state_dict(model_dict) + all_rcut = [ii["rcut"] for ii in dlist] + all_nsel = [ii["sel"] for ii in dlist] + rcut_max = max(all_rcut) + coord = self.coord + atype = self.atype + box = self.cell + nf, nloc = coord.shape[:2] + coord_normalized = normalize_coord(coord, box.reshape(-1, 3, 3)) + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box, rcut_max + ) + ## single nlist + # nlist = build_neighbor_list( + # extended_coord, extended_atype, nloc, + # rcut_max, nsel, distinguish_types=False) + nlist_list = [] + for rcut, sel in zip(all_rcut, all_nsel): + nlist_list.append( + build_neighbor_list( + extended_coord, + extended_atype, + nloc, + rcut, + sel, + distinguish_types=False, + ) + ) + nlist = torch.cat(nlist_list, -1) + # handle the type embedding + type_embedding = TypeEmbedNet(ntypes, 8) + type_embedding.load_state_dict(torch.load(self.file_type_embed)) + + ## to save model parameters + # torch.save(des.state_dict(), 'model_weights.pth') + # torch.save(type_embedding.state_dict(), 'model_weights.pth') + descriptor, env_mat, diff, rot_mat, sw = des( + nlist, + extended_coord, + extended_atype, + type_embedding(extended_atype), + mapping=mapping, + ) + torch.testing.assert_close( + descriptor.view(-1), self.ref_d, atol=1e-10, rtol=1e-10 + ) + + def test_descriptor(self): + with open(Path(CUR_DIR) / "models" / "dpa2.json") as fp: + self.model_json = json.load(fp) + model_dpa2 = self.model_json + ntypes = len(model_dpa2["type_map"]) + dparams = model_dpa2["descriptor"] + dparams["ntypes"] = ntypes + assert dparams.pop("type") == "dpa2" + dparams["concat_output_tebd"] = False + des = DescrptDPA2( + **dparams, + ) + target_dict = des.state_dict() + source_dict = torch.load(self.file_model_param) + # the repformer's type_embd is removed + source_dict.pop("descriptor_list.1.type_embd.embedding.weight") + type_embd_dict = torch.load(self.file_type_embed) + target_dict = translate_hybrid_and_type_embd_dicts_to_dpa2( + target_dict, + source_dict, + type_embd_dict, + ) + des.load_state_dict(target_dict) + + coord = self.coord + atype = self.atype + box = self.cell + nf, nloc = coord.shape[:2]
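+ # The neighbor list below is built at the repinit cutoff and selection; + # the narrower repformer list is presumably derived inside the descriptor.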
+ coord_normalized = normalize_coord(coord, box.reshape(-1, 3, 3)) + extended_coord, extended_atype, mapping = extend_coord_with_ghosts( + coord_normalized, atype, box, des.repinit.rcut + ) + nlist = build_neighbor_list( + extended_coord, + extended_atype, + nloc, + des.repinit.rcut, + des.repinit.sel, + distinguish_types=False, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + self.assertAlmostEqual(6.0, des.get_rcut()) + self.assertEqual(30, des.get_nsel()) + self.assertEqual(2, des.get_ntype()) + torch.testing.assert_close( + descriptor.view(-1), self.ref_d, atol=1e-10, rtol=1e-10 + ) + + dparams["concat_output_tebd"] = True + des = DescrptDPA2( + **dparams, + ) + descriptor, env_mat, diff, rot_mat, sw = des( + extended_coord, + extended_atype, + nlist, + mapping=mapping, + ) + self.assertEqual(descriptor.shape[-1], des.get_dim_out()) + + +def translate_hybrid_and_type_embd_dicts_to_dpa2( + target_dict, + source_dict, + type_embd_dict, +): + all_keys = list(target_dict.keys()) + record = [False for ii in all_keys] + for kk, vv in source_dict.items(): + tk = kk.replace("descriptor_list.1", "repformers") + tk = tk.replace("descriptor_list.0", "repinit") + tk = tk.replace("sequential_transform.0", "g1_shape_tranform") + record[all_keys.index(tk)] = True + target_dict[tk] = vv + assert len(type_embd_dict.keys()) == 1 + kk = next(iter(type_embd_dict.keys())) + tk = "type_embedding." + kk + record[all_keys.index(tk)] = True + target_dict[tk] = type_embd_dict[kk] + assert all(record) + return target_dict diff --git a/source/tests/pt/test_dp_test.py b/source/tests/pt/test_dp_test.py new file mode 100644 index 0000000000..3db66f073f --- /dev/null +++ b/source/tests/pt/test_dp_test.py @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import numpy as np + +from deepmd.pt.entrypoints.main import ( + get_trainer, +) +from deepmd.pt.infer import ( + inference, +) + + +class TestDPTest(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = [ + str(Path(__file__).parent / "water/data/single") + ] + self.input_json = "test_dp_test.json" + with open(self.input_json, "w") as fp: + json.dump(self.config, fp, indent=4) + + def test_dp_test(self): + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + + input_dict, label_dict, _ = trainer.get_data(is_train=False) + _, _, more_loss = trainer.wrapper(**input_dict, label=label_dict, cur_lr=1.0) + + tester = inference.Tester("model.pt", input_script=self.input_json) + try: + res = tester.run() + except StopIteration: + print("Unexpected StopIteration: the number of test steps exceeds the available batches.") + raise + for k, v in res.items(): + if k == "rmse" or "mae" in k or k not in more_loss: + continue + np.testing.assert_allclose( + v, more_loss[k].cpu().detach().numpy(), rtol=1e-04, atol=1e-07 + ) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith(".pt"): + os.remove(f) + if f
in ["lcurve.out"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + os.remove(self.input_json) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_embedding_net.py b/source/tests/pt/test_embedding_net.py new file mode 100644 index 0000000000..fc98ddc9f9 --- /dev/null +++ b/source/tests/pt/test_embedding_net.py @@ -0,0 +1,176 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import re +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf +import torch + +tf.disable_eager_execution() + +from pathlib import ( + Path, +) + +from deepmd.pt.model.descriptor import ( + DescrptSeA, +) +from deepmd.pt.utils import ( + dp_random, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSet, +) +from deepmd.pt.utils.env import ( + DEVICE, + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.descriptor import DescrptSeA as DescrptSeA_tf + +CUR_DIR = os.path.dirname(__file__) + + +def gen_key(worb, depth, elemid): + return (worb, depth, elemid) + + +def base_se_a(descriptor, coord, atype, natoms, box): + g = tf.Graph() + with g.as_default(): + name_pfx = "d_sea_" + t_coord = tf.placeholder( + GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + "t_coord" + ) + t_atype = tf.placeholder(tf.int32, [None, None], name=name_pfx + "t_type") + t_natoms = tf.placeholder( + tf.int32, [descriptor.ntypes + 2], name=name_pfx + "t_natoms" + ) + t_box = tf.placeholder( + GLOBAL_NP_FLOAT_PRECISION, [None, None], name=name_pfx + "t_box" + ) + t_default_mesh = tf.placeholder(tf.int32, [None], name=name_pfx + "t_mesh") + t_embedding = descriptor.build( + t_coord, t_atype, t_natoms, t_box, t_default_mesh, input_dict={} + ) + fake_energy = tf.reduce_sum(t_embedding) + t_force = descriptor.prod_force_virial(fake_energy, t_natoms)[0] + t_vars = {} + for var in tf.global_variables(): + ms = re.findall(r"([a-z]+)_(\d)_(\d)", var.name) + if len(ms) == 1: + m = ms[0] + key = gen_key(worb=m[0], depth=int(m[1]), elemid=int(m[2])) + t_vars[key] = var + init_op = tf.global_variables_initializer() + + with tf.Session(graph=g) as sess: + sess.run(init_op) + embedding, force, values = sess.run( + [t_embedding, t_force, t_vars], + feed_dict={ + t_coord: coord, + t_atype: atype, + t_natoms: natoms, + t_box: box, + t_default_mesh: np.array([0, 0, 0, 2, 2, 2]), + }, + ) + tf.reset_default_graph() + return embedding, force, values + + +class TestSeA(unittest.TestCase): + def setUp(self): + dp_random.seed(0) + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.bsz = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + ds = DeepmdDataSet( + self.systems, self.bsz, model_config["type_map"], self.rcut, self.sel + ) + self.filter_neuron = model_config["descriptor"]["neuron"] + self.axis_neuron = model_config["descriptor"]["axis_neuron"] + self.np_batch, self.torch_batch = ds.get_batch() + + def test_consistency(self): 
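+ # The check proceeds in three steps: evaluate the reference TF se_e2_a + # descriptor through base_se_a(), which also returns its variables keyed + # by (weight-or-bias, depth, element id); copy each TF variable into the + # matching PyTorch parameter; then compare the embeddings directly and + # the forces via autograd, using the summed descriptor as a fake energy.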
+ dp_d = DescrptSeA_tf( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + seed=1, + ) + dp_embedding, dp_force, dp_vars = base_se_a( + descriptor=dp_d, + coord=self.np_batch["coord"], + atype=self.np_batch["atype"], + natoms=self.np_batch["natoms"], + box=self.np_batch["box"], + ) + + # Reproduce with the PyTorch implementation + old_impl = False + descriptor = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + old_impl=old_impl, + ).to(DEVICE) + for name, param in descriptor.named_parameters(): + if old_impl: + ms = re.findall(r"(\d)\.deep_layers\.(\d)\.([a-z]+)", name) + else: + ms = re.findall(r"(\d)\.layers\.(\d)\.([a-z]+)", name) + if len(ms) == 1: + m = ms[0] + key = gen_key(worb=m[2], depth=int(m[1]) + 1, elemid=int(m[0])) + var = dp_vars[key] + with torch.no_grad(): + # Keep parameter values consistent between the 2 implementations + param.data.copy_(torch.from_numpy(var)) + + pt_coord = self.torch_batch["coord"] + pt_coord.requires_grad_(True) + index = self.torch_batch["mapping"].unsqueeze(-1).expand(-1, -1, 3) + extended_coord = torch.gather(pt_coord, dim=1, index=index) + extended_coord = extended_coord - self.torch_batch["shift"] + extended_atype = torch.gather( + self.torch_batch["atype"], dim=1, index=self.torch_batch["mapping"] + ) + descriptor_out, _, _, _, _ = descriptor( + extended_coord, + extended_atype, + self.torch_batch["nlist"], + ) + my_embedding = descriptor_out.cpu().detach().numpy() + fake_energy = torch.sum(descriptor_out) + fake_energy.backward() + my_force = -pt_coord.grad.cpu().numpy() + + # Check + np.testing.assert_allclose(dp_embedding, my_embedding) + dp_force = dp_force.reshape(*my_force.shape) + np.testing.assert_allclose(dp_force, my_force) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_env_mat.py b/source/tests/pt/test_env_mat.py new file mode 100644 index 0000000000..f4931e9ecc --- /dev/null +++ b/source/tests/pt/test_env_mat.py @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import torch + +try: + from deepmd.model_format import ( + EnvMat, + ) + + support_env_mat = True +except ModuleNotFoundError: + support_env_mat = False +except ImportError: + support_env_mat = False + +from deepmd.pt.model.descriptor.env_mat import ( + prod_env_mat_se_a, +) +from deepmd.pt.utils import ( + env, +) + +dtype = env.GLOBAL_PT_FLOAT_PRECISION + + +class TestCaseSingleFrameWithNlist: + def setUp(self): + # nloc == 3, nall == 4 + self.nloc = 3 + self.nall = 4 + self.nf, self.nt = 1, 2 + self.coord_ext = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, -2, 0], + ], + dtype=np.float64, + ).reshape([1, self.nall * 3]) + self.atype_ext = np.array([0, 0, 1, 0], dtype=int).reshape([1, self.nall]) + # sel = [5, 2] + self.sel = [5, 2] + self.nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, 0, -1], + ], + dtype=int, + ).reshape([1, self.nloc, sum(self.sel)]) + self.rcut = 0.4 + self.rcut_smth = 2.2 + + +# to be merged with the tf test case +@unittest.skipIf(not support_env_mat, "EnvMat not supported") +class TestEnvMat(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng() + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei,
4)) + dstd = 0.1 + np.abs(dstd) + em0 = EnvMat(self.rcut, self.rcut_smth) + mm0, ww0 = em0.call(self.coord_ext, self.atype_ext, self.nlist, davg, dstd) + mm1, _, ww1 = prod_env_mat_se_a( + torch.tensor(self.coord_ext, dtype=dtype), + torch.tensor(self.nlist, dtype=int), + torch.tensor(self.atype_ext[:, :nloc], dtype=int), + davg, + dstd, + self.rcut, + self.rcut_smth, + ) + np.testing.assert_allclose(mm0, mm1) + np.testing.assert_allclose(ww0, ww1) diff --git a/source/tests/pt/test_fitting_net.py b/source/tests/pt/test_fitting_net.py new file mode 100644 index 0000000000..3feb4f4739 --- /dev/null +++ b/source/tests/pt/test_fitting_net.py @@ -0,0 +1,139 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import re +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf +import torch + +tf.disable_eager_execution() + +from deepmd.pt.model.task import ( + EnergyFittingNet, +) +from deepmd.pt.utils.env import ( + GLOBAL_NP_FLOAT_PRECISION, +) +from deepmd.tf.fit.ener import ( + EnerFitting, +) + + +class FakeDescriptor: + def __init__(self, ntypes, embedding_width): + self._ntypes = ntypes + self._dim_out = embedding_width + + def get_ntypes(self): + return self._ntypes + + def get_dim_out(self): + return self._dim_out + + +def gen_key(type_id, layer_id, w_or_b): + return (type_id, layer_id, w_or_b) + + +def base_fitting_net(dp_fn, embedding, natoms, atype): + g = tf.Graph() + with g.as_default(): + t_embedding = tf.placeholder(GLOBAL_NP_FLOAT_PRECISION, [None, None]) + t_natoms = tf.placeholder(tf.int32, [None]) + t_atype = tf.placeholder(tf.int32, [None, None]) + t_energy = dp_fn.build(t_embedding, t_natoms, {"atype": t_atype}) + init_op = tf.global_variables_initializer() + t_vars = {} + for var in tf.global_variables(): + key = None + matched = re.match(r"layer_(\d)_type_(\d)/([a-z]+)", var.name) + if matched: + key = gen_key( + type_id=matched.group(2), + layer_id=matched.group(1), + w_or_b=matched.group(3), + ) + else: + matched = re.match(r"final_layer_type_(\d)/([a-z]+)", var.name) + if matched: + key = gen_key( + type_id=matched.group(1), layer_id=-1, w_or_b=matched.group(2) + ) + if key is not None: + t_vars[key] = var + + with tf.Session(graph=g) as sess: + sess.run(init_op) + energy, values = sess.run( + [t_energy, t_vars], + feed_dict={ + t_embedding: embedding, + t_natoms: natoms, + t_atype: atype, + }, + ) + tf.reset_default_graph() + return energy, values + + +class TestFittingNet(unittest.TestCase): + def setUp(self): + nloc = 7 + self.embedding_width = 30 + self.natoms = np.array([nloc, nloc, 2, 5], dtype=np.int32) + rng = np.random.default_rng() + self.embedding = rng.uniform(size=[4, nloc * self.embedding_width]) + self.ntypes = self.natoms.size - 2 + self.n_neuron = [32, 32, 32] + self.atype = np.zeros([4, nloc], dtype=np.int32) + cnt = 0 + for i in range(self.ntypes): + self.atype[:, cnt : cnt + self.natoms[i + 2]] = i + cnt += self.natoms[i + 2] + + fake_d = FakeDescriptor(2, 30) + self.dp_fn = EnerFitting(fake_d, self.n_neuron) + self.dp_fn.bias_atom_e = rng.uniform(size=[self.ntypes]) + + def test_consistency(self): + dp_energy, values = base_fitting_net( + self.dp_fn, self.embedding, self.natoms, self.atype + ) + my_fn = EnergyFittingNet( + self.ntypes, + self.embedding_width, + self.n_neuron, + self.dp_fn.bias_atom_e, + use_tebd=False, + ) + for name, param in my_fn.named_parameters(): + matched = re.match("filter_layers\.(\d).deep_layers\.(\d)\.([a-z]+)", name) + key = None + if matched: + key = gen_key( + type_id=matched.group(1), + 
layer_id=matched.group(2), + w_or_b=matched.group(3), + ) + else: + matched = re.match("filter_layers\.(\d).final_layer\.([a-z]+)", name) + if matched: + key = gen_key( + type_id=matched.group(1), layer_id=-1, w_or_b=matched.group(2) + ) + assert key is not None + var = values[key] + with torch.no_grad(): + # Keep parameter values consistent between the 2 implementations + param.data.copy_(torch.from_numpy(var)) + embedding = torch.from_numpy(self.embedding) + embedding = embedding.view(4, -1, self.embedding_width) + atype = torch.from_numpy(self.atype) + ret = my_fn(embedding, atype) + my_energy = ret["energy"] + my_energy = my_energy.detach() + self.assertTrue(np.allclose(dp_energy, my_energy.numpy().reshape([-1]))) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_force_grad.py b/source/tests/pt/test_force_grad.py new file mode 100644 index 0000000000..1ea4321d21 --- /dev/null +++ b/source/tests/pt/test_force_grad.py @@ -0,0 +1,123 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import unittest +from pathlib import ( + Path, +) +from typing import ( + List, + Optional, +) + +import numpy as np +import torch + +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSystem, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) + + +class CheckSymmetry(DeepmdDataSystem): + def __init__( + self, + sys_path: str, + rcut, + sec, + type_map: Optional[List[str]] = None, + type_split=True, + ): + super().__init__(sys_path, rcut, sec, type_map, type_split) + + def get_disturb(self, index, atom_index, axis_index, delta): + for i in range( + 0, len(self._dirs) + 1 + ): # note: the prefix sums locate the set holding this frame; if the sets were merged, this lookup would be unnecessary + if index < self.prefix_sum[i]: + break + frames = self._load_set(self._dirs[i - 1]) + tmp = copy.deepcopy(frames["coord"].reshape(self.nframes, -1, 3)) + tmp[:, atom_index, axis_index] += delta + frames["coord"] = tmp + frame = self.single_preprocess(frames, index - self.prefix_sum[i - 1]) + return frame + + +def get_data(batch): + inputs = {} + for key in ["coord", "atype", "box"]: + inputs[key] = batch[key].unsqueeze(0).to(env.DEVICE) + return inputs + + +class TestForceGrad(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.system_index = 0 + self.batch_index = 0 + self.get_dataset(self.system_index, self.batch_index) + self.get_model() + + def get_model(self): + training_systems = self.config["training"]["training_data"]["systems"] + model_params = self.config["model"] + data_stat_nbatch = model_params.get("data_stat_nbatch", 10) + train_data = DpLoaderSet( + training_systems, + self.config["training"]["training_data"]["batch_size"], + model_params, + ) + sampled = make_stat_input( + train_data.systems, train_data.dataloaders, data_stat_nbatch + ) + self.model = get_model(self.config["model"], sampled).to(env.DEVICE) + + def get_dataset(self, system_index=0, batch_index=0): + systems = self.config["training"]["training_data"]["systems"] + rcut = self.config["model"]["descriptor"]["rcut"] + sel = self.config["model"]["descriptor"]["sel"]
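+ # sec holds the cumulative sums of the per-type selection counts, i.e. + # the section boundaries the dataset uses to split neighbors by type.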
+ sec = torch.cumsum(torch.tensor(sel), dim=0) + type_map = self.config["model"]["type_map"] + self.dpdatasystem = CheckSymmetry( + sys_path=systems[system_index], rcut=rcut, sec=sec, type_map=type_map + ) + self.origin_batch = self.dpdatasystem._get_item(batch_index) + + @unittest.skip("it can be replaced by autodiff") + def test_force_grad(self, threshold=1e-2, delta0=1e-6, seed=20): + result0 = self.model(**get_data(self.origin_batch)) + # seed the generator so the random perturbations are reproducible + rng = np.random.default_rng(seed) + errors = np.zeros((self.dpdatasystem._natoms, 3)) + for atom_index in range(self.dpdatasystem._natoms): + for axis_index in range(3): + delta = rng.random() * delta0 + disturb_batch = self.dpdatasystem.get_disturb( + self.batch_index, atom_index, axis_index, delta + ) + disturb_result = self.model(**get_data(disturb_batch)) + disturb_force = -(disturb_result["energy"] - result0["energy"]) / delta + disturb_error = ( + result0["force"][0, atom_index, axis_index] - disturb_force + ) + errors[atom_index, axis_index] = disturb_error.detach().cpu().numpy() + self.assertTrue(np.abs(errors).max() < threshold, msg=str(np.abs(errors).max())) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_jit.py b/source/tests/pt/test_jit.py new file mode 100644 index 0000000000..f13dade183 --- /dev/null +++ b/source/tests/pt/test_jit.py @@ -0,0 +1,140 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import shutil +import unittest +from copy import ( + deepcopy, +) +from pathlib import ( + Path, +) + +import torch + +from deepmd.pt.entrypoints.main import ( + get_trainer, +) +from deepmd.pt.infer import ( + inference, +) + +from .test_permutation import ( + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, +) + + +class JITTest: + def test_jit(self): + trainer = get_trainer(deepcopy(self.config)) + trainer.run() + model = torch.jit.script(inference.Tester("./model.pt", numb_test=1).model) + torch.jit.save(model, "./frozen_model.pth", {}) + + def tearDown(self): + for f in os.listdir("."): + if f.startswith("model") and f.endswith("pt"): + os.remove(f) + if f in ["lcurve.out", "frozen_model.pth"]: + os.remove(f) + if f in ["stat_files"]: + shutil.rmtree(f) + + +class TestEnergyModelSeA(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +class TestEnergyModelDPA1(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +class TestEnergyModelDPA2(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f:
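+ # The generic water/se_atten.json input is reused for the DPA2 JIT + # test; the descriptor's rcut/rcut_smth/sel entries are overwritten + # below from the repinit_* values, presumably so the data pipeline + # sees cutoffs consistent with the descriptor.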
self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["model"]["descriptor"]["rcut"] = self.config["model"]["descriptor"][ + "repinit_rcut" + ] + self.config["model"]["descriptor"]["rcut_smth"] = self.config["model"][ + "descriptor" + ]["repinit_rcut_smth"] + self.config["model"]["descriptor"]["sel"] = self.config["model"]["descriptor"][ + "repinit_nsel" + ] + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_hybrid) + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid2(unittest.TestCase, JITTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_hybrid) + self.config["model"]["descriptor"]["hybrid_mode"] = "sequential" + self.config["training"]["numb_steps"] = 10 + self.config["training"]["save_freq"] = 10 + + def tearDown(self): + JITTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_loss.py b/source/tests/pt/test_loss.py new file mode 100644 index 0000000000..14934c7be0 --- /dev/null +++ b/source/tests/pt/test_loss.py @@ -0,0 +1,189 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf +import torch + +tf.disable_eager_execution() +from pathlib import ( + Path, +) + +from deepmd.pt.loss import ( + EnergyStdLoss, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSet, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.loss.ener import ( + EnerStdLoss, +) + +CUR_DIR = os.path.dirname(__file__) + + +def get_batch(): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + # self.rcut_smth = model_config['descriptor']['rcut_smth'] + sel = model_config["descriptor"]["sel"] + batch_size = config["training"]["training_data"]["batch_size"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + dataset = DeepmdDataSet(systems, 
batch_size, model_config["type_map"], rcut, sel) + np_batch, pt_batch = dataset.get_batch() + return np_batch, pt_batch + + +class TestEnerStdLoss(unittest.TestCase): + def setUp(self): + self.start_lr = 1.1 + self.start_pref_e = 0.02 + self.limit_pref_e = 1.0 + self.start_pref_f = 1000.0 + self.limit_pref_f = 1.0 + self.start_pref_v = 0.02 + self.limit_pref_v = 1.0 + self.cur_lr = 1.2 + # data + np_batch, pt_batch = get_batch() + natoms = np_batch["natoms"] + self.nloc = natoms[0] + l_energy, l_force, l_virial = ( + np_batch["energy"], + np_batch["force"], + np_batch["virial"], + ) + p_energy, p_force, p_virial = ( + np.ones_like(l_energy), + np.ones_like(l_force), + np.ones_like(l_virial), + ) + nloc = natoms[0] + batch_size = pt_batch["coord"].shape[0] + atom_energy = np.zeros(shape=[batch_size, nloc]) + atom_pref = np.zeros(shape=[batch_size, nloc * 3]) + # tf + base = EnerStdLoss( + self.start_lr, + self.start_pref_e, + self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + self.g = tf.Graph() + with self.g.as_default(): + t_cur_lr = tf.placeholder(shape=[], dtype=tf.float64) + t_natoms = tf.placeholder(shape=[None], dtype=tf.int32) + t_penergy = tf.placeholder(shape=[None, 1], dtype=tf.float64) + t_pforce = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_pvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) + t_patom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_lenergy = tf.placeholder(shape=[None, 1], dtype=tf.float64) + t_lforce = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_lvirial = tf.placeholder(shape=[None, 9], dtype=tf.float64) + t_latom_energy = tf.placeholder(shape=[None, None], dtype=tf.float64) + t_atom_pref = tf.placeholder(shape=[None, None], dtype=tf.float64) + find_energy = tf.constant(1.0, dtype=tf.float64) + find_force = tf.constant(1.0, dtype=tf.float64) + find_virial = tf.constant(1.0, dtype=tf.float64) + find_atom_energy = tf.constant(0.0, dtype=tf.float64) + find_atom_pref = tf.constant(0.0, dtype=tf.float64) + model_dict = { + "energy": t_penergy, + "force": t_pforce, + "virial": t_pvirial, + "atom_ener": t_patom_energy, + } + label_dict = { + "energy": t_lenergy, + "force": t_lforce, + "virial": t_lvirial, + "atom_ener": t_latom_energy, + "atom_pref": t_atom_pref, + "find_energy": find_energy, + "find_force": find_force, + "find_virial": find_virial, + "find_atom_ener": find_atom_energy, + "find_atom_pref": find_atom_pref, + } + self.base_loss_sess = base.build( + t_cur_lr, t_natoms, model_dict, label_dict, "" + ) + self.feed_dict = { + t_cur_lr: self.cur_lr, + t_natoms: natoms, + t_penergy: p_energy, + t_pforce: p_force, + t_pvirial: p_virial.reshape(-1, 9), + t_patom_energy: atom_energy, + t_lenergy: l_energy, + t_lforce: l_force, + t_lvirial: l_virial.reshape(-1, 9), + t_latom_energy: atom_energy, + t_atom_pref: atom_pref, + } + # torch + self.model_pred = { + "energy": torch.from_numpy(p_energy), + "force": torch.from_numpy(p_force), + "virial": torch.from_numpy(p_virial), + } + self.label = { + "energy": torch.from_numpy(l_energy), + "force": torch.from_numpy(l_force), + "virial": torch.from_numpy(l_virial), + } + self.natoms = pt_batch["natoms"] + + def tearDown(self) -> None: + tf.reset_default_graph() + return super().tearDown() + + def test_consistency(self): + with tf.Session(graph=self.g) as sess: + base_loss, base_more_loss = sess.run( + self.base_loss_sess, feed_dict=self.feed_dict + ) + mine = EnergyStdLoss( + self.start_lr, + self.start_pref_e,
+ self.limit_pref_e, + self.start_pref_f, + self.limit_pref_f, + self.start_pref_v, + self.limit_pref_v, + ) + my_loss, my_more_loss = mine( + self.label, + self.model_pred, + self.nloc, + self.cur_lr, + ) + my_loss = my_loss.detach().cpu() + self.assertTrue(np.allclose(base_loss, my_loss.numpy())) + for key in ["ener", "force", "virial"]: + self.assertTrue( + np.allclose( + base_more_loss["l2_%s_loss" % key], my_more_loss["l2_%s_loss" % key] + ) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_lr.py b/source/tests/pt/test_lr.py new file mode 100644 index 0000000000..ca1ec7e490 --- /dev/null +++ b/source/tests/pt/test_lr.py @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf + +tf.disable_eager_execution() + +from deepmd.pt.utils.learning_rate import ( + LearningRateExp, +) +from deepmd.tf.utils import ( + learning_rate, +) + + +class TestLearningRate(unittest.TestCase): + def setUp(self): + self.start_lr = 0.001 + self.stop_lr = 3.51e-8 + self.decay_steps = np.arange(400, 601, 100) + self.stop_steps = np.arange(500, 1600, 500) + + def test_consistency(self): + for decay_step in self.decay_steps: + for stop_step in self.stop_steps: + self.decay_step = decay_step + self.stop_step = stop_step + self.judge_it() + + def judge_it(self): + base_lr = learning_rate.LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step + ) + g = tf.Graph() + with g.as_default(): + global_step = tf.placeholder(shape=[], dtype=tf.int32) + t_lr = base_lr.build(global_step, self.stop_step) + + my_lr = LearningRateExp( + self.start_lr, self.stop_lr, self.decay_step, self.stop_step + ) + with tf.Session(graph=g) as sess: + base_vals = [ + sess.run(t_lr, feed_dict={global_step: step_id}) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + my_vals = [ + my_lr.value(step_id) + for step_id in range(self.stop_step) + if step_id % self.decay_step != 0 + ] + self.assertTrue(np.allclose(base_vals, my_vals)) + tf.reset_default_graph() + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_mlp.py b/source/tests/pt/test_mlp.py new file mode 100644 index 0000000000..c06047b2a5 --- /dev/null +++ b/source/tests/pt/test_mlp.py @@ -0,0 +1,321 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import torch + +from deepmd.pt.utils.env import ( + PRECISION_DICT, +) + +try: + from deepmd.pt.model.network.mlp import ( + MLP, + MLPLayer, + ) + + support_native_net = True +except ModuleNotFoundError: + support_native_net = False + +try: + from deepmd.pt.model.network.mlp import ( + EmbeddingNet, + ) + + support_embedding_net = True +except ModuleNotFoundError: + support_embedding_net = False + +try: + from deepmd.pt.model.network.mlp import ( + FittingNet, + ) + + support_fitting_net = True +except ModuleNotFoundError: + support_fitting_net = False + + +try: + from deepmd.model_format import ( + NativeLayer, + NativeNet, + ) + + support_native_net = True +except ModuleNotFoundError: + support_native_net = False +except ImportError: + support_native_net = False + +try: + from deepmd.model_format import EmbeddingNet as DPEmbeddingNet + + support_embedding_net = True +except ModuleNotFoundError: + support_embedding_net = False +except ImportError: + support_embedding_net = False + +try: + from deepmd.model_format import FittingNet as DPFittingNet + + support_fitting_net = True +except 
ModuleNotFoundError: + support_fitting_net = False +except ImportError: + support_fitting_net = False + + +def get_tols(prec): + if prec in ["single", "float32"]: + rtol, atol = 0.0, 1e-4 + elif prec in ["double", "float64"]: + rtol, atol = 0.0, 1e-12 + # elif prec in ["half", "float16"]: + # rtol, atol=1e-2, 0 + else: + raise ValueError(f"unknown prec {prec}") + return rtol, atol + + +@unittest.skipIf(not support_native_net, "NativeLayer not supported") +class TestMLPLayer(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [(5, 5), (5, 10), (5, 8), (8, 5)], # inp, out + [True, False], # bias + [True, False], # use time step + ["tanh", "none"], # activation + [True, False], # resnet + [None, [4], [3, 2]], # prefix shapes + ["float32", "double"], # precision + ) + + def test_match_native_layer( + self, + ): + for (ninp, nout), bias, ut, ac, resnet, ashp, prec in self.test_cases: + # input + inp_shap = [ninp] + if ashp is not None: + inp_shap = ashp + inp_shap + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = torch.arange(np.prod(inp_shap), dtype=dtype).view(inp_shap) + # def mlp layer + ml = MLPLayer(ninp, nout, bias, ut, ac, resnet, precision=prec) + # check consistency + nl = NativeLayer.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + nl.call(xx.detach().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"(i={ninp}, o={nout}) bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + # check self-consistency + ml1 = MLPLayer.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + ml1.forward(xx).detach().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"(i={ninp}, o={nout}) bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + + def test_jit(self): + for (ninp, nout), bias, ut, ac, resnet, _, prec in self.test_cases: + ml = MLPLayer(ninp, nout, bias, ut, ac, resnet, precision=prec) + model = torch.jit.script(ml) + ml1 = MLPLayer.deserialize(ml.serialize()) + model = torch.jit.script(ml1) + + +@unittest.skipIf(not support_native_net, "NativeLayer not supported") +class TestMLP(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [[2, 2, 4, 8], [1, 3, 3]], # inp and hiddens + [True, False], # bias + [True, False], # use time step + ["tanh", "none"], # activation + [True, False], # resnet + [None, [4], [3, 2]], # prefix shapes + ["float32", "double"], # precision + ) + + def test_match_native_net( + self, + ): + for ndims, bias, ut, ac, resnet, ashp, prec in self.test_cases: + # input + inp_shap = [ndims[0]] + if ashp is not None: + inp_shap = ashp + inp_shap + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = torch.arange(np.prod(inp_shap), dtype=dtype).view(inp_shap) + # def MLP + layers = [] + for ii in range(1, len(ndims)): + layers.append( + MLPLayer( + ndims[ii - 1], ndims[ii], bias, ut, ac, resnet, precision=prec + ).serialize() + ) + ml = MLP(layers) + # check consistency + nl = NativeNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + nl.call(xx.detach().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"net={ndims} bias={bias} use_dt={ut} act={ac} resnet={resnet} prec={prec}", + ) + # check self-consistency + ml1 = MLP.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + ml1.forward(xx).detach().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"net={ndims} bias={bias} use_dt={ut} act={ac} resnet={resnet} 
prec={prec}", + ) + + def test_jit(self): + for ndims, bias, ut, ac, resnet, _, prec in self.test_cases: + layers = [] + for ii in range(1, len(ndims)): + layers.append( + MLPLayer( + ndims[ii - 1], ndims[ii], bias, ut, ac, resnet, precision=prec + ).serialize() + ) + ml = MLP(layers) + model = torch.jit.script(ml) + ml1 = MLP.deserialize(ml.serialize()) + model = torch.jit.script(ml1) + + +@unittest.skipIf(not support_embedding_net, "EmbeddingNet not supported") +class TestEmbeddingNet(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [1, 3], # inp + [[24, 48, 96], [24, 36]], # and hiddens + ["tanh", "none"], # activation + [True, False], # resnet_dt + ["float32", "double"], # precision + ) + + def test_match_embedding_net( + self, + ): + for idim, nn, act, idt, prec in self.test_cases: + # input + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = torch.arange(idim, dtype=dtype) + # def MLP + ml = EmbeddingNet(idim, nn, act, idt, prec) + # check consistency + nl = DPEmbeddingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + nl.call(xx.detach().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + # check self-consistency + ml1 = EmbeddingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + ml1.forward(xx).detach().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + + def test_jit( + self, + ): + for idim, nn, act, idt, prec in self.test_cases: + # def MLP + ml = EmbeddingNet(idim, nn, act, idt, prec) + ml1 = EmbeddingNet.deserialize(ml.serialize()) + model = torch.jit.script(ml) + model = torch.jit.script(ml1) + + +@unittest.skipIf(not support_fitting_net, "FittingNet not supported") +class TestFittingNet(unittest.TestCase): + def setUp(self): + self.test_cases = itertools.product( + [1, 3], # inp + [1, 5], # out + [[24, 48, 96], [24, 36]], # and hiddens + ["tanh", "none"], # activation + [True, False], # resnet_dt + ["float32", "double"], # precision + [True, False], # bias_out + ) + + def test_match_fitting_net( + self, + ): + for idim, odim, nn, act, idt, prec, ob in self.test_cases: + # input + rtol, atol = get_tols(prec) + dtype = PRECISION_DICT[prec] + xx = torch.arange(idim, dtype=dtype) + # def MLP + ml = FittingNet( + idim, + odim, + neuron=nn, + activation_function=act, + resnet_dt=idt, + precision=prec, + bias_out=ob, + ) + # check consistency + nl = DPFittingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + nl.call(xx.detach().numpy()), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + # check self-consistency + ml1 = FittingNet.deserialize(ml.serialize()) + np.testing.assert_allclose( + ml.forward(xx).detach().numpy(), + ml1.forward(xx).detach().numpy(), + rtol=rtol, + atol=atol, + err_msg=f"idim={idim} nn={nn} use_dt={idt} act={act} prec={prec}", + ) + + def test_jit( + self, + ): + for idim, odim, nn, act, idt, prec, ob in self.test_cases: + # def MLP + ml = FittingNet( + idim, + odim, + neuron=nn, + activation_function=act, + resnet_dt=idt, + precision=prec, + bias_out=ob, + ) + ml1 = FittingNet.deserialize(ml.serialize()) + model = torch.jit.script(ml) + model = torch.jit.script(ml1) diff --git a/source/tests/pt/test_model.py b/source/tests/pt/test_model.py new file mode 100644 index 0000000000..5bbbc9e352 --- /dev/null
+++ b/source/tests/pt/test_model.py @@ -0,0 +1,415 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import collections +import json +import unittest + +import numpy as np +import tensorflow.compat.v1 as tf +import torch + +tf.disable_eager_execution() + +from pathlib import ( + Path, +) + +from deepmd.pt.loss import ( + EnergyStdLoss, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.env import ( + DEVICE, +) +from deepmd.pt.utils.learning_rate import LearningRateExp as MyLRExp +from deepmd.pt.utils.stat import ( + make_stat_input, +) +from deepmd.tf.common import ( + data_requirement, + expand_sys_str, +) +from deepmd.tf.descriptor import DescrptSeA as DescrptSeA_tf +from deepmd.tf.fit import ( + EnerFitting, +) +from deepmd.tf.loss import ( + EnerStdLoss, +) +from deepmd.tf.model import ( + EnerModel, +) +from deepmd.tf.utils.data_system import ( + DeepmdDataSystem, +) +from deepmd.tf.utils.learning_rate import ( + LearningRateExp, +) + +VariableState = collections.namedtuple("VariableState", ["value", "gradient"]) + + +def torch2tf(torch_name): + fields = torch_name.split(".") + offset = int(fields[2] == "networks") + element_id = int(fields[2 + offset]) + if fields[0] == "descriptor": + layer_id = int(fields[4 + offset]) + 1 + weight_type = fields[5 + offset] + return "filter_type_all/%s_%d_%d:0" % (weight_type, layer_id, element_id) + elif fields[3] == "deep_layers": + layer_id = int(fields[4]) + weight_type = fields[5] + return "layer_%d_type_%d/%s:0" % (layer_id, element_id, weight_type) + elif fields[3] == "final_layer": + weight_type = fields[4] + return "final_layer_type_%d/%s:0" % (element_id, weight_type) + else: + raise RuntimeError("Unexpected parameter name: %s" % torch_name) + + +class DpTrainer: + def __init__(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + self.batch_size = config["training"]["training_data"]["batch_size"] + self.type_map = model_config["type_map"] + self.filter_neuron = model_config["descriptor"]["neuron"] + self.axis_neuron = model_config["descriptor"]["axis_neuron"] + self.n_neuron = model_config["fitting_net"]["neuron"] + self.data_stat_nbatch = 3 + self.start_lr = 0.001 + self.stop_lr = 3.51e-8 + self.decay_steps = 500 + self.stop_steps = 1600 + self.start_pref_e = 1.0 + self.limit_pref_e = 2.0 + self.start_pref_f = 2.0 + self.limit_pref_f = 1.0 + self.ntypes = len(self.type_map) + + def get_intermediate_state(self, num_steps=1): + dp_model = self._get_dp_model() + dp_loss = self._get_dp_loss() + dp_lr = self._get_dp_lr() + dp_ds = self._get_dp_dataset() + dp_model.data_stat(dp_ds) + + # Build graph + g = tf.Graph() + with g.as_default(): + place_holders = self._get_dp_placeholders(dp_ds) + model_pred = dp_model.build( + coord_=place_holders["coord"], + atype_=place_holders["type"], + natoms=place_holders["natoms_vec"], + box=place_holders["box"], + 
mesh=place_holders["default_mesh"], + input_dict=place_holders, + ) + global_step = tf.train.get_or_create_global_step() + learning_rate = dp_lr.build(global_step, self.stop_steps) + l2_l, _ = dp_loss.build( + learning_rate=learning_rate, + natoms=place_holders["natoms_vec"], + model_dict=model_pred, + label_dict=place_holders, + suffix="test", + ) + t_vars = tf.trainable_variables() + optimizer = tf.train.AdamOptimizer(learning_rate) + t_grad_and_vars = optimizer.compute_gradients(l2_l, t_vars) + train_op = optimizer.apply_gradients(t_grad_and_vars, global_step) + init_op = tf.global_variables_initializer() + t_heads = { + "loss": l2_l, + "energy": model_pred["energy"], + "force": model_pred["force"], + "virial": model_pred["virial"], + "atomic_virial": model_pred["atom_virial"], + } + + # Get statistics of each component + stat_dict = { + "descriptor.mean": dp_model.descrpt.davg, + "descriptor.stddev": dp_model.descrpt.dstd, + "fitting_net.bias_atom_e": dp_model.fitting.bias_atom_e, + } + + # Get variables and their gradients + with tf.Session(graph=g) as sess: + sess.run(init_op) + for _ in range(num_steps): + batch = dp_ds.get_batch() + feeds = self._get_feed_dict(batch, place_holders) + sess.run(train_op, feed_dict=feeds) + + batch = dp_ds.get_batch() + feeds = self._get_feed_dict(batch, place_holders) + grads_and_vars, head_dict = sess.run( + [t_grad_and_vars, t_heads], feed_dict=feeds + ) + vs_dict = {} + for idx, one in enumerate(t_vars): + grad, var = grads_and_vars[idx] + vs_dict[one.name] = VariableState(var, grad) + + tf.reset_default_graph() + # Used for reproducing + return batch, head_dict, stat_dict, vs_dict + + def _get_dp_dataset(self): + data = DeepmdDataSystem( + systems=self.systems, + batch_size=self.batch_size, + test_size=1, + rcut=self.rcut, + type_map=self.type_map, + trn_all_set=True, + ) + data.add_dict(data_requirement) + return data + + def _get_dp_model(self): + dp_descrpt = DescrptSeA_tf( + rcut=self.rcut, + rcut_smth=self.rcut_smth, + sel=self.sel, + neuron=self.filter_neuron, + axis_neuron=self.axis_neuron, + ) + dp_fitting = EnerFitting(descrpt=dp_descrpt, neuron=self.n_neuron) + return EnerModel( + dp_descrpt, + dp_fitting, + type_map=self.type_map, + data_stat_nbatch=self.data_stat_nbatch, + ) + + def _get_dp_loss(self): + return EnerStdLoss( + starter_learning_rate=self.start_lr, + start_pref_e=self.start_pref_e, + limit_pref_e=self.limit_pref_e, + start_pref_f=self.start_pref_f, + limit_pref_f=self.limit_pref_f, + ) + + def _get_dp_lr(self): + return LearningRateExp( + start_lr=self.start_lr, stop_lr=self.stop_lr, decay_steps=self.decay_steps + ) + + def _get_dp_placeholders(self, dataset): + place_holders = {} + data_dict = dataset.get_data_dict() + for kk in data_dict.keys(): + if kk == "type": + continue + prec = tf.float64 + place_holders[kk] = tf.placeholder(prec, [None], name="t_" + kk) + place_holders["find_" + kk] = tf.placeholder( + tf.float32, name="t_find_" + kk + ) + place_holders["type"] = tf.placeholder(tf.int32, [None], name="t_type") + place_holders["natoms_vec"] = tf.placeholder( + tf.int32, [self.ntypes + 2], name="t_natoms" + ) + place_holders["default_mesh"] = tf.placeholder(tf.int32, [None], name="t_mesh") + place_holders["is_training"] = tf.placeholder(tf.bool) + return place_holders + + def _get_feed_dict(self, batch, place_holders): + feed_dict = {} + for kk in batch.keys(): + if kk == "find_type" or kk == "type": + continue + if "find_" in kk: + feed_dict[place_holders[kk]] = batch[kk] + else: + feed_dict[place_holders[kk]] = 
np.reshape(batch[kk], [-1]) + for ii in ["type"]: + feed_dict[place_holders[ii]] = np.reshape(batch[ii], [-1]) + for ii in ["natoms_vec", "default_mesh"]: + feed_dict[place_holders[ii]] = batch[ii] + feed_dict[place_holders["is_training"]] = True + return feed_dict + + +class TestEnergy(unittest.TestCase): + def setUp(self): + self.dp_trainer = DpTrainer() + self.wanted_step = 0 + for key in dir(self.dp_trainer): + if not key.startswith("_") or key == "get_intermediate_state": + value = getattr(self.dp_trainer, key) + setattr(self, key, value) + + def test_consistency(self): + batch, head_dict, stat_dict, vs_dict = self.dp_trainer.get_intermediate_state( + self.wanted_step + ) + # Build the PyTorch implementation + my_ds = DpLoaderSet( + self.systems, + self.batch_size, + model_params={ + "descriptor": { + "type": "se_e2_a", + "sel": self.sel, + "rcut": self.rcut, + }, + "type_map": self.type_map, + }, + ) + sampled = make_stat_input( + my_ds.systems, my_ds.dataloaders, self.data_stat_nbatch + ) + my_model = get_model( + model_params={ + "descriptor": { + "type": "se_e2_a", + "sel": self.sel, + "rcut_smth": self.rcut_smth, + "rcut": self.rcut, + "neuron": self.filter_neuron, + "axis_neuron": self.axis_neuron, + }, + "fitting_net": {"neuron": self.n_neuron}, + "data_stat_nbatch": self.data_stat_nbatch, + "type_map": self.type_map, + }, + sampled=sampled, + ) + my_model.to(DEVICE) + my_lr = MyLRExp(self.start_lr, self.stop_lr, self.decay_steps, self.stop_steps) + my_loss = EnergyStdLoss( + starter_learning_rate=self.start_lr, + start_pref_e=self.start_pref_e, + limit_pref_e=self.limit_pref_e, + start_pref_f=self.start_pref_f, + limit_pref_f=self.limit_pref_f, + ) + + # Keep statistics consistent between the 2 implementations + my_em = my_model.descriptor + mean = stat_dict["descriptor.mean"].reshape([self.ntypes, my_em.get_nsel(), 4]) + stddev = stat_dict["descriptor.stddev"].reshape( + [self.ntypes, my_em.get_nsel(), 4] + ) + my_em.set_stat_mean_and_stddev( + torch.tensor(mean, device=DEVICE), + torch.tensor(stddev, device=DEVICE), + ) + my_model.fitting_net.bias_atom_e = torch.tensor( + stat_dict["fitting_net.bias_atom_e"], device=DEVICE + ) + + # Keep parameter values consistent between the 2 implementations + for name, param in my_model.named_parameters(): + name = name.replace("sea.", "") + var_name = torch2tf(name) + var = vs_dict[var_name].value + with torch.no_grad(): + src = torch.from_numpy(var) + dst = param.data + # print(name) + # print(src.mean(), src.std()) + # print(dst.mean(), dst.std()) + dst.copy_(src) + # Start forward computation + batch = my_ds.systems[0]._data_system.preprocess(batch) + batch["coord"].requires_grad_(True) + batch["natoms"] = torch.tensor( + batch["natoms_vec"], device=batch["coord"].device + ).unsqueeze(0) + model_predict = my_model( + batch["coord"], batch["atype"], batch["box"], do_atomic_virial=True + ) + model_predict_1 = my_model( + batch["coord"], batch["atype"], batch["box"], do_atomic_virial=False + ) + p_energy, p_force, p_virial, p_atomic_virial = ( + model_predict["energy"], + model_predict["force"], + model_predict["virial"], + model_predict["atomic_virial"], + ) + cur_lr = my_lr.value(self.wanted_step) + model_pred = { + "energy": p_energy, + "force": p_force, + } + label = { + "energy": batch["energy"], + "force": batch["force"], + } + loss, _ = my_loss(model_pred, label, int(batch["natoms"][0, 0]), cur_lr) + np.testing.assert_allclose( + head_dict["energy"], p_energy.view(-1).cpu().detach().numpy() + ) + np.testing.assert_allclose( + head_dict["force"],
p_force.view(*head_dict["force"].shape).cpu().detach().numpy(), + ) + rtol = 1e-5 + atol = 1e-8 + np.testing.assert_allclose( + head_dict["loss"], loss.cpu().detach().numpy(), rtol=rtol, atol=atol + ) + np.testing.assert_allclose( + head_dict["virial"], + p_virial.view(*head_dict["virial"].shape).cpu().detach().numpy(), + ) + np.testing.assert_allclose( + head_dict["virial"], + model_predict_1["virial"] + .view(*head_dict["virial"].shape) + .cpu() + .detach() + .numpy(), + ) + self.assertIsNone(model_predict_1.get("atomic_virial", None)) + np.testing.assert_allclose( + head_dict["atomic_virial"], + p_atomic_virial.view(*head_dict["atomic_virial"].shape) + .cpu() + .detach() + .numpy(), + ) + optimizer = torch.optim.Adam(my_model.parameters(), lr=cur_lr) + optimizer.zero_grad() + + # Compare gradients for consistency + loss.backward() + + for name, param in my_model.named_parameters(): + name = name.replace("sea.", "") + var_name = torch2tf(name) + var_grad = vs_dict[var_name].gradient + param_grad = param.grad.cpu() + var_grad = torch.tensor(var_grad) + assert np.allclose(var_grad, param_grad, rtol=rtol, atol=atol) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_nlist.py b/source/tests/pt/test_nlist.py new file mode 100644 index 0000000000..27c03acfaa --- /dev/null +++ b/source/tests/pt/test_nlist.py @@ -0,0 +1,212 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import torch + +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.nlist import ( + build_multiple_neighbor_list, + build_neighbor_list, + extend_coord_with_ghosts, + get_multiple_nlist_key, +) +from deepmd.pt.utils.region import ( + inter2phys, +) + +dtype = torch.float64 + + +class TestNeighList(unittest.TestCase): + def setUp(self): + self.nf = 3 + self.nloc = 2 + self.ns = 5 * 5 * 3 + self.nall = self.ns * self.nloc + self.cell = torch.tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype + ).to(env.DEVICE) + self.icoord = torch.tensor([[0, 0, 0], [0.5, 0.5, 0.1]], dtype=dtype).to( + env.DEVICE + ) + self.atype = torch.tensor([0, 1], dtype=torch.int).to(env.DEVICE) + [self.cell, self.icoord, self.atype] = [ + ii.unsqueeze(0) for ii in [self.cell, self.icoord, self.atype] + ] + self.coord = inter2phys(self.icoord, self.cell).view([-1, self.nloc * 3]) + self.cell = self.cell.view([-1, 9]) + [self.cell, self.coord, self.atype] = [ + torch.tile(ii, [self.nf, 1]) for ii in [self.cell, self.coord, self.atype] + ] + self.rcut = 1.01 + self.prec = 1e-10 + self.nsel = [10, 10] + # generated by preprocess.build_neighbor_list + # ref_nlist, _, _ = legacy_build_neighbor_list( + # 2, ecoord[0], eatype[0], + # self.rcut, + # torch.tensor([10,20], dtype=torch.long), + # mapping[0], type_split=True, ) + self.ref_nlist = torch.tensor( + [ + [0, 0, 0, 0, 0, 0, -1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, -1, -1], + [0, 0, 0, 0, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1], + ] + ).to(env.DEVICE) + + def test_build_notype(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + sum(self.nsel), + distinguish_types=False, + ) + torch.testing.assert_close(nlist[0], nlist[1]) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + torch.testing.assert_close( + torch.sort(nlist_loc, dim=-1)[0],
+ torch.sort(self.ref_nlist, dim=-1)[0], + ) + + def test_build_type(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + nlist = build_neighbor_list( + ecoord, + eatype, + self.nloc, + self.rcut, + self.nsel, + distinguish_types=True, + ) + torch.testing.assert_close(nlist[0], nlist[1]) + nlist_mask = nlist[0] == -1 + nlist_loc = mapping[0][nlist[0]] + nlist_loc[nlist_mask] = -1 + for ii in range(2): + torch.testing.assert_close( + torch.sort(torch.split(nlist_loc, self.nsel, dim=-1)[ii], dim=-1)[0], + torch.sort(torch.split(self.ref_nlist, self.nsel, dim=-1)[ii], dim=-1)[ + 0 + ], + ) + + def test_build_multiple_nlist(self): + rcuts = [1.01, 2.01] + nsels = [20, 80] + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, max(rcuts) + ) + nlist1 = build_neighbor_list( + ecoord, + eatype, + self.nloc, + rcuts[1], + nsels[1] - 1, + distinguish_types=False, + ) + pad = -1 * torch.ones( + [self.nf, self.nloc, 1], dtype=nlist1.dtype, device=nlist1.device + ) + nlist2 = torch.cat([nlist1, pad], dim=-1) + nlist0 = build_neighbor_list( + ecoord, + eatype, + self.nloc, + rcuts[0], + nsels[0], + distinguish_types=False, + ) + nlists = build_multiple_neighbor_list(ecoord, nlist1, rcuts, nsels) + for dd in range(2): + self.assertEqual( + nlists[get_multiple_nlist_key(rcuts[dd], nsels[dd])].shape[-1], + nsels[dd], + ) + torch.testing.assert_close( + nlists[get_multiple_nlist_key(rcuts[0], nsels[0])], + nlist0, + ) + torch.testing.assert_close( + nlists[get_multiple_nlist_key(rcuts[1], nsels[1])], + nlist2, + ) + + def test_extend_coord(self): + ecoord, eatype, mapping = extend_coord_with_ghosts( + self.coord, self.atype, self.cell, self.rcut + ) + # expected ncopy x nloc + self.assertEqual(list(ecoord.shape), [self.nf, self.nall * 3]) + self.assertEqual(list(eatype.shape), [self.nf, self.nall]) + self.assertEqual(list(mapping.shape), [self.nf, self.nall]) + # check the nloc part is identical with original coord + torch.testing.assert_close( + ecoord[:, : self.nloc * 3], self.coord, rtol=self.prec, atol=self.prec + ) + # check the shift vectors are aligned with grid + shift_vec = ( + ecoord.view([-1, self.ns, self.nloc, 3]) + - self.coord.view([-1, self.nloc, 3])[:, None, :, :] + ) + shift_vec = shift_vec.view([-1, self.nall, 3]) + # hack!!! 
assumes identical cell across frames + shift_vec = torch.matmul( + shift_vec, torch.linalg.inv(self.cell.view([self.nf, 3, 3])[0]) + ) + # nf x nall x 3 + shift_vec = torch.round(shift_vec) + # check: identical shift vecs + torch.testing.assert_close( + shift_vec[0], shift_vec[1], rtol=self.prec, atol=self.prec + ) + # check: shift idx aligned with grid + mm, cc = torch.unique(shift_vec[0][:, 0], dim=-1, return_counts=True) + torch.testing.assert_close( + mm, + torch.tensor([-2, -1, 0, 1, 2], dtype=dtype).to(env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + torch.testing.assert_close( + cc, + torch.tensor([30, 30, 30, 30, 30], dtype=torch.long).to(env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + mm, cc = torch.unique(shift_vec[1][:, 1], dim=-1, return_counts=True) + torch.testing.assert_close( + mm, + torch.tensor([-2, -1, 0, 1, 2], dtype=dtype).to(env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + torch.testing.assert_close( + cc, + torch.tensor([30, 30, 30, 30, 30], dtype=torch.long).to(env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + mm, cc = torch.unique(shift_vec[1][:, 2], dim=-1, return_counts=True) + torch.testing.assert_close( + mm, + torch.tensor([-1, 0, 1], dtype=dtype).to(env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) + torch.testing.assert_close( + cc, + torch.tensor([50, 50, 50], dtype=torch.long).to(env.DEVICE), + rtol=self.prec, + atol=self.prec, + ) diff --git a/source/tests/pt/test_permutation.py b/source/tests/pt/test_permutation.py new file mode 100644 index 0000000000..b9724bb2af --- /dev/null +++ b/source/tests/pt/test_permutation.py @@ -0,0 +1,322 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest +from pathlib import ( + Path, +) + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) + +dtype = torch.float64 + +model_se_e2_a = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_e2_a", + "sel": [46, 92, 4], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [25, 50, 100], + "resnet_dt": False, + "axis_neuron": 16, + "seed": 1, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, + "data_stat_nbatch": 20, +} + +model_dpa2 = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "dpa2", + "repinit_rcut": 6.0, + "repinit_rcut_smth": 2.0, + "repinit_nsel": 30, + "repformer_rcut": 4.0, + "repformer_rcut_smth": 0.5, + "repformer_nsel": 20, + "repinit_neuron": [2, 4, 8], + "repinit_axis_neuron": 4, + "repinit_activation": "tanh", + "repformer_nlayers": 12, + "repformer_g1_dim": 8, + "repformer_g2_dim": 5, + "repformer_attn2_hidden": 3, + "repformer_attn2_nhead": 1, + "repformer_attn1_hidden": 5, + "repformer_attn1_nhead": 1, + "repformer_axis_dim": 4, + "repformer_update_h2": False, + "repformer_update_g1_has_conv": True, + "repformer_update_g1_has_grrg": True, + "repformer_update_g1_has_drrd": True, + "repformer_update_g1_has_attn": True, + "repformer_update_g2_has_g1g1": True, + "repformer_update_g2_has_attn": True, + "repformer_attn2_has_gate": True, + "repformer_add_type_ebd_to_seq": False, + }, + "fitting_net": { + "neuron": [24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + +model_dpa1 = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [25, 50, 
100], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": True, + "attn_mask": False, + "post_ln": True, + "ffn": False, + "ffn_embed_dim": 512, + "activation": "tanh", + "scaling_factor": 1.0, + "head_num": 1, + "normalize": False, + "temperature": 1.0, + "set_davg_zero": True, + }, + "fitting_net": { + "neuron": [24, 24, 24], + "resnet_dt": True, + "seed": 1, + }, +} + + +model_hybrid = { + "type_map": ["O", "H", "B"], + "descriptor": { + "type": "hybrid", + "list": [ + { + "type": "se_atten", + "sel": 120, + "rcut_smth": 0.5, + "rcut": 6.0, + "neuron": [25, 50, 100], + "axis_neuron": 16, + "attn": 128, + "attn_layer": 0, + "attn_dotr": True, + "attn_mask": False, + "post_ln": True, + "ffn": False, + "ffn_embed_dim": 1024, + "activation": "tanh", + "scaling_factor": 1.0, + "head_num": 1, + "normalize": True, + "temperature": 1.0, + }, + { + "type": "dpa2", + "repinit_rcut": 6.0, + "repinit_rcut_smth": 2.0, + "repinit_nsel": 30, + "repformer_rcut": 4.0, + "repformer_rcut_smth": 0.5, + "repformer_nsel": 10, + "repinit_neuron": [2, 4, 8], + "repinit_axis_neuron": 4, + "repinit_activation": "tanh", + "repformer_nlayers": 12, + "repformer_g1_dim": 8, + "repformer_g2_dim": 5, + "repformer_attn2_hidden": 3, + "repformer_attn2_nhead": 1, + "repformer_attn1_hidden": 5, + "repformer_attn1_nhead": 1, + "repformer_axis_dim": 4, + "repformer_update_h2": False, + "repformer_update_g1_has_conv": True, + "repformer_update_g1_has_grrg": True, + "repformer_update_g1_has_drrd": True, + "repformer_update_g1_has_attn": True, + "repformer_update_g2_has_g1g1": True, + "repformer_update_g2_has_attn": True, + "repformer_attn2_has_gate": True, + "repformer_add_type_ebd_to_seq": False, + }, + ], + }, + "fitting_net": { + "neuron": [240, 240, 240], + "resnet_dt": True, + "seed": 1, + "_comment": " that's all", + }, + "_comment": " that's all", +} + + +def make_sample(model_params): + training_systems = [ + str(Path(__file__).parent / "water/data/data_0"), + ] + data_stat_nbatch = model_params.get("data_stat_nbatch", 10) + train_data = DpLoaderSet( + training_systems, + batch_size=4, + model_params=model_params.copy(), + ) + sampled = make_stat_input( + train_data.systems, train_data.dataloaders, data_stat_nbatch + ) + return sampled + + +class PermutationTest: + def test( + self, + ): + natoms = 5 + cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) + coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + idx_perm = [1, 0, 4, 3, 2] + e0, f0, v0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret0 = { + "energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + e1, f1, v1 = eval_model( + self.model, coord[idx_perm].unsqueeze(0), cell.unsqueeze(0), atype[idx_perm] + ) + ret1 = { + "energy": e1.squeeze(0), + "force": f1.squeeze(0), + "virial": v1.squeeze(0), + } + prec = 1e-10 + torch.testing.assert_close(ret0["energy"], ret1["energy"], rtol=prec, atol=prec) + torch.testing.assert_close( + ret0["force"][idx_perm], ret1["force"], rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + torch.testing.assert_close( + ret0["virial"], ret1["virial"], rtol=prec, atol=prec + ) + + +class TestEnergyModelSeA(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + sampled = make_sample(model_params) + self.type_split = 
False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA1(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA2(unittest.TestCase, PermutationTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestForceModelDPA2(unittest.TestCase, PermutationTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestForceModelHybrid(unittest.TestCase, PermutationTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + sampled = make_sample(model_params) + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +# class TestEnergyFoo(unittest.TestCase): +# def test(self): +# model_params = model_dpau +# sampled = make_sample(model_params) +# self.model = EnergyModelDPAUni(model_params, sampled).to(env.DEVICE) + +# natoms = 5 +# cell = torch.rand([3, 3], dtype=dtype) +# cell = (cell + cell.T) + 5. 
* torch.eye(3) +# coord = torch.rand([natoms, 3], dtype=dtype) +# coord = torch.matmul(coord, cell) +# atype = torch.IntTensor([0, 0, 0, 1, 1]) +# idx_perm = [1, 0, 4, 3, 2] +# ret0 = infer_model(self.model, coord, cell, atype, type_split=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_permutation_denoise.py b/source/tests/pt/test_permutation_denoise.py new file mode 100644 index 0000000000..47bd0360f2 --- /dev/null +++ b/source/tests/pt/test_permutation_denoise.py @@ -0,0 +1,102 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +from .test_permutation import ( # model_dpau, + make_sample, + model_dpa1, + model_dpa2, + model_hybrid, +) + +dtype = torch.float64 + +model_dpa1 = copy.deepcopy(model_dpa1) +model_dpa2 = copy.deepcopy(model_dpa2) +model_hybrid = copy.deepcopy(model_hybrid) +model_dpa1["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_dpa1.pop("fitting_net") +model_dpa2["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_dpa2.pop("fitting_net") +model_hybrid["type_map"] = ["O", "H", "B", "MASKED_TOKEN"] +model_hybrid.pop("fitting_net") + + +class PermutationDenoiseTest: + def test( + self, + ): + natoms = 5 + cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) + coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + idx_perm = [1, 0, 4, 3, 2] + updated_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)} + updated_c1, logits1 = eval_model( + self.model, + coord[idx_perm].unsqueeze(0), + cell.unsqueeze(0), + atype[idx_perm], + denoise=True, + ) + ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)} + prec = 1e-10 + torch.testing.assert_close( + ret0["updated_coord"][idx_perm], ret1["updated_coord"], rtol=prec, atol=prec + ) + torch.testing.assert_close( + ret0["logits"][idx_perm], ret1["logits"], rtol=prec, atol=prec + ) + + +class TestDenoiseModelDPA1(unittest.TestCase, PermutationDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestDenoiseModelDPA2(unittest.TestCase, PermutationDenoiseTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +# @unittest.skip("hybrid not supported at the moment") +# class TestDenoiseModelHybrid(unittest.TestCase, TestPermutationDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# sampled = make_sample(model_params) +# self.type_split = True +# self.model = get_model(model_params, sampled).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_region.py 
b/source/tests/pt/test_region.py new file mode 100644 index 0000000000..e8a3346562 --- /dev/null +++ b/source/tests/pt/test_region.py @@ -0,0 +1,78 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import unittest + +import numpy as np +import torch + +from deepmd.pt.utils.preprocess import ( + Region3D, +) +from deepmd.pt.utils.region import ( + inter2phys, + to_face_distance, +) + +dtype = torch.float64 + + +class TestRegion(unittest.TestCase): + def setUp(self): + self.cell = torch.tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype + ) + self.cell = self.cell.unsqueeze(0).unsqueeze(0) + self.cell = torch.tile(self.cell, [4, 5, 1, 1]) + self.prec = 1e-8 + + def test_inter_to_phys(self): + inter = torch.rand([4, 5, 3, 3], dtype=dtype) + phys = inter2phys(inter, self.cell) + for ii in range(4): + for jj in range(5): + expected_phys = torch.matmul(inter[ii, jj], self.cell[ii, jj]) + torch.testing.assert_close( + phys[ii, jj], expected_phys, rtol=self.prec, atol=self.prec + ) + + def test_to_face_dist(self): + cell0 = self.cell[0][0].numpy() + vol = np.linalg.det(cell0) + # area of surfaces xy, xz, yz + sxy = np.linalg.norm(np.cross(cell0[0], cell0[1])) + sxz = np.linalg.norm(np.cross(cell0[0], cell0[2])) + syz = np.linalg.norm(np.cross(cell0[1], cell0[2])) + # vol / area gives distance + dz = vol / sxy + dy = vol / sxz + dx = vol / syz + expected = torch.tensor([dx, dy, dz]) + dists = to_face_distance(self.cell) + for ii in range(4): + for jj in range(5): + torch.testing.assert_close( + dists[ii][jj], expected, rtol=self.prec, atol=self.prec + ) + + +class TestLegacyRegion(unittest.TestCase): + def setUp(self): + self.cell = torch.tensor( + [[1, 0, 0], [0.4, 0.8, 0], [0.1, 0.3, 2.1]], dtype=dtype + ) + self.prec = 1e-6 + + def test_inter_to_phys(self): + inter = torch.rand([3, 3], dtype=dtype) + reg = Region3D(self.cell) + phys = reg.inter2phys(inter) + expected_phys = torch.matmul(inter, self.cell) + torch.testing.assert_close(phys, expected_phys, rtol=self.prec, atol=self.prec) + + def test_inter_to_inter(self): + inter = torch.rand([3, 3], dtype=dtype) + reg = Region3D(self.cell) + new_inter = reg.phys2inter(reg.inter2phys(inter)) + torch.testing.assert_close(inter, new_inter, rtol=self.prec, atol=self.prec) + + def test_to_face_dist(self): + pass diff --git a/source/tests/pt/test_rot.py b/source/tests/pt/test_rot.py new file mode 100644 index 0000000000..b5d9d9b64b --- /dev/null +++ b/source/tests/pt/test_rot.py @@ -0,0 +1,181 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +from .test_permutation import ( # model_dpau, + make_sample, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, +) + +dtype = torch.float64 + + +class RotTest: + def test( + self, + ): + prec = 1e-10 + natoms = 5 + cell = 10.0 * torch.eye(3, dtype=dtype).to(env.DEVICE) + coord = 2 * torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + shift = torch.tensor([4, 4, 4], dtype=dtype).to(env.DEVICE) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + from scipy.stats import ( + special_ortho_group, + ) + + rmat = torch.tensor(special_ortho_group.rvs(3), dtype=dtype).to(env.DEVICE) + + # rotate only coord and shift to the center of cell + coord_rot = torch.matmul(coord, rmat) + e0, f0, v0 = eval_model( + self.model, (coord + shift).unsqueeze(0), cell.unsqueeze(0), atype + ) + ret0 = { + 
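+        # Expected behavior under a rigid rotation rmat (row-vector
+        # convention, coord_rot = coord @ rmat), as asserted below:
+        #   energy: invariant            E' == E
+        #   force:  equivariant          F' == F @ rmat
+        #   virial: rank-2 equivariant   V' == rmat.T @ V @ rmat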
"energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + e1, f1, v1 = eval_model( + self.model, (coord_rot + shift).unsqueeze(0), cell.unsqueeze(0), atype + ) + ret1 = { + "energy": e1.squeeze(0), + "force": f1.squeeze(0), + "virial": v1.squeeze(0), + } + torch.testing.assert_close(ret0["energy"], ret1["energy"], rtol=prec, atol=prec) + torch.testing.assert_close( + torch.matmul(ret0["force"], rmat), ret1["force"], rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + torch.testing.assert_close( + torch.matmul(rmat.T, torch.matmul(ret0["virial"], rmat)), + ret1["virial"], + rtol=prec, + atol=prec, + ) + + # rotate coord and cell + torch.manual_seed(0) + cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) + coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + coord_rot = torch.matmul(coord, rmat) + cell_rot = torch.matmul(cell, rmat) + e0, f0, v0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret0 = { + "energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + e1, f1, v1 = eval_model( + self.model, coord_rot.unsqueeze(0), cell_rot.unsqueeze(0), atype + ) + ret1 = { + "energy": e1.squeeze(0), + "force": f1.squeeze(0), + "virial": v1.squeeze(0), + } + torch.testing.assert_close(ret0["energy"], ret1["energy"], rtol=prec, atol=prec) + torch.testing.assert_close( + torch.matmul(ret0["force"], rmat), ret1["force"], rtol=prec, atol=prec + ) + if not hasattr(self, "test_virial") or self.test_virial: + torch.testing.assert_close( + torch.matmul(rmat.T, torch.matmul(ret0["virial"], rmat)), + ret1["virial"], + rtol=prec, + atol=prec, + ) + + +class TestEnergyModelSeA(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + sampled = make_sample(model_params) + self.type_split = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA1(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA2(unittest.TestCase, RotTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestForceModelDPA2(unittest.TestCase, RotTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid(unittest.TestCase, RotTest): + 
def setUp(self): + model_params = copy.deepcopy(model_hybrid) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestForceModelHybrid(unittest.TestCase, RotTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + sampled = make_sample(model_params) + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_rot_denoise.py b/source/tests/pt/test_rot_denoise.py new file mode 100644 index 0000000000..cab8de7bec --- /dev/null +++ b/source/tests/pt/test_rot_denoise.py @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +from .test_permutation_denoise import ( + make_sample, + model_dpa1, + model_dpa2, +) + +dtype = torch.float64 + + +class RotDenoiseTest: + def test( + self, + ): + prec = 1e-10 + natoms = 5 + cell = 10.0 * torch.eye(3, dtype=dtype).to(env.DEVICE) + coord = 2 * torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + shift = torch.tensor([4, 4, 4], dtype=dtype).to(env.DEVICE) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + from scipy.stats import ( + special_ortho_group, + ) + + rmat = torch.tensor(special_ortho_group.rvs(3), dtype=dtype).to(env.DEVICE) + + # rotate only coord and shift to the center of cell + coord_rot = torch.matmul(coord, rmat) + update_c0, logits0 = eval_model( + self.model, + (coord + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + denoise=True, + ) + update_c0 = update_c0 - (coord + shift).unsqueeze(0) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, + (coord_rot + shift).unsqueeze(0), + cell.unsqueeze(0), + atype, + denoise=True, + ) + update_c1 = update_c1 - (coord_rot + shift).unsqueeze(0) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + torch.testing.assert_close( + torch.matmul(ret0["updated_coord"], rmat), + ret1["updated_coord"], + rtol=prec, + atol=prec, + ) + torch.testing.assert_close(ret0["logits"], ret1["logits"], rtol=prec, atol=prec) + + # rotate coord and cell + torch.manual_seed(0) + cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) + coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + coord_rot = torch.matmul(coord, rmat) + cell_rot = torch.matmul(cell, rmat) + update_c0, logits0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, + coord_rot.unsqueeze(0), + cell_rot.unsqueeze(0), + atype, + denoise=True, + ) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + torch.testing.assert_close(ret0["logits"], ret1["logits"], rtol=prec, atol=prec) + torch.testing.assert_close( + torch.matmul(ret0["updated_coord"], rmat), + ret1["updated_coord"], + rtol=prec, + atol=prec, + ) + + +class 
TestDenoiseModelDPA1(unittest.TestCase, RotDenoiseTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestDenoiseModelDPA2(unittest.TestCase, RotDenoiseTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +# @unittest.skip("hybrid not supported at the moment") +# class TestEnergyModelHybrid(unittest.TestCase, TestRotDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# sampled = make_sample(model_params) +# self.type_split = True +# self.model = get_model(model_params, sampled).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_rotation.py b/source/tests/pt/test_rotation.py new file mode 100644 index 0000000000..4b49377a27 --- /dev/null +++ b/source/tests/pt/test_rotation.py @@ -0,0 +1,133 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import unittest +from pathlib import ( + Path, +) +from typing import ( + List, + Optional, +) + +import numpy as np +import torch +from scipy.stats import ( + special_ortho_group, +) + +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, +) +from deepmd.pt.utils.dataset import ( + DeepmdDataSystem, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) + + +class CheckSymmetry(DeepmdDataSystem): + def __init__( + self, + sys_path: str, + rcut, + sec, + type_map: Optional[List[str]] = None, + type_split=True, + ): + super().__init__(sys_path, rcut, sec, type_map, type_split) + + def get_rotation(self, index, rotation_matrix): + for i in range( + 0, len(self._dirs) + 1 + ): # note: if different sets can be merged, prefix sum is unused to calculate + if index < self.prefix_sum[i]: + break + frames = self._load_set(self._dirs[i - 1]) + frames["coord"] = np.dot( + rotation_matrix, frames["coord"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frames["box"] = np.dot( + rotation_matrix, frames["box"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frames["force"] = np.dot( + rotation_matrix, frames["force"].reshape(-1, 3).T + ).T.reshape(self.nframes, -1) + frame = self.single_preprocess(frames, index - self.prefix_sum[i - 1]) + return frame + + +def get_data(batch): + inputs = {} + for key in ["coord", "atype", "box"]: + inputs[key] = batch[key].unsqueeze(0).to(env.DEVICE) + return inputs + + +class TestRotation(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.rotation = special_ortho_group.rvs(3) + self.get_dataset(0) + self.get_model() + + def get_model(self): + training_systems = self.config["training"]["training_data"]["systems"] + model_params = self.config["model"] + data_stat_nbatch = 
model_params.get("data_stat_nbatch", 10) + train_data = DpLoaderSet( + training_systems, + self.config["training"]["training_data"]["batch_size"], + model_params, + ) + sampled = make_stat_input( + train_data.systems, train_data.dataloaders, data_stat_nbatch + ) + self.model = get_model(self.config["model"], sampled).to(env.DEVICE) + + def get_dataset(self, system_index=0, batch_index=0): + systems = self.config["training"]["training_data"]["systems"] + rcut = self.config["model"]["descriptor"]["rcut"] + sel = self.config["model"]["descriptor"]["sel"] + sec = torch.cumsum(torch.tensor(sel), dim=0) + type_map = self.config["model"]["type_map"] + dpdatasystem = CheckSymmetry( + sys_path=systems[system_index], rcut=rcut, sec=sec, type_map=type_map + ) + self.origin_batch = dpdatasystem._get_item(batch_index) + self.rotated_batch = dpdatasystem.get_rotation(batch_index, self.rotation) + + def test_rotation(self): + result1 = self.model(**get_data(self.origin_batch)) + result2 = self.model(**get_data(self.rotated_batch)) + rotation = torch.from_numpy(self.rotation).to(env.DEVICE) + self.assertTrue(result1["energy"] == result2["energy"]) + if "force" in result1: + self.assertTrue( + torch.allclose( + result2["force"][0], torch.matmul(rotation, result1["force"][0].T).T + ) + ) + if "virial" in result1: + self.assertTrue( + torch.allclose( + result2["virial"][0], + torch.matmul( + torch.matmul(rotation, result1["virial"][0].T), rotation.T + ), + ) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_sampler.py b/source/tests/pt/test_sampler.py new file mode 100644 index 0000000000..0ff16ed7c7 --- /dev/null +++ b/source/tests/pt/test_sampler.py @@ -0,0 +1,115 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import json +import os +import unittest +from pathlib import ( + Path, +) + +import numpy as np +from torch.utils.data import ( + DataLoader, +) + +from deepmd.pt.utils.dataloader import ( + DpLoaderSet, + get_weighted_sampler, +) +from deepmd.tf.common import ( + expand_sys_str, +) +from deepmd.tf.utils import random as tf_random +from deepmd.tf.utils.data_system import ( + DeepmdDataSystem, +) + +CUR_DIR = os.path.dirname(__file__) + + +class TestSampler(unittest.TestCase): + def setUp(self): + with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin: + content = fin.read() + config = json.loads(content) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + config["training"]["training_data"]["systems"] = data_file + config["training"]["validation_data"]["systems"] = data_file + model_config = config["model"] + self.rcut = model_config["descriptor"]["rcut"] + self.rcut_smth = model_config["descriptor"]["rcut_smth"] + self.sel = model_config["descriptor"]["sel"] + self.batch_size = config["training"]["training_data"]["batch_size"] + self.systems = config["training"]["validation_data"]["systems"] + if isinstance(self.systems, str): + self.systems = expand_sys_str(self.systems) + self.my_dataset = DpLoaderSet( + self.systems, + self.batch_size, + model_params={ + "descriptor": { + "type": "se_e2_a", + "sel": self.sel, + "rcut": self.rcut, + }, + "type_map": model_config["type_map"], + }, + seed=10, + shuffle=False, + ) + + tf_random.seed(10) + self.dp_dataset = DeepmdDataSystem(self.systems, self.batch_size, 1, self.rcut) + + def test_sampler_debug_info(self): + dataloader = DataLoader( + self.my_dataset, + sampler=get_weighted_sampler(self.my_dataset, prob_style="prob_sys_size"), + batch_size=None, + num_workers=0, # setting to 0 
diverges the behavior of its iterator; should be >=1 + drop_last=False, + pin_memory=True, + ) + batch_data = next(iter(dataloader)) + sid = batch_data["sid"] + fid = batch_data["fid"][0] + coord = batch_data["coord"].squeeze(0) + frame = self.my_dataset.systems[sid].__getitem__(fid) + self.assertTrue(np.allclose(coord, frame["coord"])) + + def test_auto_prob_uniform(self): + auto_prob_style = "prob_uniform" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_auto_prob_sys_size(self): + auto_prob_style = "prob_sys_size" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_auto_prob_sys_size_ext(self): + auto_prob_style = "prob_sys_size;0:1:0.2;1:3:0.8" + sampler = get_weighted_sampler(self.my_dataset, prob_style=auto_prob_style) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(auto_prob_style=auto_prob_style) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + def test_sys_probs(self): + sys_probs = [0.1, 0.4, 0.5] + sampler = get_weighted_sampler( + self.my_dataset, prob_style=sys_probs, sys_prob=True + ) + my_probs = np.array(sampler.weights) + self.dp_dataset.set_sys_probs(sys_probs=sys_probs) + dp_probs = np.array(self.dp_dataset.sys_probs) + self.assertTrue(np.allclose(my_probs, dp_probs)) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_saveload_dpa1.py b/source/tests/pt/test_saveload_dpa1.py new file mode 100644 index 0000000000..d1043f7029 --- /dev/null +++ b/source/tests/pt/test_saveload_dpa1.py @@ -0,0 +1,151 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import os +import unittest +from pathlib import ( + Path, +) + +import torch +from torch.utils.data import ( + DataLoader, +) + +from deepmd.pt.loss import ( + EnergyStdLoss, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.train.wrapper import ( + ModelWrapper, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataloader import ( + BufferedIterator, + DpLoaderSet, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) +from deepmd.tf.common import ( + expand_sys_str, +) + + +def get_dataset(config): + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + sel = model_config["descriptor"]["sel"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + batch_size = config["training"]["training_data"]["batch_size"] + type_map = model_config["type_map"] + + dataset = DpLoaderSet( + systems, + batch_size, + model_params={ + "descriptor": { + "type": "dpa1", + "sel": sel, + "rcut": rcut, + }, + "type_map": type_map, + }, + ) + data_stat_nbatch = model_config.get("data_stat_nbatch", 10) + sampled = make_stat_input(dataset.systems, dataset.dataloaders, data_stat_nbatch) + return dataset, sampled + + +class TestSaveLoadDPA1(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as fin: + self.config = json.load(fin) 
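+        # The next line wires the schedule's start_lr into the loss config.
+        # A sketch of why (an assumption based on the usual DeePMD loss
+        # convention, not verified here): the energy/force prefactors are
+        # interpolated with the current learning rate, roughly
+        #   pref(t) = pref_limit + (pref_start - pref_limit) * lr(t) / start_lr
+        # so EnergyStdLoss needs start_lr as the normalization constant.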
+ self.config["loss"]["starter_learning_rate"] = self.config["learning_rate"][ + "start_lr" + ] + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.dataset, self.sampled = get_dataset(self.config) + self.training_dataloader = DataLoader( + self.dataset, + sampler=torch.utils.data.RandomSampler(self.dataset), + batch_size=None, + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + drop_last=False, + pin_memory=True, + ) + self.training_data = BufferedIterator(iter(self.training_dataloader)) + self.loss = EnergyStdLoss(**self.config["loss"]) + self.cur_lr = 1 + self.task_key = "Default" + self.input_dict, self.label_dict = self.get_data() + self.start_lr = self.config["learning_rate"]["start_lr"] + + def get_model_result(self, read=False, model_file="tmp_model.pt"): + wrapper = self.create_wrapper(read) + optimizer = torch.optim.Adam(wrapper.parameters(), lr=self.start_lr) + optimizer.zero_grad() + if read: + wrapper.load_state_dict(torch.load(model_file, map_location=env.DEVICE)) + os.remove(model_file) + else: + torch.save(wrapper.state_dict(), model_file) + result = wrapper( + **self.input_dict, + cur_lr=self.cur_lr, + label=self.label_dict, + task_key=self.task_key, + )[0] + return result + + def create_wrapper(self, read: bool): + model_config = copy.deepcopy(self.config["model"]) + sampled = copy.deepcopy(self.sampled) + model_config["resuming"] = read + model_config["stat_file_dir"] = "stat_files" + model_config["stat_file"] = "stat.npz" + model_config["stat_file_path"] = os.path.join( + model_config["stat_file_dir"], model_config["stat_file"] + ) + model = get_model(model_config, sampled).to(env.DEVICE) + return ModelWrapper(model, self.loss) + + def get_data(self): + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator(iter(self.training_dataloader)) + batch_data = next(iter(self.training_data)) + input_dict = {} + for item in ["coord", "atype", "box"]: + if item in batch_data: + input_dict[item] = batch_data[item] + else: + input_dict[item] = None + label_dict = {} + for item in ["energy", "force", "virial"]: + if item in batch_data: + label_dict[item] = batch_data[item] + return input_dict, label_dict + + def test_saveload(self): + result1 = self.get_model_result() + result2 = self.get_model_result(read=True) + final_result = all( + torch.allclose(result1[item], result2[item]) for item in result1 + ) + self.assertTrue(final_result) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_saveload_se_e2_a.py b/source/tests/pt/test_saveload_se_e2_a.py new file mode 100644 index 0000000000..95d7f97a88 --- /dev/null +++ b/source/tests/pt/test_saveload_se_e2_a.py @@ -0,0 +1,145 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import json +import os +import unittest +from pathlib import ( + Path, +) + +import torch +from torch.utils.data import ( + DataLoader, +) + +from deepmd.pt.loss import ( + EnergyStdLoss, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.train.wrapper import ( + ModelWrapper, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.dataloader import ( + BufferedIterator, + DpLoaderSet, +) +from deepmd.pt.utils.stat import ( + make_stat_input, +) +from deepmd.tf.common import ( + 
expand_sys_str, +) + + +def get_dataset(config): + model_config = config["model"] + rcut = model_config["descriptor"]["rcut"] + sel = model_config["descriptor"]["sel"] + systems = config["training"]["validation_data"]["systems"] + if isinstance(systems, str): + systems = expand_sys_str(systems) + batch_size = config["training"]["training_data"]["batch_size"] + type_map = model_config["type_map"] + + dataset = DpLoaderSet( + systems, + batch_size, + model_params={ + "descriptor": { + "type": "se_e2_a", + "sel": sel, + "rcut": rcut, + }, + "type_map": type_map, + }, + ) + data_stat_nbatch = model_config.get("data_stat_nbatch", 10) + sampled = make_stat_input(dataset.systems, dataset.dataloaders, data_stat_nbatch) + return dataset, sampled + + +class TestSaveLoadSeA(unittest.TestCase): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_e2_a.json") + with open(input_json) as fin: + self.config = json.load(fin) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["loss"]["starter_learning_rate"] = self.config["learning_rate"][ + "start_lr" + ] + self.dataset, self.sampled = get_dataset(self.config) + self.training_dataloader = DataLoader( + self.dataset, + sampler=torch.utils.data.RandomSampler(self.dataset), + batch_size=None, + num_workers=0, # setting to 0 diverges the behavior of its iterator; should be >=1 + drop_last=False, + pin_memory=True, + ) + self.training_data = BufferedIterator(iter(self.training_dataloader)) + self.loss = EnergyStdLoss(**self.config["loss"]) + self.cur_lr = 1 + self.task_key = "Default" + self.input_dict, self.label_dict = self.get_data() + self.start_lr = self.config["learning_rate"]["start_lr"] + + def get_model_result(self, read=False, model_file="tmp_model.pt"): + wrapper = self.create_wrapper() + optimizer = torch.optim.Adam(wrapper.parameters(), lr=self.start_lr) + optimizer.zero_grad() + if read: + wrapper.load_state_dict(torch.load(model_file, map_location=env.DEVICE)) + os.remove(model_file) + else: + torch.save(wrapper.state_dict(), model_file) + result = wrapper( + **self.input_dict, + cur_lr=self.cur_lr, + label=self.label_dict, + task_key=self.task_key, + )[0] + return result + + def create_wrapper(self): + model_config = copy.deepcopy(self.config["model"]) + sampled = copy.deepcopy(self.sampled) + model = get_model(model_config, sampled).to(env.DEVICE) + return ModelWrapper(model, self.loss) + + def get_data(self): + try: + batch_data = next(iter(self.training_data)) + except StopIteration: + # Refresh the status of the dataloader to start from a new epoch + self.training_data = BufferedIterator(iter(self.training_dataloader)) + batch_data = next(iter(self.training_data)) + input_dict = {} + for item in ["coord", "atype", "box"]: + if item in batch_data: + input_dict[item] = batch_data[item] + else: + input_dict[item] = None + label_dict = {} + for item in ["energy", "force", "virial"]: + if item in batch_data: + label_dict[item] = batch_data[item] + return input_dict, label_dict + + def test_saveload(self): + result1 = self.get_model_result() + result2 = self.get_model_result(read=True) + final_result = all( + torch.allclose(result1[item], result2[item]) for item in result1 + ) + self.assertTrue(final_result) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_se_e2_a.py b/source/tests/pt/test_se_e2_a.py new file mode 100644 index 
0000000000..96a17c2bad --- /dev/null +++ b/source/tests/pt/test_se_e2_a.py @@ -0,0 +1,199 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import itertools +import unittest + +import numpy as np +import torch + +try: + # from deepmd.model_format import PRECISION_DICT as DP_PRECISION_DICT + from deepmd.model_format import DescrptSeA as DPDescrptSeA + + support_se_e2_a = True +except ModuleNotFoundError: + support_se_e2_a = False +except ImportError: + support_se_e2_a = False + +from deepmd.pt.model.descriptor.se_a import ( + DescrptSeA, +) +from deepmd.pt.utils import ( + env, +) +from deepmd.pt.utils.env import ( + PRECISION_DICT, +) + +from .test_mlp import ( + get_tols, +) + +dtype = env.GLOBAL_PT_FLOAT_PRECISION + + +class TestCaseSingleFrameWithNlist: + def setUp(self): + # nloc == 3, nall == 4 + self.nloc = 3 + self.nall = 4 + self.nf, self.nt = 1, 2 + self.coord_ext = np.array( + [ + [0, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, -2, 0], + ], + dtype=np.float64, + ).reshape([1, self.nall * 3]) + self.atype_ext = np.array([0, 0, 1, 0], dtype=int).reshape([1, self.nall]) + # sel = [5, 2] + self.sel = [5, 2] + self.nlist = np.array( + [ + [1, 3, -1, -1, -1, 2, -1], + [0, -1, -1, -1, -1, 2, -1], + [0, 1, -1, -1, -1, 0, -1], + ], + dtype=int, + ).reshape([1, self.nloc, sum(self.sel)]) + self.rcut = 0.4 + self.rcut_smth = 2.2 + + +# to be merged with the tf test case +@unittest.skipIf(not support_se_e2_a, "EnvMat not supported") +class TestDescrptSeA(unittest.TestCase, TestCaseSingleFrameWithNlist): + def setUp(self): + TestCaseSingleFrameWithNlist.setUp(self) + + def test_consistency( + self, + ): + rng = np.random.default_rng() + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec in itertools.product( + [False, True], + ["float64", "float32"], + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # sea new impl + dd0 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + old_impl=False, + ).to(env.DEVICE) + dd0.sea.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) + dd0.sea.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) + rd0, _, _, _, _ = dd0( + torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), + torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), + torch.tensor(self.nlist, dtype=int, device=env.DEVICE), + ) + # serialization + dd1 = DescrptSeA.deserialize(dd0.serialize()) + rd1, _, _, _, _ = dd1( + torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), + torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), + torch.tensor(self.nlist, dtype=int, device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd1.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + # dp impl + dd2 = DPDescrptSeA.deserialize(dd0.serialize()) + rd2 = dd2.call( + self.coord_ext, + self.atype_ext, + self.nlist, + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd2, + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + # old impl + if idt is False and prec == "float64": + dd3 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + old_impl=True, + ).to(env.DEVICE) + dd0_state_dict = dd0.sea.state_dict() + dd3_state_dict = dd3.sea.state_dict() + for i in dd3_state_dict: + dd3_state_dict[i] = ( + dd0_state_dict[ + i.replace(".deep_layers.", ".layers.").replace( + 
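+                        # translate the old-impl key `i` (from dd3) into the
+                        # corresponding new-impl key before copying the tensor
+                        # from dd0: old "filter_layers_old.*" weights live
+                        # under "filter_layers.networks.*" in the new module
+                        # tree, and ".deep_layers." became ".layers."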
"filter_layers_old.", "filter_layers.networks." + ) + ] + .detach() + .clone() + ) + if ".bias" in i: + dd3_state_dict[i] = dd3_state_dict[i].unsqueeze(0) + dd3.sea.load_state_dict(dd3_state_dict) + + rd3, _, _, _, _ = dd3( + torch.tensor(self.coord_ext, dtype=dtype, device=env.DEVICE), + torch.tensor(self.atype_ext, dtype=int, device=env.DEVICE), + torch.tensor(self.nlist, dtype=int, device=env.DEVICE), + ) + np.testing.assert_allclose( + rd0.detach().cpu().numpy(), + rd3.detach().cpu().numpy(), + rtol=rtol, + atol=atol, + err_msg=err_msg, + ) + + def test_jit( + self, + ): + rng = np.random.default_rng() + nf, nloc, nnei = self.nlist.shape + davg = rng.normal(size=(self.nt, nnei, 4)) + dstd = rng.normal(size=(self.nt, nnei, 4)) + dstd = 0.1 + np.abs(dstd) + + for idt, prec in itertools.product( + [False, True], + ["float64", "float32"], + ): + dtype = PRECISION_DICT[prec] + rtol, atol = get_tols(prec) + err_msg = f"idt={idt} prec={prec}" + # sea new impl + dd0 = DescrptSeA( + self.rcut, + self.rcut_smth, + self.sel, + precision=prec, + resnet_dt=idt, + old_impl=False, + ) + dd0.sea.mean = torch.tensor(davg, dtype=dtype, device=env.DEVICE) + dd0.sea.dstd = torch.tensor(dstd, dtype=dtype, device=env.DEVICE) + dd1 = DescrptSeA.deserialize(dd0.serialize()) + model = torch.jit.script(dd0) + model = torch.jit.script(dd1) diff --git a/source/tests/pt/test_smooth.py b/source/tests/pt/test_smooth.py new file mode 100644 index 0000000000..2e3bf61d10 --- /dev/null +++ b/source/tests/pt/test_smooth.py @@ -0,0 +1,230 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +from .test_permutation import ( # model_dpau, + make_sample, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, +) + +dtype = torch.float64 + + +class SmoothTest: + def test( + self, + ): + # displacement of atoms + epsilon = 1e-5 if self.epsilon is None else self.epsilon + # required prec. relative prec is not checked. 
+ rprec = 0 + aprec = 1e-5 if self.aprec is None else self.aprec + + natoms = 10 + cell = 8.6 * torch.eye(3, dtype=dtype).to(env.DEVICE) + atype = torch.randint(0, 3, [natoms]) + coord0 = ( + torch.tensor( + [ + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + ], + dtype=dtype, + ) + .view([-1, 3]) + .to(env.DEVICE) + ) + coord1 = torch.rand([natoms - coord0.shape[0], 3], dtype=dtype).to(env.DEVICE) + coord1 = torch.matmul(coord1, cell) + coord = torch.concat([coord0, coord1], dim=0) + + coord0 = torch.clone(coord) + coord1 = torch.clone(coord) + coord1[1][0] += epsilon + coord2 = torch.clone(coord) + coord2[2][1] += epsilon + coord3 = torch.clone(coord) + coord3[1][0] += epsilon + coord3[2][1] += epsilon + + e0, f0, v0 = eval_model( + self.model, coord0.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret0 = { + "energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + e1, f1, v1 = eval_model( + self.model, coord1.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret1 = { + "energy": e1.squeeze(0), + "force": f1.squeeze(0), + "virial": v1.squeeze(0), + } + e2, f2, v2 = eval_model( + self.model, coord2.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret2 = { + "energy": e2.squeeze(0), + "force": f2.squeeze(0), + "virial": v2.squeeze(0), + } + e3, f3, v3 = eval_model( + self.model, coord3.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret3 = { + "energy": e3.squeeze(0), + "force": f3.squeeze(0), + "virial": v3.squeeze(0), + } + + def compare(ret0, ret1): + torch.testing.assert_close( + ret0["energy"], ret1["energy"], rtol=rprec, atol=aprec + ) + # plus 1. to avoid the divided-by-zero issue + torch.testing.assert_close( + 1.0 + ret0["force"], 1.0 + ret1["force"], rtol=rprec, atol=aprec + ) + if not hasattr(self, "test_virial") or self.test_virial: + torch.testing.assert_close( + 1.0 + ret0["virial"], 1.0 + ret1["virial"], rtol=rprec, atol=aprec + ) + + compare(ret0, ret1) + compare(ret1, ret2) + compare(ret0, ret3) + + +class TestEnergyModelSeA(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + sampled = make_sample(model_params) + self.type_split = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +# @unittest.skip("dpa-1 not smooth at the moment") +class TestEnergyModelDPA1(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + # less degree of smoothness, + # error can be systematically removed by reducing epsilon + self.epsilon = 1e-5 + self.aprec = 1e-5 + + +class TestEnergyModelDPA2(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["descriptor"]["repinit_rcut"] = 8 + model_params["descriptor"]["repinit_rcut_smth"] = 3.5 + model_params_sample = copy.deepcopy(model_params) + ####################################################### + # dirty hack here! 
the interface of dataload should be + # redesigned to support specifying rcut and sel + ####################################################### + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = 1e-5, 1e-4 + + +class TestEnergyModelDPA2_1(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "ener" + model_params_sample = copy.deepcopy(model_params) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +class TestEnergyModelDPA2_2(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "ener" + model_params_sample = copy.deepcopy(model_params) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid(unittest.TestCase, SmoothTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = None, None + + +# class TestEnergyFoo(unittest.TestCase): +# def test(self): +# model_params = model_dpau +# sampled = make_sample(model_params) +# self.model = EnergyModelDPAUni(model_params, sampled).to(env.DEVICE) + +# natoms = 5 +# cell = torch.rand([3, 3], dtype=dtype) +# cell = (cell + cell.T) + 5. * torch.eye(3) +# coord = torch.rand([natoms, 3], dtype=dtype) +# coord = torch.matmul(coord, cell) +# atype = torch.IntTensor([0, 0, 0, 1, 1]) +# idx_perm = [1, 0, 4, 3, 2] +# ret0 = infer_model(self.model, coord, cell, atype, type_split=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_smooth_denoise.py b/source/tests/pt/test_smooth_denoise.py new file mode 100644 index 0000000000..a66e5df957 --- /dev/null +++ b/source/tests/pt/test_smooth_denoise.py @@ -0,0 +1,151 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +from .test_permutation_denoise import ( + make_sample, + model_dpa2, +) + +dtype = torch.float64 + + +class SmoothDenoiseTest: + def test( + self, + ): + # displacement of atoms + epsilon = 1e-5 if self.epsilon is None else self.epsilon + # required prec. relative prec is not checked. 
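+        # Same finite-displacement setup as in test_smooth.py, but denoise
+        # models return updated coordinates and token logits rather than
+        # energy/force/virial, so smoothness is asserted directly on those
+        # two outputs (see compare() below).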
+ rprec = 0 + aprec = 1e-5 if self.aprec is None else self.aprec + + natoms = 10 + cell = 8.6 * torch.eye(3, dtype=dtype).to(env.DEVICE) + atype = torch.randint(0, 3, [natoms]) + coord0 = ( + torch.tensor( + [ + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + 0.0, + 0.0, + 4.0 - 0.5 * epsilon, + 0.0, + ], + dtype=dtype, + ) + .view([-1, 3]) + .to(env.DEVICE) + ) + coord1 = torch.rand([natoms - coord0.shape[0], 3], dtype=dtype).to(env.DEVICE) + coord1 = torch.matmul(coord1, cell) + coord = torch.concat([coord0, coord1], dim=0) + + coord0 = torch.clone(coord) + coord1 = torch.clone(coord) + coord1[1][0] += epsilon + coord2 = torch.clone(coord) + coord2[2][1] += epsilon + coord3 = torch.clone(coord) + coord3[1][0] += epsilon + coord3[2][1] += epsilon + + update_c0, logits0 = eval_model( + self.model, coord0.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret0 = {"updated_coord": update_c0.squeeze(0), "logits": logits0.squeeze(0)} + update_c1, logits1 = eval_model( + self.model, coord1.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret1 = {"updated_coord": update_c1.squeeze(0), "logits": logits1.squeeze(0)} + update_c2, logits2 = eval_model( + self.model, coord2.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret2 = {"updated_coord": update_c2.squeeze(0), "logits": logits2.squeeze(0)} + update_c3, logits3 = eval_model( + self.model, coord3.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True + ) + ret3 = {"updated_coord": update_c3.squeeze(0), "logits": logits3.squeeze(0)} + + def compare(ret0, ret1): + torch.testing.assert_close( + ret0["updated_coord"], ret1["updated_coord"], rtol=rprec, atol=aprec + ) + torch.testing.assert_close( + ret0["logits"], ret1["logits"], rtol=rprec, atol=aprec + ) + + compare(ret0, ret1) + compare(ret1, ret2) + compare(ret0, ret3) + + +class TestDenoiseModelDPA2(unittest.TestCase, SmoothDenoiseTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + model_params["descriptor"]["sel"] = 8 + model_params["descriptor"]["rcut_smth"] = 3.5 + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = None, None + self.epsilon = 1e-7 + self.aprec = 1e-5 + + +class TestDenoiseModelDPA2_1(unittest.TestCase, SmoothDenoiseTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + # model_params["descriptor"]["combine_grrg"] = True + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + self.epsilon, self.aprec = None, None + self.epsilon = 1e-7 + self.aprec = 1e-5 + + +# @unittest.skip("hybrid not supported at the moment") +# class TestDenoiseModelHybrid(unittest.TestCase, TestSmoothDenoise): +# def setUp(self): +# model_params = copy.deepcopy(model_hybrid_denoise) +# sampled = make_sample(model_params) +# self.type_split = True +# self.model = get_model(model_params, sampled).to(env.DEVICE) +# self.epsilon, self.aprec = None, None +# 
self.epsilon = 1e-7
+#         self.aprec = 1e-5
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/source/tests/pt/test_stat.py b/source/tests/pt/test_stat.py
new file mode 100644
index 0000000000..08fc12ff11
--- /dev/null
+++ b/source/tests/pt/test_stat.py
@@ -0,0 +1,194 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import json
+import os
+import unittest
+from pathlib import (
+    Path,
+)
+
+import numpy as np
+import torch
+
+from deepmd.pt.model.descriptor import (
+    DescrptSeA,
+)
+from deepmd.pt.utils import (
+    env,
+)
+from deepmd.pt.utils.dataloader import (
+    DpLoaderSet,
+)
+from deepmd.pt.utils.stat import (
+    compute_output_stats,
+)
+from deepmd.pt.utils.stat import make_stat_input as my_make
+from deepmd.tf.common import (
+    expand_sys_str,
+)
+from deepmd.tf.descriptor.se_a import DescrptSeA as DescrptSeA_tf
+from deepmd.tf.fit.ener import (
+    EnerFitting,
+)
+from deepmd.tf.model.model_stat import make_stat_input as dp_make
+from deepmd.tf.model.model_stat import merge_sys_stat as dp_merge
+from deepmd.tf.utils import random as tf_random
+from deepmd.tf.utils.data_system import (
+    DeepmdDataSystem,
+)
+
+CUR_DIR = os.path.dirname(__file__)
+
+
+def compare(ut, base, given):
+    if isinstance(base, list):
+        ut.assertEqual(len(base), len(given))
+        for idx in range(len(base)):
+            compare(ut, base[idx], given[idx])
+    elif isinstance(base, np.ndarray):
+        ut.assertTrue(np.allclose(base.reshape(-1), given.reshape(-1)))
+    else:
+        ut.assertEqual(base, given)
+
+
+class TestDataset(unittest.TestCase):
+    def setUp(self):
+        with open(str(Path(__file__).parent / "water/se_e2_a.json")) as fin:
+            content = fin.read()
+        config = json.loads(content)
+        data_file = [str(Path(__file__).parent / "water/data/data_0")]
+        config["training"]["training_data"]["systems"] = data_file
+        config["training"]["validation_data"]["systems"] = data_file
+        model_config = config["model"]
+        self.rcut = model_config["descriptor"]["rcut"]
+        self.rcut_smth = model_config["descriptor"]["rcut_smth"]
+        self.sel = model_config["descriptor"]["sel"]
+        self.batch_size = config["training"]["training_data"]["batch_size"]
+        self.systems = config["training"]["validation_data"]["systems"]
+        if isinstance(self.systems, str):
+            self.systems = expand_sys_str(self.systems)
+        self.my_dataset = DpLoaderSet(
+            self.systems,
+            self.batch_size,
+            model_params={
+                "descriptor": {
+                    "type": "se_e2_a",
+                    "sel": self.sel,
+                    "rcut": self.rcut,
+                },
+                "type_map": model_config["type_map"],
+            },
+            seed=10,
+        )
+        self.filter_neuron = model_config["descriptor"]["neuron"]
+        self.axis_neuron = model_config["descriptor"]["axis_neuron"]
+        self.data_stat_nbatch = 2
+        self.n_neuron = model_config["fitting_net"]["neuron"]
+
+        self.my_sampled = my_make(
+            self.my_dataset.systems, self.my_dataset.dataloaders, self.data_stat_nbatch
+        )
+
+        tf_random.seed(10)
+        dp_dataset = DeepmdDataSystem(self.systems, self.batch_size, 1, self.rcut)
+        dp_dataset.add("energy", 1, atomic=False, must=False, high_prec=True)
+        dp_dataset.add("force", 3, atomic=True, must=False, high_prec=False)
+        self.dp_sampled = dp_make(dp_dataset, self.data_stat_nbatch, False)
+        self.dp_merged = dp_merge(self.dp_sampled)
+        self.dp_mesh = self.dp_merged.pop("default_mesh")
+        self.dp_d = DescrptSeA_tf(
+            rcut=self.rcut,
+            rcut_smth=self.rcut_smth,
+            sel=self.sel,
+            neuron=self.filter_neuron,
+            axis_neuron=self.axis_neuron,
+        )
+
+    def test_stat_output(self):
+        def 
my_merge(energy, natoms):
+            energy_lst = []
+            natoms_lst = []
+            for i in range(len(energy)):
+                for j in range(len(energy[i])):
+                    energy_lst.append(torch.tensor(energy[i][j]))
+                    natoms_lst.append(
+                        torch.tensor(natoms[i][j])
+                        .unsqueeze(0)
+                        .expand(energy[i][j].shape[0], -1)
+                    )
+            return energy_lst, natoms_lst
+
+        energy = self.dp_sampled["energy"]
+        natoms = self.dp_sampled["natoms_vec"]
+        energy, natoms = my_merge(energy, natoms)
+        dp_fn = EnerFitting(self.dp_d, self.n_neuron)
+        dp_fn.compute_output_stats(self.dp_sampled)
+        bias_atom_e = compute_output_stats(energy, natoms)
+        self.assertTrue(np.allclose(dp_fn.bias_atom_e, bias_atom_e[:, 0]))
+
+    # temporarily disabled: the random seeds in TF and PyTorch may yield
+    # different sampling, so the inputs cannot be compared one-to-one
+    """
+    def test_stat_input(self):
+        my_sampled = self.my_sampled
+        # list of dicts, each dict contains samples from a system
+        dp_keys = set(self.dp_merged.keys())  # dict of list of batches
+        self.dp_merged['natoms'] = self.dp_merged['natoms_vec']
+        for key in dp_keys:
+            if key not in my_sampled[0] or key in 'coord':
+                # coord is pre-normalized
+                continue
+            lst = []
+            for item in my_sampled:
+                bsz = item['energy'].shape[0]//self.data_stat_nbatch
+                for j in range(self.data_stat_nbatch):
+                    lst.append(item[key][j*bsz:(j+1)*bsz].cpu().numpy())
+            compare(self, self.dp_merged[key], lst)
+    """
+
+    def test_descriptor(self):
+        coord = self.dp_merged["coord"]
+        atype = self.dp_merged["type"]
+        natoms = self.dp_merged["natoms_vec"]
+        box = self.dp_merged["box"]
+        self.dp_d.compute_input_stats(coord, box, atype, natoms, self.dp_mesh, {})
+
+        my_en = DescrptSeA(
+            self.rcut, self.rcut_smth, self.sel, self.filter_neuron, self.axis_neuron
+        )
+        my_en = my_en.sea  # get the block that keeps the statistics as private variables
+        sampled = self.my_sampled
+        for sys in sampled:
+            for key in [
+                "coord",
+                "force",
+                "energy",
+                "atype",
+                "natoms",
+                "extended_coord",
+                "nlist",
+                "shift",
+                "mapping",
+            ]:
+                if key in sys.keys():
+                    sys[key] = sys[key].to(env.DEVICE)
+        sumr, suma, sumn, sumr2, suma2 = my_en.compute_input_stats(sampled)
+        my_en.init_desc_stat(sumr, suma, sumn, sumr2, suma2)
+        self.assertTrue(
+            np.allclose(
+                self.dp_d.davg.reshape([-1]), my_en.mean.cpu().reshape([-1]), rtol=0.01
+            )
+        )
+        self.assertTrue(
+            np.allclose(
+                self.dp_d.dstd.reshape([-1]),
+                my_en.stddev.cpu().reshape([-1]),
+                rtol=0.01,
+            )
+        )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/source/tests/pt/test_training.py b/source/tests/pt/test_training.py
new file mode 100644
index 0000000000..574ca8688e
--- /dev/null
+++ b/source/tests/pt/test_training.py
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import json
+import os
+import shutil
+import unittest
+from copy import (
+    deepcopy,
+)
+from pathlib import (
+    Path,
+)
+
+from deepmd.pt.entrypoints.main import (
+    get_trainer,
+)
+
+from .test_permutation import (
+    model_dpa1,
+    model_dpa2,
+    model_hybrid,
+    model_se_e2_a,
+)
+
+
+class DPTrainTest:
+    def test_dp_train(self):
+        trainer = get_trainer(deepcopy(self.config))
+        trainer.run()
+        self.tearDown()
+
+    def tearDown(self):
+        for f in os.listdir("."):
+            if f.startswith("model") and f.endswith(".pt"):
+                os.remove(f)
+            if f in ["lcurve.out"]:
+                os.remove(f)
+            if f in ["stat_files"]:
+                shutil.rmtree(f)
+
+
+class TestEnergyModelSeA(unittest.TestCase, DPTrainTest):
+    def setUp(self):
+        input_json = str(Path(__file__).parent / "water/se_atten.json")
+        with open(input_json) as f:
+            self.config = json.load(f)
+        
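# the se_atten input file is only used as a template here: the data paths
+        # and the model section are overridden below
+        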
data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_se_e2_a) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + +class TestEnergyModelDPA1(unittest.TestCase, DPTrainTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa1) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + +class TestEnergyModelDPA2(unittest.TestCase, DPTrainTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_dpa2) + self.config["model"]["descriptor"]["rcut"] = self.config["model"]["descriptor"][ + "repinit_rcut" + ] + self.config["model"]["descriptor"]["rcut_smth"] = self.config["model"][ + "descriptor" + ]["repinit_rcut_smth"] + self.config["model"]["descriptor"]["sel"] = self.config["model"]["descriptor"][ + "repinit_nsel" + ] + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid(unittest.TestCase, DPTrainTest): + def setUp(self): + input_json = str(Path(__file__).parent / "water/se_atten.json") + with open(input_json) as f: + self.config = json.load(f) + data_file = [str(Path(__file__).parent / "water/data/data_0")] + self.config["training"]["training_data"]["systems"] = data_file + self.config["training"]["validation_data"]["systems"] = data_file + self.config["model"] = deepcopy(model_hybrid) + self.config["training"]["numb_steps"] = 1 + self.config["training"]["save_freq"] = 1 + + def tearDown(self) -> None: + DPTrainTest.tearDown(self) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_trans.py b/source/tests/pt/test_trans.py new file mode 100644 index 0000000000..e5d379b9ff --- /dev/null +++ b/source/tests/pt/test_trans.py @@ -0,0 +1,137 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + +from deepmd.pt.infer.deep_eval import ( + eval_model, +) +from deepmd.pt.model.model import ( + get_model, +) +from deepmd.pt.utils import ( + env, +) + +from .test_permutation import ( # model_dpau, + make_sample, + model_dpa1, + model_dpa2, + model_hybrid, + model_se_e2_a, +) + +dtype = torch.float64 + + +class TransTest: + def test( + self, + ): + natoms = 5 + cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE) + cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE) + coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE) + coord = torch.matmul(coord, cell) + atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE) + 
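# rigid translation: move every atom by the same random vector and wrap the
+        # result back into the periodic cell via fractional coordinates; energy,
+        # force and virial must be invariant under this operation
+        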
shift = (torch.rand([3], dtype=dtype) - 0.5).to(env.DEVICE) * 2.0 + coord_s = torch.matmul( + torch.remainder(torch.matmul(coord + shift, torch.linalg.inv(cell)), 1.0), + cell, + ) + e0, f0, v0 = eval_model( + self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret0 = { + "energy": e0.squeeze(0), + "force": f0.squeeze(0), + "virial": v0.squeeze(0), + } + e1, f1, v1 = eval_model( + self.model, coord_s.unsqueeze(0), cell.unsqueeze(0), atype + ) + ret1 = { + "energy": e1.squeeze(0), + "force": f1.squeeze(0), + "virial": v1.squeeze(0), + } + prec = 1e-10 + torch.testing.assert_close(ret0["energy"], ret1["energy"], rtol=prec, atol=prec) + torch.testing.assert_close(ret0["force"], ret1["force"], rtol=prec, atol=prec) + if not hasattr(self, "test_virial") or self.test_virial: + torch.testing.assert_close( + ret0["virial"], ret1["virial"], rtol=prec, atol=prec + ) + + +class TestEnergyModelSeA(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_se_e2_a) + sampled = make_sample(model_params) + self.type_split = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA1(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_dpa1) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestEnergyModelDPA2(unittest.TestCase, TransTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +class TestForceModelDPA2(unittest.TestCase, TransTest): + def setUp(self): + model_params_sample = copy.deepcopy(model_dpa2) + model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][ + "repinit_rcut" + ] + model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][ + "repinit_nsel" + ] + sampled = make_sample(model_params_sample) + model_params = copy.deepcopy(model_dpa2) + model_params["fitting_net"]["type"] = "direct_force_ener" + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestEnergyModelHybrid(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + sampled = make_sample(model_params) + self.type_split = True + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +@unittest.skip("hybrid not supported at the moment") +class TestForceModelHybrid(unittest.TestCase, TransTest): + def setUp(self): + model_params = copy.deepcopy(model_hybrid) + model_params["fitting_net"]["type"] = "direct_force_ener" + sampled = make_sample(model_params) + self.type_split = True + self.test_virial = False + self.model = get_model(model_params, sampled).to(env.DEVICE) + + +if __name__ == "__main__": + unittest.main() diff --git a/source/tests/pt/test_trans_denoise.py b/source/tests/pt/test_trans_denoise.py new file mode 100644 index 0000000000..360633278c --- /dev/null +++ b/source/tests/pt/test_trans_denoise.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: LGPL-3.0-or-later +import copy +import unittest + +import torch + 
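+# these tests mirror test_trans.py for the denoise task: the predicted
+# coordinate updates and logits must be unchanged when all atoms are
+# translated by a common vector and wrapped back into the cell
+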
+from deepmd.pt.infer.deep_eval import (
+    eval_model,
+)
+from deepmd.pt.model.model import (
+    get_model,
+)
+from deepmd.pt.utils import (
+    env,
+)
+
+from .test_permutation_denoise import (
+    make_sample,
+    model_dpa1,
+    model_dpa2,
+    model_hybrid,
+)
+
+dtype = torch.float64
+
+
+class TransDenoiseTest:
+    def test(
+        self,
+    ):
+        natoms = 5
+        cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE)
+        cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE)
+        coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE)
+        coord = torch.matmul(coord, cell)
+        atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE)
+        shift = (torch.rand([3], dtype=dtype) - 0.5).to(env.DEVICE) * 2.0
+        coord_s = torch.matmul(
+            torch.remainder(torch.matmul(coord + shift, torch.linalg.inv(cell)), 1.0),
+            cell,
+        )
+        updated_c0, logits0 = eval_model(
+            self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True
+        )
+        updated_c0 = updated_c0 - coord.unsqueeze(0)
+        ret0 = {"updated_coord": updated_c0.squeeze(0), "logits": logits0.squeeze(0)}
+        updated_c1, logits1 = eval_model(
+            self.model, coord_s.unsqueeze(0), cell.unsqueeze(0), atype, denoise=True
+        )
+        updated_c1 = updated_c1 - coord_s.unsqueeze(0)
+        ret1 = {"updated_coord": updated_c1.squeeze(0), "logits": logits1.squeeze(0)}
+        prec = 1e-10
+        torch.testing.assert_close(
+            ret0["updated_coord"], ret1["updated_coord"], rtol=prec, atol=prec
+        )
+        torch.testing.assert_close(ret0["logits"], ret1["logits"], rtol=prec, atol=prec)
+
+
+class TestDenoiseModelDPA1(unittest.TestCase, TransDenoiseTest):
+    def setUp(self):
+        model_params = copy.deepcopy(model_dpa1)
+        sampled = make_sample(model_params)
+        self.type_split = True
+        self.model = get_model(model_params, sampled).to(env.DEVICE)
+
+
+class TestDenoiseModelDPA2(unittest.TestCase, TransDenoiseTest):
+    def setUp(self):
+        model_params_sample = copy.deepcopy(model_dpa2)
+        model_params_sample["descriptor"]["rcut"] = model_params_sample["descriptor"][
+            "repinit_rcut"
+        ]
+        model_params_sample["descriptor"]["sel"] = model_params_sample["descriptor"][
+            "repinit_nsel"
+        ]
+        sampled = make_sample(model_params_sample)
+        model_params = copy.deepcopy(model_dpa2)
+        self.type_split = True
+        self.model = get_model(model_params, sampled).to(env.DEVICE)
+
+
+@unittest.skip("hybrid not supported at the moment")
+class TestDenoiseModelHybrid(unittest.TestCase, TransDenoiseTest):
+    def setUp(self):
+        model_params = copy.deepcopy(model_hybrid)
+        sampled = make_sample(model_params)
+        self.type_split = True
+        self.model = get_model(model_params, sampled).to(env.DEVICE)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/source/tests/pt/test_unused_params.py b/source/tests/pt/test_unused_params.py
new file mode 100644
index 0000000000..a924979466
--- /dev/null
+++ b/source/tests/pt/test_unused_params.py
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: LGPL-3.0-or-later
+import copy
+import unittest
+
+import torch
+
+from deepmd.pt.infer.deep_eval import (
+    eval_model,
+)
+from deepmd.pt.model.model import (
+    get_model,
+)
+from deepmd.pt.utils import (
+    env,
+)
+
+from .test_permutation import (
+    make_sample,
+    model_dpa2,
+)
+
+dtype = torch.float64
+
+
+class TestUnusedParamsDPA2(unittest.TestCase):
+    def test_unused(self):
+        import itertools
+
+        for conv, drrd, grrg, attn1, g1g1, attn2, h2 in itertools.product(
+            [True],
+            [True],
+            [True],
+            [True],
+            [True],
+            [True],
+            [True],
+        ):
+            if (not drrd) and (not grrg) and h2:
+                # skip the case h2 is not involved
+                continue
+            if (not grrg) and (not conv):
+                # 
skip the case g2 is not involved
+                continue
+            model = copy.deepcopy(model_dpa2)
+            model["descriptor"]["rcut"] = model["descriptor"]["repinit_rcut"]
+            model["descriptor"]["sel"] = model["descriptor"]["repinit_nsel"]
+            model["descriptor"]["repformer_nlayers"] = 2
+            # model["descriptor"]["combine_grrg"] = cmbg2
+            model["descriptor"]["repformer_update_g1_has_conv"] = conv
+            model["descriptor"]["repformer_update_g1_has_drrd"] = drrd
+            model["descriptor"]["repformer_update_g1_has_grrg"] = grrg
+            model["descriptor"]["repformer_update_g1_has_attn"] = attn1
+            model["descriptor"]["repformer_update_g2_has_g1g1"] = g1g1
+            model["descriptor"]["repformer_update_g2_has_attn"] = attn2
+            model["descriptor"]["repformer_update_h2"] = h2
+            model["fitting_net"]["neuron"] = [12, 12, 12]
+            self._test_unused(model)
+
+    def _test_unused(self, model_params):
+        sampled = make_sample(model_params)
+        self.model = get_model(model_params, sampled).to(env.DEVICE)
+        natoms = 5
+        cell = torch.rand([3, 3], dtype=dtype).to(env.DEVICE)
+        cell = (cell + cell.T) + 5.0 * torch.eye(3).to(env.DEVICE)
+        coord = torch.rand([natoms, 3], dtype=dtype).to(env.DEVICE)
+        coord = torch.matmul(coord, cell)
+        atype = torch.IntTensor([0, 0, 0, 1, 1]).to(env.DEVICE)
+        idx_perm = [1, 0, 4, 3, 2]
+        e0, f0, v0 = eval_model(
+            self.model, coord.unsqueeze(0), cell.unsqueeze(0), atype
+        )
+        ret0 = {
+            "energy": e0.squeeze(0),
+            "force": f0.squeeze(0),
+            "virial": v0.squeeze(0),
+        }
+
+        # use computation graph to find all contributing tensors:
+        # walk the autograd graph backward from the output; every grad-fn node
+        # that exposes a `.variable` attribute is a leaf (AccumulateGrad), i.e.
+        # a parameter that contributed to the energy
+        def get_contributing_params(y, top_level=True):
+            nf = y.grad_fn.next_functions if top_level else y.next_functions
+            for f, _ in nf:
+                try:
+                    yield f.variable
+                except AttributeError:
+                    pass  # node has no tensor
+                if f is not None:
+                    yield from get_contributing_params(f, top_level=False)
+
+        contributing_parameters = set(get_contributing_params(ret0["energy"]))
+        all_parameters = set(self.model.parameters())
+        non_contributing = all_parameters - contributing_parameters
+        for ii in non_contributing:
+            print(ii.shape)
+        self.assertEqual(len(non_contributing), 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/source/tests/pt/water/data/data_0/set.000/box.npy b/source/tests/pt/water/data/data_0/set.000/box.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6ad2de625b40040a2d13248dd8b197a0f885bdc0
GIT binary patch
literal 3008
[base85-encoded binary payload omitted]
literal 0
HcmV?d00001

diff --git a/source/tests/pt/water/data/data_0/set.000/coord.npy b/source/tests/pt/water/data/data_0/set.000/coord.npy
new file mode 100644
index 0000000000000000000000000000000000000000..8bd448b1254784551c11c2c238af183a8dc0a4f3
GIT binary patch
literal 184448
[base85-encoded binary payload omitted]
z?Ig-PZD{xOEmZh;4d0C~7uWH9uG|bKYFKd$kE2SVTsf3Fy#%=T>qY%@?!#7f4f(yE z4qL^4aGyCEw)RF?eO&{aGRM#$&VC-U-XC|0@{syWhOFc4Y0ko(s2^*=Vsgu*kMfNX z@OnPN8qDcZk`~=+oyqF%?L@zgI+Xgmke};8`+4W-r-db(yjh#~0Ar|jl|PM52*n`I z<62+pLOxNeInyYJ{MTjDEq*3=aPtc^$ED+&^bAZ&M8VT6jGpNW81Z`r6+PLB^XvtF z7%0)+^)Zn0%6RmrYhg{FJ&qM%R1kzCS(5yeX6=vXER&6lzVgBFv_H+9a$ z;5>#Eovi9C@4Wt3EyPDgAhJ4_XOGWHt_>^1--CTOA8{0YbB5EPeoAyQdJdAGN8xL@y?3m>Jn84f zq0+b=&zr_mMu-gk;&V54DpJ_+CK3ls_PVY*b!Jb=+hH}Tya>V0-cwQ6NkE*LDXzSz@4#B{G>r86&55NdIvG)+B7-51SyvM z|A%|#wPn?4)hkDu82&~0(-w$~t$ar8??U+_BPdDLR5He}46>agn8^WaI)3&TA}_|l zGwzERv?>8JEYzrWVlS%d=O?av`xOHUc8Kj==Q+E2F$OI6qr;`{^yYDw(3!_m1YTAWGe?mFWl@yunu zws?A9j?^UeBhoroNDV5AaOfiEb)LJ3=#zuV$|Q@PsjR20zH3Aavlv`FWJe>rZQ|dt zYP6VmP|CM^_%T3-Hr9T|rMfiwNl|z#cB9r6fmm8`iJkLO#^NU<=+-qOY1XA|Y3gRN6{5vD1!AD-s9NxGU?$sC3Spd-0Zai(M<3Bm~id%H24n z=1rAqoPCz4hao{bvC&Og>U@H;a1MDwk8_C}?E^^v-8Xc6lB4Peqv+088w^*SNOH%o z;_~2aXt~S%Jr>*vl`)v)Bmzv@-eOa&J`3UdEQOxBJZp1M{Ay`TJp*`#XV@pUa?)k= zIAMYCQQA}&VM7XKoj9tdPD@Nqq9gte;&=C>?y*WV%+`S1H^d1InH%uR(}+R~p9%j( zS;1a!A{~o*iLnW%yf0K3Q$(L72&_m!zScMK%ZxA_c*FBp;o6jKG*0X{u?BIY$BDlN zRl{DzAGd=ilh*zK+SoNr40(JVBh_^2*BBYm)X1D}-Z!VjTr2V5l~weZpB76bZ8~{PvrUMn3ov+x}T_hHv=t?6UE1QMQrSv z;gmG?mUQZL56OYyJP$A+3w29e5r6j(>o#t@V7$CTvT+Lcm&Tov8cw~6Rw>^XaK~VY zvm@=v*-3_vlE^h3^)hWT{cEBG3FTSV1nW3$U7V zd>cCF;nGkCocQF!^WszKb-4|``}yHF?_$aBQXt*Ywsdq$0nBS%rS46`$!V%SnmyKH z8GZYsr?@$zCtYz+AgRefc30hqtUpFmqN^cA|6Yqnjm6wu zr$&a$_rZUQx;UnIDXkmYhaLoW;BUb~d^>am$A7taYi{$TeYQC;nZ1I-H*Cd^-c7LL z-ImWIdB&&V0@kd*&)nVr*IVyHVwpR(ch#|iE1KjXZA8t|{ZeTqcWwU!4VUozvu+gn z9KOSBeXPU-a!({3nSRVfeuH}Bp%lHwjMPJ%3dtWl~XSzJ+^?gg4rKM_2==CWkhal9=uJ(+XLLzLC$X zGIp@)(x=!z`!%dLd5GJ$%%fzrX84@1hIi;L>9B)`1m*lctSRFFzHt7B`+8rvriDY^+%k;MdVVp@#-s|jA?x9E z;~olCbm_%(DW+#UWgnZXSwr4N;bFr7+$iL(GY?ZL-Jy(uSC=Ab&KnpQRzo(;g6hY% zLFQ{NQ)< z>Ltm1;zz>p1K2ueE#3UI2*-KPR8F=#CCD1k`i$pTJgg@II5%QT7iY`%TShnJQ-C;xH+gV zZzLv#n0|C6T9FgzH$>OAQ`~yoEKx5?ghj;o6+WKo|7yxM(~?d=F#Cy$E)lxxVsv4 zD^RBD=2BK(5DwEj&FGrN(VMlZ&&=Ow4ZaB?>yno-kN*N z=)9q*9;75#uIR)2ASIj`Kb?JG6&P{Zh+aK15dzH)c$vvYQGLvJbX@<8=k*&UeQu1T z9JdY3FT@hZnt&Hq3CV{hlgFK0+~2H68Mpa<>Gc^ljl26o)K5wKU?3?;`k>y4Gch^; zXSLA-bdPqU`auI}-}}ROmTE_%dz?UZR5Wy4Z$Zq;Kx2d%sTlI?S+^_D9PkXqrSGvb z^a$>!Xo;3OL;`1=-`RPPDNMS6FVq_e7Yu1S&)_LMzk?M4##Hq;56gCMMD0lvs!}zl zJuZ9)8hDiztlEt}MePX107=XKO@jTZh2#{x2V*={G2fN@-(T)Pk(xRF-LDom^S;QD z4xWF`RHK7u3xwAV#$?)JC~n#E7#ciVJ?dTn=QoX}57ye^l=&Zde&RR&jepO+Yuv)! 
zbDfy^DO7yaa~35k$Wy)h15BOUQ`q;wn|{RGVy5e1OpLapptl1t@&S^XeE_B%Kgu++0H+PWPvTtminFv>H#}U&CSFAaVa% zKYAFo9c`EAQlQ=?e&!WXRAET+e6FJUssfAu<+F4Pb&69{rt-o9Wb=K%9f>-%%XZ*U zuLPmIaUj^zAllHXL^n1EA#GzJn;$ew^r~Agj9sUIy5|QF-L+UUF^!)&e%g>tP6l%k zf3fR@qqwI;mQJYmp;FD~l21#=kvs3U_8wG(I@_%B#IMoJy?Y3)ntc#OVUCn>{U7?i z{lqLcEkla-RN)tQGMO|bp=PQYy=Waqiu@Tgl(Q5Dc^|N8+6G261A3;0xAfl&K zV)amcxZfK_&3tZn02ko7wFi>g&%@VNm4x3pqHMYx>FgXOCfb|8*6K6fTRvx+XN_rQ z+hB^ddWS1_v?wp87)8S`BB-Yw)lawOIm-h=toa-1X7MVL+_XqVrHAzTmGSgtx-s;2 z-9d!BhUDrN&YXAIit%%H!&-{F*oA#t4)N#vUm-AI1>WgriR(3kn4U7tMK*Q{vuSO)6k(rUq54%X8vp4hyk1no`HMP= z;?(I@yc|9Ln?>_;PO#Z?wdrrlD{Mb6E8MiU#fa}?$UUMPt$vh@C;Rf*jvSuzXENf< z9sRI()>a$~elDH)^$_2Wn-Mqiu%-GFX_L`#8nWdN)_T~`cZ-eu%o9PaleNVY!!E+_ z;Xui)O~$Mx&X*!W&S2sy4@x*BPcxm>@yaC-^=@fWeS0mcEQ&x@xDs7kFqYm8P^IZ+ zx-@pm(Q>uN!D!5mBmeamA*l12chPyAToVN2fBy7#U=!arJ;KnZM$mtjkDZpiXk6cI z;;3a!kUi=qYNXY$vR&U0`>rR3-qfYMGuG7p=`Cj28<5y}7HrmDBr7=4rTNz6^osZ0 zTa<;dHFq%JU~iI~2%tq9M^MVgcBXx<8h)EsNtPw_r+;S)vEq9+Jn&YO;hAU+CQp8j zS`?&vLP!(-K;z+Z@l@Mm6cAk5E6sGovf%`)Y|EWN`t6j0JFo|C0B-7;Msub(81v6K> ziWT$cL4U$pF-#?&&GK-dA=j?4+nvE=IBhrPDQ_gLWq$Devp}kMa)7VsM`RYO)4<#FylVe2^6wl8(06RB4;@FMmsiRF8F!pO$EkZ zKE(d{Zf8NSCs5?RdN!l~U|Mpr5)B7q*f5)1{A&9Pvn_jJ)Zc_gx$Dun{P8q2#+wdI zT}Fz}?P=ZYAPhOenbYA`w5r(`o$Ejajn8nT*ATe)TtE-58&~e+nv{kT zsB)(Kw!2R-L7^1?z1O7ig(|d2{UBT?$FQ1rhGg5ox!TRULA__vrHOyx*CtDol;woZ z#5mk-38TQfy~zH-c%*$#W*=vh_^afzbluH&?524JKCK(YW=J1nTe2lN7=*ADKdYEa z%0!y{r3XDI(xb02i@ZI5O{MpN^F_Tr#c1vDWYwBeBu%rYP}lL3kRQ#N+mruckV*ji zH*!1He65lm%+x1)+wm~I*OPuI@s8HR-$*;rjb=^>pr`M=a2R29XL$prx@|^IQaw&D z)WR7)pPa7v2UvFrU2+b1bp0NRerZs}(w*YEp*^T6){LT+)$wxB17!PbW!78tY5rRY zP1X4e3BQBi-nbV&$L~SEy+0Yw>P>47^`pkn50Y+QFTl@UlQd0Fc-svg#pHgiA{qKedav}4c=MJ)Ka5*gzik8Zvf?p)% zbn8W;**9p|y2EtUPB=U-kjz^S$}Qdk9quf7Imaho7dgYr2D& z=7!IlOL#=EP&kQH*{8I?8abP z_NG6b2|0)JV|&m!p6eSka3pT&X5no54n)p0B7={OaCH}Otp5l8jy{WP!zSbC#yDZ6 zw+mTyJrR~PZf1e2ZXx@5IjeN+M?niEw14Iu-0;z(jU}&5nA2iQi<1w*|KD2d`;ji9e=vFpT2$k# zMvKZ7#M=+l=vJ+lINq!rgC1_jye$*RRyv$&)~kvmf634fuTIW=9mk$et-(8s_i!P5 z@zaJ`G&kubuxSZ`>!WR`bK|X<5j$HK*!EPw4Jtq^227clk3R{dFxirgj$s z4BvnsYQ#3{<@nfWMJ-2lQ4%zk+U~DI@!2IbOM4@dcJ^Zx<}*mX-9>bX`GxV_nxrn) zCD?Y%fd>4Mr~gjs(WyI`blfq7jvZJpsxB|a*T(-~l)eD%NjY$68%Vm({~%_z9t{{N zPmOQGXk^u7JePBz312h$e5*V5uRe^?C4PK2Wen%^u`I|T?f*MTp4xg;=~fG83k~er zd5Yb4>MjHi*TjZy4S4!pi^|QCaYk&vgaM;5)$P_}2*O7%2GJ6mjM+ zcixXzrnU-oNoOsoo8e6sM;@$GMxI*SiUsw*m3VSTp9-3Gvxm{+XaxgRU;2%r z3v%@Kl)1#<0;7EvPW)DTodqAygmMrtTr+@HYQDo`7Xw;tp+ZMBEztOpyGA<|r6%5t zWO6s*jPDLFA~){b801c)=L>Aa-%{AFUWSUi!K9hJ4SstA z@#RpO_|i5W)3{&UeW4b0v%4>K%`;&R&rk zmNcRDQ5c0j`pif2o1B{8vTC>>k+r61f_Sw!5hn&1(Vi7b)I0fsV7uLjdDMcSHr zWLTF-=ZO92KHn8Zb#BChqfQjndjR$*@EmN!Y+55x1@o+emGQTC9a?S0=6L&Dopr3SL$uey*Zp! 
z)7_lhYB$sIk5g%$SB`kDWe=3%DQ-yBD$?RoZ4 z&4a=_k71d96O&imj-!Rc$^Bgg(`nnxVp@-2>gy-yv$!Ycx`^1s`QY&%rcrqR0~L>I zJ@Ak}vo{XvNs~5iWzUkrkiPjj)^bj*$8t5Yvf~-36?d72vJp+ui=_)S`qbH(1#jMY zU2OW2pJmdK6xJX-*&ahfHF}alLpzLB1Cb`bAKUi6lGyXS`7Wn)>^EOYsre@mb@LmP zOBE=iQ=8UYEW-0oY3x<6f#kT`io&1K|8aC4ZaKd1A8%+68d9kw?NX_<=(*l!Nyy45 zD-wn5>`^2#LZx9vB{LDJPf0!Z9a-5DA{AMYC@TrS>-+ly90$j9yYK5d&-eTFf`?iq zn<(m1SI{H)raB5)e3o0s{k#KgoAJPO4g7Npn8rvqF=^y^iM%zj%|CBr-r~W++nJnA z%$YEe$$i)d1K#5@3#Mx)UgK9#D<Yon ze;x-lhQlW!88cRKuQt!DJu<7q)%!Q`ecfBUPCtjx$WR1n$cRH`cu?q}o}%%pB9^2i zBKqemwrYe4^~v<6A7}Zysi!d|PN_iorzx<0Voa}iN4LDmkLB$j%@*ZlLw~gzsXN7$ zP1+_Q%6h;St-g=HXSU!_fE9(9A4bBGDNst-EzWolgf1f^s^AQu`YUSU979cNK0HwD zzqtb9?qsOyO{0F}`JO&hDs<$2!*9b5Ec!E+~Y_&jA-6(?#dk3*8>$t}=?*sCUJY4=^8FY zr$JNOfh;a9Alc`pbm!DB+;-f6a2d{kZl1=>!}vM8Y$ujxY@))WY3O&M1InZSVo!Px zO7FT3^0>@olls$8R*_Ar<=DJk4si{I^|4b7%JwAdkqx~pTnnq(zdeHtI4MIQm5@`$zoOTfjZ(LJ2Zdp3v-z`Ef>Z9jY5CXQi2BHVnpSh^!&&wJ zcZh%VzgS*h{Sfx2;>07z!jb#Tm5vYSfz&1A=@Lu9cGD%aPkR#NxNE~Bw1frRH4x=y z|H9e_`-BZYPvFgYAM*P68zD~}NxZ=O@BGj8_%Kck822B{+@9mh*Nx<@dj>ZHe98V$ z6OM$bk^B)wT04z*m92WCA=Hxcx9^6S?+NqgDX959iV7DRfx6#hBc^UbV8?x^8}_4r zi>f%U%mq*7YMIGyH@e|rf?%~yq}))VeL1PjuDdfg(|QG&J`>@?-L}tqb05y*9^@V^Pw&e5qTMG6Uiyl{$)C!W9fiMr#w&mpE^%{^C1$9q>={Kt%(OH85AFqvE$l_>6ofIpHf zp}R~PYFAF6$9%4G=fV#BFv@2qH=2l3!!?DAOYB+gyDM1vxJI~@!}B4Wi?v`}u(!#h z0G4GLMlDO;VCSFbDCJzAW-)-W#@=K;PFCoPW$-cBf&51kX?t-IRAuyNxVk(III*6s z$eD}V+qX-97ZcreABTjLdq@fjC68~Hp*h-{)VS-WGA#$+4rox|>Z6ExWP*lKX&4u} z69F@fDRRYkIKJl&$M1J>r&7RRnP_-?{43aZ4W+_C}g?El{VyODbSsHw8nTjOa>^9qs)%ndaQCVXE=R(8#-{BWpb+Mgv==ef5^m>po2| z)jSNx)7HEvl?V6i#R!gEAg23nMz3L+#WCbdoCgq&R~td(FCivu0rPrbBdi8gjN1~ zkjvdP%Xa8u)bQc7~_g`%uTL+f410xwNY# z3SE1qWApo2_-3EQHi=$PebhsYu;zWhFXd$w{!(n8>OlXyx2!X^pwkl*X=c$JN|fI& z>YUsS$6GPR-G?76u+^MByPV+RgG&zU3BMYa320zV0k

8_D17ES@Pzxp;`@_~) zABN<=U-&ZcEC%sigxu*~L~E*9tC=&!*36*Ei7u2@Ksfw99>;H4Q026l*z43=aH@@_ zI}2S%VZ?jhCtD5gr2Ft4=`7AP9!KlH9N>PO6%_nV#4{Nws-_!r7pe-qi4?KsR3VF% z*XH?a?#sEGh$FKHAfZEn%oZzBPiIr%^Ltk`-wUGoi7J%hIu)<8-I;>p2yxS+kJ6E2 zPO=cMLbNS^w3s8&7;_`2ubK|kB3ONUm8V!l>Nm^ddD%& zOO~1TQ)FRnBPnP60XS#&r)0igSY&R1FA?itQnXW$;rk$^!!uyXGtX{2eJSf*Gn{O6 zsD{t8WJb(D!O39q8YDvb?Q!(q#dBLz&9LLZaGpkyrIfbo*!fd|&tv#}AXbx9l5)gp zj-Ak$G+3_jj{&RS$_!@U(1ooyVzZ6=Ct}qj!^em zSukz5j$3YebjmnXsuw?t!Y{mI>Uj?^-+Mc2>uo@#P8qQE+m5w~-^8Pp%kX@G27US> zN5*;@;su_8O%Cx8TX`=(ptdjCmJcUpKb+qs>jhPlyT}exp-1lOf<`R&oBvj)d;2Gf zvI}CUo^@bq<68vG5!s26dxWza&Cu|*1drG2Q0HJP?puna9Hlj+Fh-Sv51qhg^Y20r zkF7{`*&|N9W6s7-7))Q?v{=xBv1F~b7iR|VphKbr;^QNdPE!fh@(#?$9v`tmHc{H3 zR0wf}7sa2+!zXV$T2PS4`KK{-W@?=1`h6(IC}rAIScAq7K84ClyKZ{GJ*_d(c#p&sQagn_{bfoTmA7_RrSx>r zRon`DhNzx~R6g}9wEk5>afmt`^>nCivnq*^yXfS0B{cE(zxbvV19bLE)pt2y_s_}H z7SfF+!pUcrC>~5r?gp17I*)R^`LFx-{4*E zM7oCkqz>JKDWa~sc)LIEv_@W#ctuMkHPd{kpYl1LQ{${1?!RdEbH?8Y?k@(Wg8@)7?_Z^W1b9HWfzHyGog7m3xbZYU}B@v=N63YVcru8&lDJD9G+%*cN>kc4Zn=D$@sJ z6F7q|IfiE=ITvbm3M{SRaz-~GezpP5Me`%geZ z`oHp;Z>I!C9|XDhPUz0rBqW<;V8=rr8u?!_l5{<&=k8r}-E$Mw#H5K)#^Z3`=pQa@ z-9z4vr?6$G7Y&^Kgu8Q8XhiFKxGs*RbB$pr=DydSClX=ahij*LT9_fbeX07lDGTbenJMK8~(P|rvD^fm3GG?TLg#lm@FKcjN+4GY`9d#<#2SrE-; z85sG5pWC^oXvX+rmSCBR_P3o<*Dzh`Ki?8}=Bv@?Pu|pTbUVIQE6_iOVWe}mKjwD@ z(kOm+s2Grd?uFM8KEMcPbG@jss|!26@xDVpTZ~)FJ=foRa$f8SG4HqnB~)5a<_kqw z9Vs*sJgex>5b;qJ;q+&9|jD=er~u4qrn=fG#Pw zrwUu$nwVaB1c{H|;?jg8l5X6a@W3bz_v7}#>UXo~G$$5zgShj!TAieKhKmRHcO(6u z0phQ?U)URBh@vk%FT;I0S?BD;9g3H+rKcwOz0DL>EbdERM{?(r-b_)>W+lbh$&lBh zU(lbgjm#B{!kb3p@39MT&DEuW=$T?XVQdogcf|&5)fqZ@&o{;v$rK`?7zJUzbOm&=ERRJ5kg-R+^q#iU*4wsdDXQ zEafiUK}Yt|F1_`nm9j@nO*O?VGd()+;5>wYY>0Z}$@y<9ULET}nm$)hw{Zde@f-;~ z-@eqGkOWU#XIyYw3$uazIXW^1>XlZw)OZRWyWXOj(4aWwApJ4q7n zh$Zv$U3_qF8gy(e3zCe7iF`FGwN)dYS85^{D0PXQ`rb{m;Lf*vyo(~Plx8NeW-903A8y19>CUz9y{S9*# z=iv2V1(f*Ni;>4xu}o*XG9Nv8ifC6w!{g&n*(Fc2u4`cJ+xP6{x6yRuuN-9$;jG}s zHxi|X+>hBT!NulXtj%@;JoC0fcOd8W&v*ulGn!PatU@7)Elk5b5=wn{Nv4f=r=cEW zF{kw{7O0G)_f_R!^3J4{;7&`X&qtJ}J{?_F0t3l>er_y6+n5xL>SjgbKc2$ooN6TQ z`hwT3=lJ<(H6~XL6g+eX5f;jdR%!3q#o^`n&)*EAx0=x({@pLH{12axT2jXJqsT~g zgyS7!dfeti@wbjif?WzF?;plO_53%ev}6rl>BuPc>SNY?p$+RRC!q0BFYb9eg>_xC zIXk38nDxRPF`Ii*ul3sGTcaVm_~_8`4Vq%DN&_}MEJNtmDU@X+q2^zAg%SPRaklIu z-i+EQku5Jkajhb0O)C~49I009FkwWECqmyX!>+%kJpaRez9v@G zH+Cxh{U}GXc~5vv!BJ`Tn%DfkJG!ky#S_IqzRSc2}CEJ%H-8a3S3<1@rvBuO4Y#+|c7g`Zc@xKxSs^uDmL z_vLuB+MX7K%h0}94cfm=g?_yrPj?1{W8yjsx;k+eVy6FO3JGUmG{uK5`F5~BkAtKu zpTr^ocQJoR4;r_z4H1^QIR9e{Tc70y4nOWfGL!TDWB-%%aF0b;?i0NE@E!B+ zI?y4lR&^#1&4x^to*=}ULxivA&ta;d}QKPTYjZ%Y?>9&osEFIxKdEvz^b zf8~099<6=Ak}XG){lj=n-MX4i{;ELyhEE7R-HfJ@M)Zj1T?}V+!!T1zx_;1x+CIn7 zA-ns0KWa{S4%eajOH!_1ACJ4Cwv?mr7xT9AS!kXE-mdCL(|%oHV{@$7S_=(&9QBF~ znkzyfk!PgSpR;z$X>`@xmx4)=5=>R;mAjTCnz_)VOS7=*=t&lp7Kgx5lM#~aOF{eI zVaiSg`V#UB+UzsCEgb-Zkk0b0YuxBbj58i@mL+fAoj=k`icRVLNq%&v@E|uB3;#2w zeTxp^!GocwK3oKAb_UA*2a@Sxo&_7<2>AWBaSopsD~e&ex!VqGhEPXCupea7rc~^`6f3?DuV?HW|ofMN^lwskz1{n0J z#7}wtPO9B5Y%+GDCqc&aa_crG9J-6yN-ns|J>d7Jd(u^{0-U{MNC^ffuupvgLIZWU z>(P@QRCC@~_IB28uoq4bzF=gBsw7tHu=M+jrPLDNfo>&R5m2W~N#~EjsVxzjca8{J zn)A8e+lapDY0#7d-lFZf?qsswRct8k#Qy&-!XjflZEN+Ty9ZRnlCTCGoY;=|URzjE zMJAr}UaG@*194f}eE#n4MDs6oGMj$OTmOzD9gWw*<=PognPyA%gTi3L8AZD1!|4!b zHMLlM#>}YG<%3h-VzE)AX#9K|ytp@X{{wd{xTVc*_V+~AMprD_$lqTF^4QrlIqdkZ zDbD-Z0ONjc<=zR^e1^fFHSReYIIs^%cPG%l>R{Tqh{R^zyYL+N6RmyEv8y#l&_Yg> z!1Mp#w*JPTGb(g>R{&k}3`9e?Gu4;4yF(#4nRs zozqa#S(6O8fAi^I#Vt&^@d>v+{lQ>$MOuDsC$>j7GSdcA5@H6@y}s7i*UXtkE4dfQ zhu;OK8VUNjtN3iqhK}j{z>;vz9~-ilyLRldK z65EfSG0uOk6LII8DFsKBU~1L^B)qqx?w1{CC@YgJFp^{AwKn3(^q;tO{;RZ6bvPMF 
z8`y%{3yqi^18Fk?9q} zfKmQfp_q;xnw--Z5`&w)_2|OiVA^?4fxd@a#aWZD-v9i&AZQH|S+Cg`EpJBa^ZMhU zLjY~vyb?ioqey#J61Mp5U{fk1Na=~J7{j@?zJ`qbym}p1;Yzw$3N)jCU-D|-LLNUD zZM2IJ+rm2jKN~f#7xzZ}IE&}J+qpxd18z^X>1T2aLX*dkdBq?sYO|tsR_EYS?Tll4 zDly%c`)-wt@MiyC$L;CZ}L;2NHL594BmMmz7&0LgHj870H0dB(<)8$ePjd7@GK|)b;cVt~ zNAmBZA^J50BZ0eSHhJtt!K*=--FgJw4pd^jh9ljc%Gv%W|6x$ZH}q^Q#MGUSS@p4a zAtuw68Xs~~_!(uSY`TU6BNpRpfGNpxpNJwA;NpdTR5U6BCh?nKf6#?orn}S4t>Y+N zcaG%I=+#JbQl-`#0hC?DC~j6K&u*9Fj>!moE$%^MCWP}ntsTNA*@}tbQ<0TqMk)Di zsNOS4cyL~c{(JRUI9%KWpdaFz7CU?b)m{;GxI+93J)zlV%wV` z!nKRhl<~6>I(*hS*-~Ek9=%uiHOmuWF6S|pXI(^U%pdu|fuzX$FkPKdv~UA=SQptaaYrJHEmjd#k9aXQF zJD*`}A*iSiXUA}!yP14Ejo?1W+gl*Ep5PvWS!VpZI-8W_N};^NmFSWTdEV$wk`MP# zSwEFFb~8a%V_%YT-ay9hHg;!a6^6$9QNmeWd&{~Aq|3`@M; zNM5P9)1;|2?9J|0eAVwoYQrx`a-*kWc=vh~@O@{5cTYO(tUwd)2hpdZzBFl?g#5Sb z(h0+G%v+s}>wzyZf95dkdBVM4hU2L%R-3fPw&3?dQw)bY%I_3P9^MT`-({04pxVvfs;XXm5lADLN0vdYkdIWq(gnXgrPhAv%&fW5;8| zWEUEpu1E`6E*7pj!`2Rw5ffG#u_-l^g(-Y?vL)~%3**`I^}NFs^JyEiOV4M0ua5iw zESmpMSu!}}%ofh}rxW#U?A9cEcpvtKUi1<;t{g^t9C=o4V^6yI{4X?Z@3WSx*|4a$ z5qcDPk?i7sY`(M}b_!GI$DTGAzICVMViy|E@AhRpV<|iP3|h zXuwMx+t+y9tXLFAVfBk(PKmP&x0Kz+*47ecpuke?8GMUXPM_kFve#3N-d< z)6^fuP^cM?XvG-) z#cy?}*ein02WpV{<|}v|Y%X-&`U1~s(?qBB>yi9Um!4<#Li`L3;U~{S?lqo({eBTR z^!ETuJE(^zek$Uh%yL|RuggC7uf?7s8=5%b4~&mkQc`;~nYH=Rq-9=W%dK~?t<|6a z`Gf4VY6>!z*pYR(Jk>a;QKP;b9a=DgjJ#}kcWofO6H3ug=!VGt$DuujXl=2`bRW)P z5&zC2arbx3IoXcH!dq}#{ff;{52oJDj^3Aw?O_`I5^qXAanD9KG%riQ`I(=QaWe<6 z!mKFqaW9g7>xXe8ZE5o61=O|5j;bW1us!M^`aJAI*S0J}cBHGIHGT{2U2jMJzw5c{ zbsNkaE8x9JTio-G`^xM0V_oi2-d`_4bVxq(hW4b^I3)_^-Y0FX95&*S66us{QSzmO z7^tQXBMW89;n~<%ZCXOw%U^7)?0B*>R^=SkiMVrb8yk9eiFk3ucWKRx>8$NtCX&63 zndXbr_{n$F$(bA3`-XRH*7ZR88`_PG`F<$w;s|y)aRk*U%oc|ZA)Q8`0U> z4l8r+qrx zMn3ueGRr^%hCwhaU9Xl<$luju9=~)=_z{ z&(PiY9%?Ng+2(EPg0+tper8?9K|2*X)?+B|8ii9p@;d4{T#-0rA0y-x#Oyoq*yWil zwhJN5;-m`|DcZ5u2ggy_zHP|-x`pQTa)sfG5p4KB?#ogv7d(%>z!+y4sbBA#{QKd> z^SNhnHEkd*aM(*tmtx6p{t?kWDG)o0Rmf)h3C=!DL*>UIB;WZI&7X`(J*^eYCyLr7 z378x1NO8+T@N}jMJgUZH?i|iQUfUN2cjWPQ&H*g);9lM-mb7>59<0pMV@5ZRu>GDJ z*tNW#Fx+EGXZkD9xh*f*o9sQXv6H8JVkZ7maHbtoRq5FR2?F>tA|pi7>?a;XFP7rV z_A|(owWdBi2b2~F5=yrbjQE}Hc?Slc-V256+;vyjDEM@nM%7CBP~?4vjqRr~I`9q- zoYo|RQ|$=teFaL6>U>u&Pc3em+&;x#taZN9hFggB!F_?s}gU_ zqxs}k7O+Q+T7DJ_{X9G0!6H{MIq0QWb^DnkHWvR&3`zebNt=JUP#Zv)Z%!b)0L{tzY~xC1ys5ndHDgruJX@oC31bm_@ahQU~T>9>sb zDQ~8EJJd)S`IsSf5u2XI;dH?o@%h+OEahH*YSk}el|eHod3ie2-fSgF*I2}dPmy%3 zEEdA8YXq+y@9=d)oG_#6JYMp%Xi?@Rb^eg@KAax?7xN44xA?9)bra0^S>|ZoI{cY>iCHB&p&@JxEtPF%^G)PX+jAHE z>c65Y+L^9j&xP!a-ORjT5&LDxT}oAN*vU_Jbg1hYz9^qz?jKXpQ1BJ|-tNM{>%FPw zj{!BehSR;efiym1J*~>LqGIiF(9X?+nI-R5@At*Sehy^rwTZfo>`#&BxD&%77*C2X z^4+MeD01(^Ov~dCe??KkgU7tveF3-jtCQv|H5xmG=Vv}|VGo0~>Cp^Dy4t|chd7Ps||s$;uSZ{gv{gYb2G0uPdoW*QNnfZgU+2?s}8i zkqnZxh^CiA%f!%JXWqZ!4!V*YjHo*WZRbGRl>7zfxPNNI;7;t_wUE|khoOX7Q~11C zeGbIgWKrpOkp65(y#l&aWKlzc=YuF|zX1S3~%x{FBnvZ934Qx-$ z60t~qTlvCYWzyTuoQ*$hC8r)1!`auI;@U?s<+7?`bTcUIybJBPn29A_cI3sq>=#p`U|h?c_O$`nz0HCel^gKc{w~&@t3X}qN8Z1C zhOHsLg<%mMbT0jeaCy}rW*1e8&z1(T{%uLQ%iL)8={l@2GvmIoWCWc!0HYccDqZZx z^Vgf$VfUA;!kuSPcs}a44bKYO5zSj*j85fpd~vhHai`uCta2K@g|X0l^I6<-DF})^ zdQnNL9KF2qK=?FAg`P&(h{H;5V*CCu?lttM9}&Yz`-7fP93@L@|ESTW;qu=5-4)4T z>`%Ve9VfQcN7Lg`KcJBF7VgtE1`S)5THa;gjfZImF?@D+k_)%S;i0p6kA4>4Y01*^ z?l)j}C8}KI(OYPx?G&4=hC&i+LB81>S$;EA#KhZB>jVaXQZ zwftB32VRsulDmzC22OM)SBcK@8Or$~yQpXWEE4Q<#8WBTU>NlW4U={uv-uQG^8DVE zZhR-xszL1^HOZ!XB*iBz#^O~TG`(dVYIZB3xBq-Z1`MIecBk2d_w~YE{Wzp9YlQ!0 zJ$gUoEX=OtFpWp$?2G$UX1Q+->=(bqDivcoFwKTl-rf!6YMw6*x`*35i&XE|fpTw4 zjLy?1%`elby@K;yf6rQ(;empaxyQMsL;=T+6WKZKrV5d~i 
zKA2LHaxlwx6S>e02qDjSAoLro|7j5WR1NPI4Q$+MO4eRIY3{}-?m!>M^3AR2?4Bxo zb<33uY*~Qk{|%sBDnBs1a}UPHRIcCH;iiai`d)?f;mAE9Yqp98HIMN|E``jAVAm zQ-HD_)~4HGXYE-bx7mU2_RT`V4<*j~9zn_v`HU{plQOGh#ck8KV@KRT`r%%LG0%Da zBJ2c`X%V8$oycuYEuNabfjH+Ozw%TJa>7=?YV}EL*CfE z#E4{`+EMAn`&eDnj|y~ohbKH6eK;rAajrMnuLz|+kE@vf;JrxDHl-@lF;w2n$aIn_ zHp_}AT;>7x%bH%xRN)x!R{0lx7VA@&V^OU-X-DzCe(`pp-z0frhl54GoIN{?LC6Bc!t>T;Xee<=bTQTm2`7T9D=qpvYNaDE5E*C zWrsX4cI6N{rdq`&eIFxq-?kAsDj)Hz+MG^16~VWmi}?qRXFqr*eu`TtT6=WjGoQVh zIlPhV+_M>D)81i}aWO6r(xdG&)OhiE4$T{DL*xACQQxI~Xhpz0^!<{6+$C-BmRpV< z53Q+pc!a%L%yzLo#crB7zv_7Iw^*=1T@Q~Sk2_Vg` zIB9N~GXmEYG8;AnADX!%ASn&kew)!XKKod|eJC4cyAi$W+N6p<2har zNN3km)C~0}X9;&e(tfy#eaLuTE>2AxfSMsCQ0A=kJE{ z4eitDO@DJJJCtFIGw4w60@{+wSxvp` zu(M}}B)RSjbWR-=!%q-Yue;K;5kJ_Vc2m5~_r<7rUYOKN3s!g5Gp#RK?9${2;pp)i zl;2K~hR$rjt{(m9iM|reH`1fc8zj@Q;h>ZLS~gl{wJx`^o6>(glgM1oz2 zRp5$}2wVC*>?xLwxrtZ)4={W0M|=-?3h(*#LREwtiP|p&+vystacBt~V^v^r!;%aK zy3&`ykFm+yg5JfYp%3p-_L4WItwWvZi$MU3oUG2X>!+~tP7jhf;V7KhIfPce)Z)(J zGMFtJfRhvZ(8gJ(apTJlT>A4q}RUuJO)P zcY-hHQ#fR35Do5+ktLy&wK|;qs%7a`)ir1v)q86kZ^5d+o5V9AA$)#lLF|_S;sYmB zlb0Wy{1#D&T^Bp&c23f@Yzj4P4;BubXu-pTQ_?ZQUEI#KBgsw`S~Xmko@yo08?D*= zTwfqcx9x=6s9!i)y%`@C9>avgqiEmbKN!`{xubn_iLHsCZ7OS^RpLR0WiwDZQ4Zgg zBM=!eglDC-GfgWfR7|Goc=a z6InmwO>mjc89mh(@lmj&?X}$~L)#Uz))>;gyxFwG%!Q&;GGKpBimqry>fgQ=Iy&A| z+IK0bt}~?Rf;t_GT8S-xGjK(_o3P{RBpRS~3`x)9sNlu}Sa81UE>VW(OS_YO68~P_ z{mM2g8dLO2J?dY!hE!K>W^>cbXy2y_^w@7N88$W?BaPhXv)NA+x}?IzwS+YXYSETa z6ND;_X307}`}*HGYuTL>C{wYexjRc)&An-K=EVf6i1~_=ek!DKY@4^d?=Vt)xJ?K- zkj-Am-e3jAc2IpijwUJI#PbK%bm@R9_v;#SXSWV!&N?A1JmWwPFEVju@?T`{^&#O1 z=SFsNZnBM*c)lVY_eMHXu`Fj*C2%(FgQIXN34&*?8@+3M3Tbsc9xtzhk;Yvpes~Jc z7I|@P(msD)^C$QIB(QDcOqJe|u2x zkPvG7p}@P}S$O5!hi3B5`AhEgxR?BiscyQA+9dASNN^yP?l)m`auckBzKVNYX5jn? zHIm8Zd*O&F!q!$53RL$K3-d0(L~#pDDS(=7LB72ch0kZXlO|e~G>7(OkK=mKF8)0z zHs~#OZ(c?3>^{Slcf%5|Wl67&uwnOFhGV&15thDFqkrSnFgjo%&5oZ*)S*JjvS)Eg zX#fjr<=)pnkz)AW@u=mzSi|-wtVcsIJ^m608~4Q|4$;E$PuILJoy?MSi(f6sw*Nxj zr^A91e=m;7w4tH@{^0X64LWJIokl$kBhS<2;_0L%crBwyi&Q!1=TQZQB>K~`KHo7z z^Dlm9=+mq}Q52WI1yX0wb$-q|8h4+0r+XmlC{fw)0#>r

)*OElAOC#W~T87Hl|+ z6MZ_^S%XBjddvtq;=de59Ua(SqD&#Tj!Kj^t-@2k*O=gzk4MvWNNyKrZ>7#66V7)1 z8a|IU9`8$Uey+gWp1Yx1&AVJ0o1m|2L)I;;c+ZHtcAv@9?YD-|w>tzSg)G5tyf3Z% zT8#G>me9g@5hr*CqDWnV>?idgYo}v~C3W5hvZSL`yz@QN3tlY|WWrfDUg~A&W;#~N)upUfA&}m6o|Wd$wZIgO|Cp18H&!{0rO^2YVBl^={)XIhS-*`9 z4vI!qX`Eo-??S!_<4_(WM?U6$bmPQr9M&F1Wt>f3(>DVm=kI0rD}vS|Z%iw=z`H1Z zm=V;6j@W#}<()NH+e1XJ8*L~%R*vq{zGBBNcaoWFMzS-%dM|!bgrNgZutFsxs_i?7 zY@$UPT=vb!pEKj= z+xf{9oSiMqc-eud)-HIgQy2V~tJ1_nvh<^Hf#{IBl4p-)XRC_C^ zZ-~QxHk=jn-5TBXr&0R3`BZGl^I*%K;O$RmmRa%x_4oFRdk+l4$}P_HDpHekAr*0D z4QGt+w??q;cNP|r%kr1)VG$Q*3V|MX5gn>5%$i&a2Rmc#66SYDsS)MGB~g^jQrek* zMEvFu4TbiXST{BX>(tJo#vz3KGdP3VK#P31o9oPX?xb76@b`@~InUXQ;t}>3IwuA* z9`XLc9U1n>_?BSJ_w&EC-eTA&erA%ng1KjYNVb2RO1CYpvb*gGXfN!**g3!9KKmM* zXng=Tem_Rf)Li5`+Ebj09&PB}#18e*qLOW4^m$-^`tdZ3JBN1Sz!M7^JbxP!WhV%u z^`dFZ-#+xU@C~+=M#FsKMV=9s6P4$Tr+T#%Tq{^d>HqSvCb$C85?xvv%AJ0J*Pz>x z&Ftb;X#KUGRNS6}4V-V1$N7hmp+BK`xKNmQa4ro08%5GRibN4Jp|&HAbu}*#jda$O zD+bPCneInmX{aqZ?3E3tatn%>AZ7CIN>HpCO)W2fb9R;yont##e%27$=C@Up9eM^^ z94aL1zP)9aw@e_F4@p?K$d)e8kfWw+KUv1XSY+h)6Jiqe>8{&CSiYC1y$wU?=#M6R zt+FC%Z_roTjRB{|^WUi&uI6cw?asXw)e5Ma1*&=S4fi`M5I*NPbRCW$Pg|A_d#(}{ zrkav;zOQ)MP#$-W-9}l0AwIscqsiG$bjRx!ra#do)4BP0&N)vF`r14*U{C0Go~=Co zl6C)h3C-LIrKcJ${4EJ04F?&h+dsiN!$Xpv`P?CVD2r$860kW>u44J>b!d{)p%F#u z^d;q)@V8Np8a5k=D|D{FAX*C}`0Qugs6cx9_^Dt}|BO% zqtapEdO(~q*?@KMyhpDKo7q|Qk@RfoE@<7@Kuwlrd@gxX5~w91yVwFDZt8tBZa6AE zw&MXFob@DY$0}sy^rh7uJIQ6lDl#ZOAifR9K$G=X5F9Ki`M%fA6F2wl-Z% z`Hbedi^%0n8hV_vq^G7^Fi7SX=U=YFX*P*s`&nbsx_fLl=Wd>uE>AOt_ov4n5-?28 zfqkBLo5ikr!Yp2@;%}r8i&a_*>lL;Qljb+3KYDz7Cj==@lRQUMuqCopmW>l;8b~7aPIs8ZzUS=JxDN* z3C5K90ThwKJtLQQBf2<%>5f*T^9p#I zF9jU$#H4EA_(Q>FYeQxU|R(g~&^^PXqnQ*`oCv&vxpq>*3;>HeLC2O@2#u$3ZI@Pqt$DT_$n}w#q4yZLug^TZ9#P1B?oP@ z)>FMcLuSKPX`)@a;AePRP|0qBzICE752a9Z@Fn*rHK?2JNaxLWQ2o`lRD1J~cpRHy zANvnKG1!twFzNjGsHj;cFR_>n{iwU?Qqsvjm5jixn| zJ~I8|pIGMb;~4U%3v8@A&6mkSkzXoPve?3k&+r*;<6TzLVoeE`KEi5e3fsV4uDMwa z7_oXUSdYH6=7knL85Kh9y-dhCIf^uk>?v&FJjCtWi@`t4X}nu7DysVbA4lgMmgD<| z@sK9%rJ=MXQd&~)eO^&Y$w-Bg8OcbpM?^9rWkt#sB1IWd@AFiY5m|`_m6424$o9Lx zzkeM^4u{_NdG7nV&hzu3j)qujwlSc;i`tQ8&0y#1*I*E4!$FlN^I+V6foyOUu!Q|g68k)s9vtIAf9czV|H@8BmS1=`A z=6REyLD*SB>}s-;m>HNLx%y-g>ngty?j0)3+LE($3#}+?=jHlEl70%E05EE2T?Q z^-1|ze{7o~PrW(2C*PqKl_GZ#MM$XfUpOA058yuSdfYk}jn39$q^PN5=Cy&OKKBQX z@;TkbJNwWpA_qpBUvpMMq*#{Ik2Yp0)67dXY;%`#M13z~(HX|{bgc_*TV4+#)|k!) z9K+B%H@W|bv*?$YQo@zVyc2(g{T=WG-%fU?)NOl(7b84bzekIy$KPtK{F5&2Gi)Gb znr}wa*c}*K)Gp=~t;JxT%}e3?vcWSg#ezm9s-9&kN*CATn7bvKL&wsThVlG)rz|#q zc#Qp)zfhMdBdA?{32z^D@_4&dyt8OAeXDAP({}C*`rMuSY=(2z$pjdcXJOSFd1@WH z6x|PU-|fsL^g5vj1#*wq$5rh`8E%JAZj>%QbIxFPQg=$|`bW|>{<*}cWIhyQJn;UP zFEm%!v-v|_dX#JblUiLYfs5Tr>63;#m~+R5%qsZoAjp!Emh7OdKj%@$r}N^iG$1mX zXA`5jL!$RV+;}va-Y7I7;)xF3r0!((o%gCTW8mJ!j9x14M0Cg?Xg9d z|Hpjf@8VkJM`%v&PZtmEMz`pA>59zBbniMbG0XzqnFjPE?>F~7?qk_!cf*deeo|}B zAkbG3UtXE3pesu8oc)#G$afMm(y3z)NApC)M>7PGaLXi~*g zF)ZB}S(WvO@-l$@LqoD{;JvSY)sP*dOyy2jaK_>s2H&uz@C`l4V@IoM$h03A;tmOMjwfX!xcGQf5JB~rYPmY~%ZIPChcE=x^ zN$8nnPOEq(!9y~O9Wyxq*{{5x-gFLfN}hCf1Mh|23S|MDjcOSaMdwC&(c$i+vF`RM zTvT_a)UgRzb$t>sn`F|c4W{7l`hbwnpR}I?AS(|@gJ~Kkp4l;no>khPpcp)H4o2;Vrd1v_sWY_cjZD27hWBB{0 zuO01K#yfxgT3GjwB+WUa=v8to+4stjHjXL8zZ=IS{X48^V&FyC&KZTQ z!9T>D%JJxNT!l72>qWVR=HkPxRoM7ullV=&0w%L&W7vpcw0o=@^)RXx<|NeMK!0^| z7X}JWe7|Y&gTHsD%7`mAFQZ=kP7+#Jjfzov$oXO*)b7*dewa59mhp~2>_AK&uz)5% z+rXLZ-RM#99elM~CYoPdgcFrJ#HTfP*uJYC6s?=hCN7*pFC!13p>iuR9XGg3cSfHp2dU!}&wkr^eFK{>C*=^)? 
zH3tIU$x43;cr;&=dQbd}quUcHk^-T(tt;((5{yGXF0$0LKG=1hGt;h_Vc{`d{2X;0 z4RT6!Zn6Wde9Kt@-Ll!8dt*F4-~&_8@FEge{dlflKPAF4pNWYp;U`8`h!`8aaDxJZ=c93RW$)htNGl8xIFLS2sR zL1!Ncb&FOY)xAI1xh`8#arv24<`CaeMEhZrp(f2z@S|Py8pm^g!sKHB<#_5NW_bv` zJX?kjeoJt$S;VCKJ)mIbM4PKykknPg{y8lkGx&3{t7A_xc1RYLd52rARgG$X-(aIZ z@%P4iUHJ2z-_H-$w12`^m|s#M{Zr>*KY%l%>n-TE?_ZqHH6yh?f29e&39ufiLnRfx z1kXLPXxQpc>KmJ}>bJ7wQ4YVScW!}&1FLBVO#81pUnm*tcyvgruV19((_Fr)93$LssToz?SiYs5$={ zf+`(I&o`6$SjJP#zYH-}btE1>bKqRd97sZo;W&OMt!k~raW73;5^^0IcQ2uA&-nF@5l0ISh;mLy_lwfi|e`bY*#TP^_(A>!MSQtCorL`SlUN}cb}eELQQoQ zuD07zST_x_{&b!lsPTo3v;yvR5BPc7hD`c@g!`2Q7Ou+sbetPt%5&W-PV7e8nDdyl zuM?%;(~%SMRqz`Y!go)+i+K43HUxUW`|&!2&pO4T27}ZU_d@M$EU6yLM=D-p6uMDw zrbBxV<-e`cit*r|#dVtG`$CqYj`o4|q-ZRDI8Yj0Dxm|3u{aU) z0eTAqsjKr%SoN?c&%+LsmCl*Y!3Na)IEUx{BauDsIJ7nPLa*3_Ox7PopI()icI`7l ze@YQ27mPXo_6yoChfu|^ufn=>s(e;o0IOB%Xl*c{H%Hw`QIESlOHAno?^16Z=7#Is z3)W+r8(CH(AYZY^p-@)#es zn!BfS3uLKUR}lV=kD_CeCy02gM9eW%s+ciT7`N9Kv!p8_3F}P@cFe*92V2UC4db1H zE@a#B8hux|3z{G5P`PcqxKTL@_4{~+rSAZUgF;E{p-zIEF0BWao^h@vQZ4 zp|*z4rcXZb(43dc-P3(}zM(tW=jl_EQz|W6GlFL1EEU7FDslIg8a>s$$2taGM6I(u z1$gkC{ADdljqggc>VoLW+p#$I+MN1q--Q9gdLi5J1S&N=Nk>#flJkM0-6Q6JCO-h% zsz*-$-lKZ2F1{zbvu%6Sq=}rP9Li^+Jz90hYQ!KGV7>s&{#Dq=-XrJmAo_LrJuE){ zW0pLhyy3)1I%&^6ZC&@{^ZX<5k#E2RmtFAG9YBuV0{C51mBt#s!#=mASa_6k3!}cX z@BSlbh29pt$Xm_xEZomDT!wabX+wa8F6ll#4Bdlo*{nD-3cqVkU5n#LwKblFPP3-p z*>`w9g`^F&+hNH2KXNJGkw1AYCYKC^9`)g~!XnoAZvL&P5lm1_fzpp}Xl!PL)HfQ?m6^0iFzOf1V>!6=H8JDd`QaR_oTz6BVO~XE7 z(smopavT9~^D%WM^c3D7RyU?m*LUE?^kBSw zcnVf~F5tH<-;vDW9gTo`3^r^=j~4E!4K~5Xqh*5U{lWCQJYJ|G3$&UPAaI^9^gnCS zAul`HUveBc)0b3#q+p5=4kiBoIevpX&6yrXW7YE{-6F?AXGafuRy~S71P$hW0UiQU zdW6P4GjYR1kM>D-VGKMmr~aRC^!F5GAL~XNL$&F3^)KOTbayIIvJ=}|ZgCL(MFc$X zp>s0sq&w!Ba3r%09^6$~|3Qa2w$#HZ=o20(92Q~}7V>Ue75;A0Bm4R4!ri?+grcjK z&^WUj_Z-zIvLOt~b(XZbY&vbSQ=peTzjUeKVzK}HJLtQ_NnEyZIRY!4$;ZDRF51i> zi=E35tvQ=CO!lEqqy&nW?YFlO*LTz4 z2omF~Z$o*b9)$&jlg^$(9O4;7cCRZHT4~eu53itVHH~a1+QE7gKifN{LryGZiJ5#? zzs!%S-|@3sha;1EdctDsHCT9>QTfkTSZfu8H|=AYeyJ-Ng`}}X1@|%ifGQm`Ph>$7 zbv%B<|4;Z%F*wYErcC|}Pt`!W#k22jJALSBp(YhqMk90MMFcY#RzMLPjN>N;N(8@!` zR9kQw-knV7A{+WV6Tf2rU~8YZZ1Ll4)~%ZRz5QDujOapPtvlJ@u0$tu)DbqXot<)% zpl9|%M89&Si%uobD>9((jWTp{@-tTG$L|=Q^o9NdB;=(Kir*6+BUdquj%<651A9Ga zrNbclxp)&sm}}C8n^~Az!E;bT3cOG4!;6iUw6x$7Os8FdXJsRX+_{6GervI=vQ*f0 z(2mcv`8>_(EGz9=is4(;aq%htJ2E{f#8SlG1}zE?&%x{C0T>jmN4+;WkThy6ts8rS z8LwQ4nCV|o(tfpQ?Y&#lx*03T#f;CZ%68+`1Xa3SbCTySH(*fR4#7EiAvj}#)*b9g zJ(YaL7oW7qFWN^O`Mnb-J1@c0bnAu;jl;)_)uG2_Vx9MrC3zF(xAdnZdA=q(;< zm_wO*oji-7NK+S;6rT>UrHqlbcw96K)1O%o=RqJpr$22Q~szr+fbYtW9(Jg}~D8B=M4? zH!1RI${sfUjWT;-+%v2@2QSqr2JLi{k0ILDLvQws#^gqn;y(XVyOrwQ$ zW~9N-WPfe;;UM>aRU5}+Vc~EgSamw3?boL#-2Hs_@p6=pD}=mbfiP5N5~aP_gK0st zNjP;C(xZiFJETpATNFv+atSP|p0UMhw0xK`ecixtNyi*UWI%>xzwu`E2%*}09yC@5 zP-4GtoQpph-TM4yKfkRK{T@${*xm?YIs*?w>1JYadT=;Lg2tQo%dA4)xX z{6yhOW!kX3PGa-Lod#wFii@L9U{^>SOMUW$$;%Fb>}`@rTO<8qWEcQ^q)w<^BFNV8>qQe&av;o^!jElTnF#@CvrYhHFD; z>-iSsnP0)+Nyl+}VL1wfkC+o75!F06?_JAI43Vp5TX`R-@t-NGxG%>(--)!I-oTWD zYSgy*9L90R+L$xev@pzu_PCvt-Zt+JIhT95b(Z%f!m6eFM}<2>~Kf1Uwfclka zh>>~U(7=1VyF2QogEG3%zr_l)efuOaZuN31k$Z~{ch2>^JCTie5H8*REQIf}%b>l0 z=LFmBk#m0

H?XNMZeMO;TrLWR`v$6lOz5hME79F>d}oM`5~MD}4=AT>3P#FF53 zbX4~z>vzbKPrPnU3wiWQjDn&@toJCVtoWT^InN;$J?}IYAvrJcshR#-`^FDX5o;$jC_t&TS z`iAuCT0FnY{boVo26X)q&-~3!5oT!&N6Kd}%1ZczsPjpvRsOqJI2*UUxh{wHzi??Fk_ z+*NG%ZGRdP#(jX^E)=QSg|>U?;l(T;-f1k9uHC0c%Idpd(4`wa?>d95Z@{zPBXvLTlQ+qZlg#49Q`34i}8@!4^qSljOsRyjvv2A_+h<`5a3cqiPnZAFE>GTn`TCHZczz|WNOblc8|3kp|~ zLDM_j<(!>My=O?)SDlmIN!*BxO%Jf9u^Ww8Wr>0vF?36LA$8s~pkLF^qd+xSDqWj^ z$%DPctrw@UdED#j(p<%qswdDWE$;9g7Dol2O_2L%uSCTlTyVHmDcp&uN08+KAus3# zX0LH2O_v8qzG6)e_14jzW9#WedWkrza~3L(wPF77-E@ou#MTa@8IK;qeBcjs7_?!M z*Lu1ycNosKLuj-Z51aRw*|)coVCLXOChgj2cJ9Yo)iW?ZNrqlucA!-rJJId^9hUNN zJ?Dr7kXKXgS|sW7 zeeb}EJT$nd(k2r{k~^{wOQtVkW374DcCS9IcQL_=v-9ZegPwH#T_@7NWeVLE^IXfI zadey%No_WF6CB#aj%lwGN1y&Bd7<=Al6Y$!%B=0BYX0RgPZ>-j6V6Etcc{RxVKn70 z{(y)MCHl>uLxGmQRGGCx43-E;De`0AcN$>K7ykJ_xeGZ9Y-oAw7ZfZ^WiIs_k^j?5 zc$9BL!wqKQ^%xa0852aeYg-UL(VEWw;WL4Y@hHD8p%Om7%P)+;E%`EhuQ5R6BU=ic z-G)e>@z@-^5e~O2aH_l=;RKdOJYj2&n$n!F?sPBlDc%%l z(DwTWp^(R!v-d5jt<8`w*ZR`A>s|29^CMQ+sFC^Dxq^>l33JU@M)n@p@$XR(6Xuz6 zzQs&jirbF|Pv443S27V3WKOzU<>`~Rp=dv|D~&6eCe8_dfP`KaXb%da@`&-Y_?@+= zHoOrl$H~&qbQPg{>j%z$&?e8s>0($$4DC1VKvIef=?B)bfRla{WHbpq-kd~CHwDr> zI1e{lr_$P6i%9dj1{KFWLyq%FiTZ}KNH{l3R7ji4h8uy-j;eni}!yK@V~ne3R{*KL$GLwXo*1 z-$(rZzvJXr_+Ch5BfYgKvV001tT79Esiz8hEkAJ^S{*j7B5Fo%90xQ_GgoTE^xK_9qtMR{Nu?!CFi1}QsJ#V0E| z9O@6wGiS%@dr_1c=SX)h5&B$9L6oI3dfTKfFhK zN*|#}IS-c?YE#fGCAQk}8hdOzlFEjBg=(}s_4m2N?pA^<7HT5Srj{)!OvIMaQP6YX zT+e>DF=DhP#h&U)Hudf7hkPKurIkn$dC$uC**x^BevD;JL3HIu9=y1RK>O@KnmOSN zR*f*C8y06VWs5hwUSuJb_oe^iV1$|1!A20!lKBqjaSRQ+;_z9sT!^Z5raMo|1QYHp z`|oZQy5=qG``LzKg4}6!K^0~+n$qOb3#c(1i`MDrX{i7|&dN~3o+;@r-&iX<(K%2fBY15PiZNgmcLap!F zPdqrO67Gt}aqm|sEp789e^x8x?2;j)KshSDtH^U-HP|BCj!j48#H14ubfWYn^gUl; z>DnOzz1}7)Zyb!(6H;(*aX)Iw7=quNo&RZ8INjXMdzK1yI3*n|SjyBRXT?TwSb8{; z=3CITPR>vA9Y+tREPzheCFCi;8tc26uq4|#l)7qwIH#%|!EXJ88uha8$^5nbc)ET992 zKaQaz6Z3Fq<`DYPQ(HVZD+O~0nDM=GK4dDmpIH4Qn0*m0`CHN={g?1)sl}e?7TlkC z0N!thA}d=%Ou0COu9q$sHnhvI#&^|N6=e*!878z`+n#Lu-NavQTblj22oHFk)=b`# zY?rvxmgq?oYkrhn_2Hh21-ewe(VGtgPq~}3m|Et2b*iTrL_>+To99iDd zkC0scjPzwK!uDs2NX@Jc*#ZA>PBvZ|ZaGc(t2+QIZF7;CW=ytgbYW#MgrpCql9cC? 
z6!nWyXzV0paL3Fa!-ZnrE1IYrt#LIQr(l8)sw|kcstlblx>(?}p~FR{`o` zdb|v+Rmm4-xM$;v??4*b(vBG3q3D{)yYdCYsDE&vxNqE9IPI%NTz&%Yv0mg{HjsV) zcW5l*tXCZcy7F|+|M{ZD1FfiJ)GquTsfhrGTeu!HiuxK4gYo<_mTj1b)3Ie3%=aQo zmitS`C$NfjZq#(`DVurt8_Eo?uL>V$P|TFz~$dEgIj%cU_>rjX}j4RSW^LPeQ|Fq^jn-XHV@XAa_e zRB{4cuFs@7MNMcu^9KEUE0UtQDXsZ_2J4eXvaxWY1ILVM_wha`yc$BnQ*~<3tU~oB zQP>=o0qHA$I@Zq5qIO$gesC_UOz$eDb`F#Nh_v@;UicY5G_MG=tMYNF!-SlCWToF% ztYHaV#&f4{3yROQ^PK}{?f)G{m0{g+Fwq>7Y*#}@bu+38eCgPe>$tqE7j51oLs83D zvE?os;C7~4QE8SNeQ6npA?s>kbALPq6-x2Iek4VF9Z0ABPC>|3A)`E=J3gj|zkB)q zH+~zC!F$Bs+?_D{0!-IGLA&Es$m_4ii!PUi8F@sBz8X}y-Fcr&5_mnBy)J3Ww& zmyIU7M18SQ=_9i0%&UrZq_p`16OT+I=>i8ly@ZTRtLHyigo7yaDT^8RC>li_kR3kV^kGvXWJb5~IIS z+>19I?>>)1)pWkMx$c1p>w1VT-D**~=aqEs#G6<#&zu}*ai7JkeuS@ENt_r)e_b|+ z%cic!M~5eP`!@s)dy?^ShBL+8QY0@Q?%mK)r4EIuG|tNhyA<82-0%W+eE-XKJ;*|7 zxG!DXUc=|Q>zSne5FBf(aQL|@4a>{L>Y-gx{AD^lI#A+qdbk8z7yrbiLKV_De}N@s zZ^oyV8qV~o#5#V*mK1)&-QXscU}i;+a;K3>ya|oTOT>eMOuXi-8|m_kSni=Bo-yXW zf!ikZGeDlyazpU!x`^4>z^g;!wQ~7A5e!4rMl9RFN@ITL}dI@j`DcVO*?fT`!Rf^7~FfM#AmBF zn|JvRF3#C5$REx}da5do_}!bm(=KKEFN`G1UY`-s{0{ojH;UWZhSQzfH<&Ua+iUId*X~pbsIJqMmYS(Wf&T2p2DQ!gCkz2yg z)$U{+{8~`zv5T#5=MJh$9e4$qkV`6~K^3=gf4nt?rswgy*K|yM+lPAP521w%QdxJS zFsAW&IVKkMAj2w8=4azUDOV1&#^qO$HeexY3i^;=YX-i>j>pL*dqw@L-tgvo@A^K9 zboH2pm_JpOn*5x_%oC4LHDDjo3WMmtenumYGzd#abYg*m43(Prv!QNZu;#;84DIP4 zE?T{aQl7lPu(vJ9%{V9&A385A4q&KSa0WUuCgk69Af~(?Ne?t8QdjPXi~98xi|1XH znkCm^+RGHNHfAKcUNNH&1OGA4oq=Snvl%)n@*s`Kw6XXpj~+(tT2$Iv9{Z1EY=k#|Lb5;|t0Y5iWr@LrtZxlg$2piJv7 zeZ!UN88j)!5jo%NNxM27GFSVdpg9e?aw8}vOab=BuFPZVX8gzIjh3p@M+1>}9Xr^U z?QhsQIcGt;qZ?LP{YJC_-|JQGXZvRI{>7vQBm_T3O%Tt#t$l`*BlNIpg*jQBpFk%D z4WPM06ES%~Ax?S!#UgJhs#o--=4!h4Y25W>Bh&Qg_o&Aq87=C++5PYWs%dTls&$Ls_F*209@A&=n z!3Qj@kTaB!#8Cd%w5SQdW3Lj1}lK&Vb^vW}3tLDE!GS4Y2U#(9EQXT2NPa!;a zSkow;xiQ)nj~F#8s%VtZit3s4CnKHB*fJHf2kTQtOfacs4WteJj%><_L%6zaJ-*9Z zldVxIOtu;$;`UbY$@dVbR>;%*d=*l1{wlN-D^b#r38HqI6zi?dV&?r2l2LY|5|?CQ z|KC>hwwI-*Z4oSPA@3dz>ww}J-j`KeLRw#H;LZ4VW8fI!PSh5mson}rDdqovPqnqn z^Zda$&S(gw4V-V}`mP9mO;OUsiDkSCx{BXf!=QD{fx_;{{ok>9SUrpTj@HrCJpp*R z@u6gU`FwVxMOCZ{<-W+)OM;0Dx0Bcrxd(k9{*;&3mnKXDis==B5>V56?bz+(r!6xC`MGU z*(EEW>|BB71>7s$s}ypzL-1~AE%Ub-NPqseu#KE?FlduJZM&(@Cb)9{TF86!yHJ3c zymLNv_D5`U3ZdJ#?dZ?y`DD7xl!hOi0h1ovacoKlN~av>oGdL`?me9z^fRL#Ir6l$ zOK)__5 z+%j$yo$o78J4~}d#?qjDd`io_+^n_Tq`I2`$JJ$PF zsdU!4GkCw!kg63Pd8AewvJa)>X!WMAP&(R-vo3!n-`d7f(6R$;XH9>^4OxaPv-wy# zU^1;r$b|qyk}i~|zE!=L{pronSi45b= zu~&-xJID9#K0UQ*SjBJ}W>QfwT*o9%y!|7b^Fw$SHE6QGJ#)_z4==P?8rB(C1&uKZ{Z}SkJmCdHq$1*(g z{F2XnV}u>2`p~HeKm1&HhVuvdQ(&Vi)Z<2z?TXoyznMD z)X~x&-KWaZn^;vkdor6fxF?`0vjz=%Rd}b|gJxg;f_18oS@db{!dT1qU%gByY-bWI zM;?Y=c1uY~>8CG=PG2*yS| z!?!q9Qg-P@?L6zdSm`+%R&PfgW6Y@|ZzIBm5iG7sfj-=9K;7o@VsGa~{QM4@lcGSi zGM*T`d^h|1Jy>*_q$GJ(=*q0R-N$vG4T6T|d8AxWqxZ(z%)a&p8_9VmOW%FKgsUHL z^0;~NlN2ACexQ^MuxV!TdnduV*GN=-45S$S^Ek;fn!Ve2pDyz-Q!fs~X7y8&?;5W3 zQ*|+>_jrsnw{etd%V%#fc2qZHAeB`f!)0|R3NgBZyQUH7$-jfRoP;&z{i$+i2VUo8 z;aB=?Oo_|HrcqlkVPm0C`oN7|xI7p7vvk(W>LHxkjgbD(lTYVE~l{{6cgp&)b{co{o~Sdnk40@=6sr<$qT=&ErbEk6@3s@Cqsv6`RT+G#F;qV#V+}7!`3R zsYfGb>@kAv3-kZG=7)73Kts#7!J4z`-p}kp+I>&sz*!qA@`|MYe*cBe-yuI>DR*WU zAli4I)VygF-5s(P=D}-8_sbTj)imSjKY41;(V#63H*s>XEN&%P(qOAzWUH1y2E9ix z#c#T_A-@_nAp_Y_y%6p{cBRRjdAexVV!RD$U{~_AsHk5A>pW}6qCfE+NV2yOp~d|U z(dtw%e-~@|6+$1~E?!$;GTq?@o? 
zXG30|sKPDnMwI6w<>_@;nbHw&c5^rRL<9HnBr@mIEC!ZRH>DQD=(JfTeuH%{c zJ^XuX7fhJpN-gatge85OFu~{tYL`c|M=KR5WOOTD4t^~Zja)*zyFP{W&VTS}jTPqA z%@gikw!uf^3iMIdqHl&4_#HN$I_oFXwPFoY*OTJ>xaA(Q?jqor#Lq0_5wGJ&$_k%Y z(SXrpS#$(lOIOq2^a-fx6Um-JaM7 zW?_)cHrV+1ljO4wm`X7_&p-bgb4yV1%z(3)Z{omvTX^;^V#Dv)Q}y_7Y}f?_lI44l zQ!fXyB@G+k_2V<@yGyacRGF^Pd&C$`q}hIhXcXt)#(p)SsK6O~x03{8m!EjF`vg8e z;$FmoQ%QHb8Fxv^Q}G&0yjpVtyo)EK>UdDz`BV(c;{X1Om!N;}Gv>~hBLhnVs`pC6 zk3Yj%*IN#>qq+~pxsAp5jpJy=5?SiC=m zspC`#r8i62xfY|8q3$F$sL!Q<@IM&So%`C769vA5rjBbq_-vGi8!K#Se2FX`I|b6IH4$W&ra@Du zwsEiRA<4B}A9y}!zqom3B7DtF=;o_8%p$I&C}#B*6eRA(-uwMAW?eGdC$EN(svcs| z;+xpeH(u&F<374S>`i?Scc*C&Z0J_Sb}FW+G|_ab_$4v{4r!hE_I@xzCnQ2&i{~nM zR%OgZB^tu>)5bj}P?*L5*xr|r_NXKjWow|^ekDF09zouxRD zLYkcmkW{jk{@aOVO5bD44MpWh9qOwcgPr~^!^EM|9Dya-_9ewSe51^$Flpn z^{lZdknWCaLt!1~c0P+|!Ihqra=Vk6oS(+h8rPxA!A01*z=yv079pn5m}b4_U9rVy zSdZ-t{YEQEzIeOQ!*cGG+j|?&qo>o@)HB!;?LsHbJ*f2XRoG7NN8Z|baM78A&E-?D z`&b5c?KY>-vL-C@xQv(sPcSCo4E&?lBUhRyj2PiW5t9wYQS!yC`tk*YH0hzZFX#0x z0Ns&Z#nE(4QVHifs$t&vepjDc#;{~Yw#@@O5q%(Es-9x0Nk-)`yZaYXd=Fv~i8 z7vCj|QM%lTEJ}`HlZ_vSr>_&sT|6-Olp!s=&(DzC%Y~u5b2(tHmv~V59>xh7c-cOQ zvMPP4L!&^Dt87B}*Uvb-oyX(XzQbatZ|GkaC~BOFp&gm8ka@Ze7ayuflOOICbmpyx zOqXovrJ9gde{0ltb*CqNXV71suf0}T1q&nRt6h&b;Yw|qIPCU#)Th~y%`qJWG>xUX zvwabKeJu$VV>sI@lex~FN@L!jpr{P?3hA+tLLNWjT7Cud6URPBO5;z4)D~Z!7XOv?*j+5kgmWM`>X*v-GND9 zuY~s0*GTBvfLEMLa(nS}ybZF(&w33?(hH)zJTq$l7Xw-SWZWNp2MV*VaVND6U0FAs z`mHqOv++;J;QY~E4|wO_&77?W8%jm#t1)M3B0YMt4~Nr#VCc}F*wM|H{wWsY@zrML zoo`M(I}K=w%3|`!&SvRdEy%2@4CA%lN#s+EA><9Fz!ibR$zoBcFR~`#imtyQ9H7fe?iY;vjrA$pP`r7v`wzPGp{a5NGj(3NXXV5^-R~W)} zDC|ODhBu*GXSI1li#MJV2fZo<%$XR&SF0NUs995pulo=&H<30F;gT}Uu4v%(mV?_ zs!sP8dgaJcvGz>yP{0MaasGDX{3*1#ol&;w7U9;Jo0vWMBmBpCFlQs~3ViVqf0r7F zjt>^mU-2R8)^tE)YPpA;;zZ6$4@Z+*876mXQ{5*E9CGp|ckKv@S*bxG^UtBu?uX=k z@NIMyZ4qM*$Ka0WN+lu6I591l?z*qxp5A5D6yt(@IzH^d{TO!hfw4G_@8|98p9$N$ z6k*36XIk37E45W|=iG@+v`a6Ljwo{eiR>LXeXd1kd;<09R*3bJhtfei?xq_hPdyS8 zDQMda`gz?2+4p${UnYxZYb^2P_8ELX;ZGs8mU!N!nzif1V*C6WXfNzTBgRPa)kA`l z>lGo(nbA{!bVrE#U#R_3px$mt5|fp-NL$qc-=h_na#DrrYQN$&&#W*-d+y#17k-|R(Bp4cY*oOA4vuKO4+%)4QvzVKuyd`#(^h+Wc*b`zhfq}PhOeu@ulSK(WMB` znJ0A`JA{4~`Jm^g>(Ct=Ny($jki49u&-92~*5|Y(Q^mgg82=0qxfL(3vuh)_)pJl@ZEf zQNlx%Sv|+VWg5(0>oN9h=e@g_K(V%UCdH-6P^QXz7>c6QWvK=2AHEbrYmY)WYD=?v zs32{;CnY&VQ^Y{+|L-;5shKi=_m7ynC0l%wkpT1NK2(2G0awm87jH5RN3F|x9DdXv zU#@aL-B=?e_EF(Q;YFwzow)3{g3P#<^{lJGBx9g>dR-i<=k`RR132j^~cAF1yM zPk2txf~x8esZ$_mu5<(Re=Q@Qb?5MG_**!-D$v=fY82G-G7KNAW5Ny1o z*dK%&-_>Y*)20)m3U*y3tY?Zss{3G)9UPBU;ZGsEC6v|#s2Q?ECt zC$~?aIhAvlD|h1c({1p+Y(`04UVxOdVAlQu%fhnp_FOVjZpw(7Kl!=!nXQ=N&<$r_ zmBDVNu*GiiXX136)iR-0KSlcP-Xtg_@>#>+;o^Zm zx8Ub@5`$h$qpK;NbUg5^aDQ&7fOY0LnO8Ug}-owNYw^bDVz6Esa1q@U? zq_4-G5iadqfVG@+^yqV6>P)c0sO`R#cz7D+=_}E@q6YpQIwf70`y93xQpA1H&KTRR z4@GKp$7cQ+);{pX>iO^X=&GK%)%)Vt5w!?iI<>mCbYM<&3dBZc}dv9$#U4HOZWMyVVf1-|| z!>X|SoW3rWeRQMybGqTxlY${e_1yJlRr(et!~HHB!PQ2+$D+9dxa#UEl(UT9WrHp* zxpop6+m0sPj89O${|BO&%btZ!W2kk25sW)xxZ8OfFmkI47R(8t|NNT4{|1r}-j~ii z8jg;eeXv{D#E&&~rk95@uzyDv;=HEOGuQL~|96IZ^zhBKhj290n$F~ulMJV3*jBdADT3Zd6ju#_D}7uSE`yTli{lTLNGd@CFp&f26j z-<~8(PNF}{ZoJ(qM(>L#=2kJMWjP}Hl{%irOuWSn4@gCoEpx?q6OHa3Na}xPbJLDq zgZKD2JUQ+`XEb)<^uR&*aCftCu6r(IF88O*SXJgm%;4WTO3;}%i-p83j7QK^ir#Z! 
z)cSo0oi#bcKdt|V`8mxP`fCHHaj6YE#cjw~BqK=giKU+d9^!4?Pi%GC;i=}kiH|)O zj+uiP7p;ipU;fD=tRaweFU+B-ll#$w3;{2``HA3L1Npsi!Y*SsmKk&;TzbcyXBpde znJhe4ThCltbMR~UDp5g$DOYH(ChXWNL)_hC{L1cIsH$*bo(2iZcx_8>47X5i!WgQ1 z6(G#x#V|DdjgPbAXu_CVjJeNIc-&`L*7e}R21WW2x`L*zF~P;-EK4Xl2#f80*uQW; zb~AUTwUr!||GH~X+N1zj2lZN~Tr*6xDZKufOd zlN(&;)WBg-8HUHJkri_UF7F7YrJVz4mBoD8KF5UqU5>!sH~EOT)q>*-qS1%39z#!t z(y*C^6fIIDh4a?fjSPf;4d+M9;mCOLE~Hk*l5n6JQVYA$W+FjH80WY1RUy1DrHH~K z22yUX7A+a-4lnOuYL8H+jW;jjYTX3>xpM@oLGY~gpE#$=M+=Wvj;o#H>Jlivf zt{0c$sZb?4Kl41MzcZp;JB+xKhBn-8``HxT`U$O(PZ;OFfYWUmMb|Fe=F*!rG2r=X z*c{vfL%U$IdRK^Smcy~Wr$Q|uC0uyQN*IOiXt* zRQg1pJ|DZseQFd#URoX-o~zRB5neRwA&<<5c-Nt)J?4H1>4ZMl2HaWUnGG1ua zSVLcfB`B-23$eM0;zwu9XoT)GB#kVEVVWz6m*L@f<33L+;&Z( z>XFk$$J^%N-7-a*H03iw<}erV!(6N`*C4nSvEMN#(mG;CGA^sRgi0-%X1ti1Z#t3k z9cR27R|u2+c66jC9EDLMIoYZ-ie;RHjH+jNV7UP|Vy`f+h?QVCWgJPZI)-0|SJE|Q zmJhe6#L8gSlV|Ujb;>8$bJBqOP@qLS<|tG2`I~6EtBR2cvJ`PbhwOxO(eA~XP-~b; z`UT3gpehAnn%g*KPglY6`4)bn=H+1-464 z$gZLELRz*Wq3=j~wJ2E7eti}z?IiJZSPZw7IlTOhvtb`GjGa9y)HllzJ7m`&Vyh3| z*G7qku3d`HCQ8&{J&q(wUqRuK0<|sk`i}#EBd;dYrr9@PyKo15pI0Hj!W!E@x|7@R zk7&ErjjE|V+Tr2!{h$6R#uigy8MO5c^`3-{rv0Qu0`EZd&mR?(827HG+e2g z|Fo&|$8 z!<;mAEI`;m+ zUB7gkeR}~TRON&pyQWa|_4BaW98J#qcY_8!#;il~bgkhl)*L^@-v3qHdSk}$sFWv9 z$y`cP@PI1YTe+D1Psn3Di`y&_|>xo`nqHG5#I{ZqKhFELkP|hK2vFl4YExE$6$r3c}_Ee$Y z;s>19p=MZ}A<#Juy0gHTeA^!3=PF|oy}yFD{f^=DD>K?BuSNpnGFCd@7t1on(J3Wc zT39lS&d&@d`$G>!8b)HwAG?TGw_`ktt|NH7d^6)QbqeNVX5;)I#;Ro8hd_H1fjiuc zl6&)ovh6pKJS7}EF8R{i53Zyy-@&KNsmA9Mnk1i~S+Olti)^}-Xs<>afBC~Qb{D<@ zqZMxvGSiBk|G)W$CDXCQ@g7jIUg^; zT#|8--Kf7zHaEbKea9IKJj67OT$Yc-A}!wY?!0c^@J*e#T&)FJst)4j&AS;x*_R%8 z-hl6DH`=P3L6`3(QvBEhf?1dw^egmfuw@p_sJx1QW}v^P?qbJUZOUl*iU;B(^7`q6 zBd30&>lkCCd%fj+vl_Wm2Hy0c#1@Cos-SgoGWM{!yhbr|I;b;mOYSAkcgP3PyL0nN z|41w6`rC{Yk{)67;Q3tUnj$o;RHiVUR6JPiK!c_arm61(X(xkn z>N$^dwN?}nHykP-$C7n&5^Y)R&pcu;z~UPaEjo(~FMA6JbGbSSNCIFzZWc&7>bUe`89Z4!wRGBbpq``Z#)X7zjX@ zZW%?x#U+LN*GEFgjy31=X8B;>nz=Zty_B=scLU?tOy=pzPSG<_0hienNbg2SQ-V8l zQXb0_UsnvE=l+|8Qxey)RaOGq(ICpy2%#}k;$fLHj2z1QP_@QYPX6j{+Y-RD|X*?Bkl2w`Ss;EKHDjf%f2|g zNQ|J}IdN3xEk$2F8W4ISpLajG4gF}Dpua&H*~b|Fr=YdsnXx6eVdXlgNUTHr_gF}e zAHzL6<<6UrSS?PEVf(16XX2{S7va+DOcT>tr;Qs-QBrx-eq|Gd&dU@G6t$6-J%}W? 
zoPyHd%Qz`arNmPWNZFuHRxVfZXvS)C-#;7+mE>v5_yk~)HLm=0N8W?!)MRgjgOBxa zrKgbb$l9PUGN3DOjzLHIZ-s7QAoB%cEGgj0-?nb0Jy=LF60a^#~ z=%@*z4L?9bN0&;4UE=6GE0o&IU_Dt`s+p69Y4&bh>3m-yH1`0Xeo_kGlCy9(H^pOO z*ghQDX+bTIa=F8P0(XM#C)2SepfWhyyt`oI|EOmXw?xe%;Yc^Hp7p2 z5j(S8TuEaC<4r+?Ie+6(GK$29m|NXv<~^x@zbU*Voe^#X_qXZ#>u?iNvZ{Zj3?2i$Q zmzm?y7rPpbXvTFrigSO8+o!bXLGL}xnNfkaYP$5^+K^TmZ01|q_laFY55Yo4o%X!C z!ylAfNbwFAxlJxFF=?qe_r}10Xz6~I+1r8xzk7ur)+mW zgg4i2U~sM)gb`z?t;(AklkW4zX6I1ctWK|6C-ZGzw8*tUgT9~k5iTByVO`4>lm)k8 zT89DNC=~J0rxkH%&~1FTmL`=MgOM&5Mv5+RbfBacF-g~P-&J4uGBgo)x9k-9jmYPW z6^D@0TT4!LqaRh6Z-R8%7J6Xk3U@PU(UAkLWH&59tTXE)%l&kV(qCS{%fS=q^2Kb{ zX&6WYk7m+X!+6U7zD@AEKN*Qi`Lh9^|Myn7s|9v0~>y3g0%Lo|(JQWBVnrUUUqh z^%|5oCLI^;{pmw+0*y~#o}%*-^wgygO{ryY@Rt$>zn@6SAB*ARv78R57vb2c_t2TC zOtw`$*rC8$s$W|$OzGxx#cLw0fsWg$o8Rp{Aw?$eX z36h^SnbO$L(^PL=;$LKl#vKku-JKO=ekB{er}ra!(0Pp9J{mPDV`$V~)}y%e2lpb} zk=uR%b<5Pr;_zwVXxulr=6&R?#&>X|W`DtmOlQo})}pt^jp*RJNBC94yh15gA<_2` z{3^`I?U@-p_ye*#qtExqcq0X=g` zt#FxYNQbM`>Frc~;o|2=+A)M>GpQAqcMrj}H%)w`rY8<`)*DwzfJl}vaG9KenM-ut;u>DIVW6Ax9g2i7M+}FR!U6vok-b=Eud*2@_Vg6XS zMUs9g4Ix+cL(orO!O>qsu26C=9o$;ZjrAHxUtWB|q^(~#H~maB>Pyiv=R6FZYeNNB ztf=H=ASJWj&$az)>D~hy`sNpiK=D4Lu$lSAdS~W~cW1NU7`k6RhR8{RCNG$UHx@_H zIb^7?y3>>PM3k((5L&@g`ig%rzk8!uyp50EJqSca-=skq1Fg6svx0vxmx-79QC;`JQ2XYN2<+x#H zMib2@a=&EDxIIPF$gy6T-Z|UQ;FCW5gHf~S+`MEV(1Ay%wU`V4yNoMp2%-^ehQ4b# zQEUAlY?-Kp;cIte$Cw+Uz3F}E!$1vqF4Uk`7n%R-XA5kY3nU?X3Qa1oLHELGgup_4 zD~$(ztj3eg+PHIjJZ0Flv8+`WmiBSPBZE3PUeTiNSC@q@162xJ%Z%#zm7M;;I$RiX zk83nBpt|Hil+Su6Ib%%eap?^dzpumcDr3qY*Pmt<*pqp=AveF}918ksP_4F}FwMXW zV?Kpa@n<%>PptK9Uaw0AcheA6y%VoTw+bujQt*S_C#xUJQ}L34LZ7IoIBsep>^ank z?r(a~{u4+y&-l>4evE}-c^W2p8l;nalRp}0O4{CL6u&N9$UnB0HvIa7P&+BwYUqja zeLcvai8;Tlim~jm3@ylEy_@f|7$bKhr5{(I@9$oq>{J7PI6ULOEUqx{)DNzApes#n zQT5E-?94s)pN#t5F|b|}iuwshL{GY^Dnir#mjC+w6TY!W#j(4}AE*qzQE+#(q3>5WOZRG4S9-3Ne4pcte`>rMC<}+~eup=yBL@p+`~| z*CWV32%n{tv94|w4N%p`^Xp1TI=%~YC3^7Wp$$nM-G%Efo{ASwpTV5fE4W$C>@8zq zPR9E+=yAmnF8-k#x;H<@w=17<#zUFbE%}SLL2aCF3gb!snoGZj=}^o$#+e?IgTE`^ zVz+DtIUcX%$i1Y+fy^k*eWI;;#|A6Xe*nIJ*~aXgZYRTPZ>@1dn8ENE)4T~ z_j6LQ2Ew~g<}ltelA9{qg7>~0Z>D+~XK$-f%{gsu+@o6V#KdvbXLdVU*0L@vn;GqN z5z(e>C8)Q5;ItXPB=#NyiQVxe1Kq1Vn`J< zp(JuH<;C1W$O2cYv>Zz0&T{=d>NK_f0^SsgFnRJm?3|K^`zJN2(&h!8%icxY)fX_8 zy?{fP;?U6`;28i0RFn#(cdl=%ALhryqXzg88Dur#_tsl$eh9vwRdQnwG4|~_MyGajn)bO zu-M*+79LQbndxB^_G%(*0|!%c{dU|L@|*j0y%dQL1E|Jx02e zt8yP_5A{X<(0J}kRF>E;Oad`7FCh6#pPo56a>l+h(4GDSQ_sAIN4`GoZtuX^fWi1u zs7~SgW)pwIl74>Lg^@a2Fd(A__L+MTG;|q73;K;I&-uc|? 
z@xe6A#*>Qky6|!aV@r#gM8RK1(N+x(%P&e`@q!{uEAqqd=b#M3CP-Qv(GDL;I=4g< zUqlWlm>%4Gr;Y~jEH0z6N=JNW3-MH>8^%!%)5c%&-$v0pi zCC{%!zSxBPMYU{?lZeQ^wse5aQnNFrQOuo9-1p6iNML#KwP(lBi&ewuyG9u|KI{aX zjxUEvz605>S&aoTx=47qRyZd&1yRa!^lF7J1?S)7y$a=N_~2QB**UwRs`Zp=F7r}<-r>5XB&m95a8ZUV^QHd{*7mLZ=Oe3V(-Q%P23=4e{z#ns<{A$xcknxo zAk4^|K3$N8gdRs}tS7w4L5aR;E;8L{e$$uSHYk${H;+DP+u_%H3woiOjcGv^2ui9!t_t%6 zM-N2&qZ^!#X#&*l3Wyy)fQ0?Gur=2gx1wXXEgRfu-{Mwoc*GCJ+*hM?4>xXOjy!5F zy+D)O1MFsR;?+O?qW-}wGTy>6GE0}z=r_8wQ6>sbOA;af_#Jka%tsTOM;cg!G55O) z#pkr5{h2Y244Z?#TdOL(IgmP=DII#dl&+~ig7w4q_`Ob^a+z0NXTn+7*N^4AmM{jw zI~!7{G=U=P@MVs;Rh_;gLK3o~llWmWqew2< zh;)-xxS`e8pjsNseHm;@#zo%raD;$|Z~IW;SpgL}3nAUab^%3>bYoWt$#m`EHnMwS zq5EH?>aVT%$bKF*u11mV@;f+nXg6k+S`*j12ls-)@mpe*kbh?l{8lqYjdef9Q9Hm# z9@eAnHCBSS@fJS4x{OCb0kqu1k7|l6h5wijA4~thh=0i0*Rf1%o&vosb`t)MTSWi$ zhDvt4!?@?8`HtCUL}sq|t+gJ@89V*!eM`idfZC0hkisK5QasiPmwOo%?~^}4PvwB{ z*&qc9UB>kK=LhcVk|AQ_FR>W1dKDbaCc(b4n%i}01WsSH6>M!^;rhQo@!pT+*f-pQ z`p;IR1eOEepqN2lKF**D-BRHSjw46527jiwV4CVS>~wY|?J5PTi8rH+b?ohaWG0O= z8ji$V5wYwMcJxtaOrs*K{N+cR#_r}!-^Ov?#RY5!+6c`G4f<~fzTNHz*W)^qUVdYL zXrdGH54=Hmj3)hHuB7x1N0c`3xSw?&&vt6iz$_{19GA#7r1c}|?#Xm&h8CUwaTG1j zkK(;;Gqy0^K(%`_@9#g4YOeL8i`rlCYcU5)>;=EkgqzXC+>lYsf9$Y;7N+08nM?1W zwpWu(-)mDA<7772{Njuw9O;zaU2&|j;NPny0HeyPLhUfL%} zvi?kO(gUtbZme*jREfEz)c92|yKt&xFSoemJhbzbsBTv$SDSf)JMbcyB#ZxG=`YrA z`cqiZknTp;1Sdo}-sH^dgRv=t&G1~96QlPo0X=9$4y8|eC|6Vu^ z42E4C^S_#TQrOE)+`SMF&+uXSSiso)#*uSH;y%ppD3cT#&MBdfT76n1;@p}R5LM|8x$IHwi z!2VyJDylP1m?Y&(%;y~qI`CR{j-Vr&0(DnYq7Z$I-aDCoibWWm6hW0HyOB0<9p||s zoZ9mOgt?=CVZ67LxG1g?dCg9=vCm%=m=2)+^U|sQ;}{aT=L;&sN)fWU18x(o@%7Y6 z6doQ%6>KMy9M+%gQY6X!a4;<{=Abvnk^bG-h9_^lVUvFnmCQwTLH90q`Q=T~2kq%d z3x11te>I4fy@r%mKWq=1$a0}RyvHFgB#ECuG|Z58OjyMI>*#|wKknkT(1<3Mi~o=3 z@n2WII!}u<**?B>wmIdo-pa(}OsI?MAcGa~c3?BBxCjd14QX9y7dHFFU^?TLJaak7 zS<8%}=aUj}@KhwF%QfKg&_DRxu11$U`w?GIgRwzss2=7>j$_T}@3weS{GP{6NgqIN zui7AeqE)OYeF$G?J5s}{2RQtA1N!{bMO>UNC9h89T32rrNih!0lzFk740jEuX6w-V zdtF@Czd$Ok^dsk$%o8s3A;YddyxAykx+NNcFYD#8Pi8Y_l#FIG*>SXO^%E2|O3|rM zCMG#*i}@md-2JD`t8Z|p?FK_}Lo(xk9qL+WZA-Z>cq>m^JTV5WN_6|@IU+-J2sZU zlJ#x87+;YO*vt#%A8={lZwzl5#f@`nfrd&ejJyi?$9-1Q-SWFA-^v&Z2RHGv{9^fE zkrvD$w+{x=8uU$2#(HT_id36I`+lm^;V{OEc;Am-w!R)dwt>Q~#bJ={cBBP0dN|I$ znbm3Q5g8LfSMDSu?-$2qeUInvX}Sq#^ig!uW{K<<- zZpI2Z9xNjn_yONTR#VxOQusxAlcQ-nCaqx|80O}+LpTLJF-E3_6|KL!9fL;(;OE`D zI5A)hr9Y6ujqDvADSzM)r z3w54wV6%G%sIjh<^95BZzH$@F5_kDTrTx&GZBP2)l9U^ngo(M&x$>QkLQ48Y@!>-S ze6YoDZ0=Ix%=?vKc#jNe7sYU{krTM#TjmqRci~v}4@lbSbI|vqUl--kcAotkr+Of* zISF4U*1IOq;TrBd5&FH!zn4x ziCSciAkAKhT;vX-&1NAmZ!5;9W@F@LE9$fV31-(`#Gs3hkhV>LnNuWotgz+fliX-_ zDa-$e%0vdvxA4a08JDxph-8m=Q;7E!YG3oJvfUs}-+Zzv{<=s| zzoE+n*H0-pu}Y6FKT*Xhw*ZlPxD#~7jlh&$8(BtS7dJ@X5X&%J_>VI(#90s*H8T&5 zl{Gp2>_%^=4b=vx)8403Db)Iy5V?LoE^E9*pAE9u5txPL`ZNA@$+BST>5r&oiRB1uYmmXE`2kt3U3zI)&@x;9dA?l9xM! 
z#b=qvX}cOpm?1aA)08d>_`u_ei%36bXEywv7Si!Oj z%uSe+IvvJR1>DQVRctP7Ac|7)<`RnXFtKQ;SR&;#+Jo&##&B`HBd9rht#CBrFcx{86M6L3aV`5MQcOlJMyA+M|e~y^`DA6T@HMT zsTFl^jfCQbzEsS1zTeKjMP0rP?TmAwoT7C2uN+6)%W818CJT?HSXc9e9(EYIkyC9i z&?`n7W1_4MevJE#uV9@!L}*ITCg+l1fsbwFe4Fmz_>z9u{A~y=`RPpSm}@+>K#%6e z9LCQt6_~7LMW*|W>B@RlUgM-9ZnORQAl4696n>Te$Xo>VI-Oied?|8zK8rRQJJ3Cr zMVGpl37spygu;w4=m{DW=&VKgOKk<6-H+fEJzg*vEJmP-3Z7U7&^)PNGV4_rj;OZd z^GS6|N*%?EUv#20ybndJix5;cttPj~dZ=AtOi+zsVikz~{u=rBIr)EF81QSOL7 ztar8JZY)L1s*s%OBRJa`@b)}&!G>)Y0sMkIErurV*G9cv zh-kUJFP)w7i0{2!jXRUZ@{wK7ah~}R8t>nNiLNa@ez%>nmd4ZWueXHdZClxQR*sCe z9fm}3K5n%6P>TaD9~@ySKPSITJ$}CIz?o<)5CX381*C`nJMgE-gy+G*SJ&np4F6KWKYo#|3R-R z1Ceuhtp6U#C;H5%^UeDZm%fVLd?Py^%i$k39LedxvN3cT^|*R_-V~xj&vNBm=2))=&NkWBh!S(x#bvEX9w8$PFIn4wZ?nL*b}9pXZnagNe(9 ze+x6Y*_Q^=kXy#sv!CS^eD-2}`D(HlD`FD=S=&;(qR7TKKI3DF7uO)po7&7oO0GSuDIy{&i7ZK?Ij-MP_i2mnR~g}#dTcN z{7K~S*$!vQ)aZHRUyROO%{fj>f=bZ?tk6A(mA4FO7t7)rdQPDhc0P?QUqC$vtZ2yj z&De1^2Sy$i^yGd91{pd~SJ?tO{Y;-?yE-vvi5HHpJc~tnWxRE{KlQnD2#PDBDBk!U zrkb6>EGtzih+zHfe%oM_Y{bpjphZ)(H0Y!G47ksYpykD?<19of1#?<;Uc#m=7F?Lu08Y?e2So=7QP9XLI3*1vo6Z<+jcz-4R+vPCqoqlA zxDt)qeS>>A+n1zArVAa$JbuO+aUX3=FvV;dUFpq0??rox+1U;u#)Q+1NI_qpE8;fR zX*e@v5F#$CupW#r#Vo2v1KXAUR2WL~rx&6y)|U+3-l0)x4ju&EghT!S++uUf$}|bu zn_rGAFLq%1rADN_ZbFfYyKtu3f*z0()jHO2digi8)$%AOK4U>*)nW8i@eb4rEr`vF zk+H2717BLxvK{I)iuIF+MSkLzv+R2493=`2JHty4kK>|ett7k3d)V-Dl=wp15Smw! ziYv4mpUdA1vt+{HB497 zPb^01e5v}tLlhPQvj6ZEB~=59Qs^|Wr~dN@8cr{*y!s9EX(LucyDqO$nOM|vUaQJb6<+Bwr#k7Hwd^(<9 zT)2P-eHklYM?czn%Ly7T88Dq%!!M2KL$mFCaF}r+GZ!)D>Whacv;Bk(nj&&6SdAj% z$s~R21J1FYT6bj=%9k&LD!UuiD7K^Bx*aOR#iE`5N3c4bvFc`)3!B`uXsK(YFeiQt z%AFs<^0flGe(KSNAX^gHygBb)KkAC(vG3zWtX^+GzGvDYx}Cx=Ii1hvwff`HZ)LhK zox%&sb4b2@y(q|~66=z8b6ZC&U78{SsHX}Mi1mxr3!aF-(sxC zIN|HkCj4lQLHMY#^s|TPeE$OeXU|Kxm-MAip%3_}Yjo(!31iCXuoOaV){#H{qW_Rj znEB~9R}^f>?=&+<3d&gC)B>g@oW~`d&!n2&6IOV)iIB&6s zTVL!=vtp7(WHF9{jrU@qV=8IPX3*eI8bDXKEv9% z)(QjKGen8Z=k4W2A3u(rhURo-dLi?f_|uBz{i!l1jl06!I1(r3QDS2NxpYUtr+Obk z$c>uAmts-Mbh3V$K#g%kK8L?!fOa-k&t_TwG5>h8bwPA_(?htviz0K&LX3U)oVnUm zX;{l|#?3j*^0!HzZ+K|mFQK3}?~%s7OaE3!b3OUR+}Feydhoq3MX97< zQ{xuTc(xlIFRd-8`!2yC7skA}@=tu9y`{Vo^eA<6EjP_)EZt!Ii}hoeo2u5H<}{lL zYk!R;?!KCk_4o+9W)4Ng5ys)n@gfDI>yS^JLN5oYQKX+ALXPGlefLDZe4R9TPgjE0 zCRGY-7{T0Uj0f)Bfwk&~_<=pk8OLY_RkEG*v~OqFJMkuNr!B;&`6KAm%%Awk@`IbZ zi@0K!Qz)9IP9HZN7VJhEQ%r5J@L}C#jP>on>;_X5xN6Y7Mnj4^UW4)P4e8a{>!>L@ ziED=qNq1~5rZb8R2MmeWpF3nrs9A5nbuB|KH$#ohU4MOP9|A?+9Q zAAS2N&_mYm$zgBbQY8xAn<$(z?!=7&>xG&*EHm~y844x7bkT1JT^wq2+{DfmpI3~w@z}s1`K%qarjE$ye z=}9CSAxCRQRbj)B6k*cqMC`j6BS>~ua5DWz(%Ae0&W6pQ`+YiqWFejWRF~qG-75ao zHs)SZxyA)nyhOFx5N^r711KHtOBS22;X#xq>9y^kQ>T*{f7eIw{4*1i+=kJurRn5v zcoXT{IO=e&#wkyhy_w7yjej;%(~Cs-8_83tvLD>XGyh?$23EDW)41z|=~}wjcEbd2 zq2G`*%!-^?Z{?Z#32yOEOU`HBDk?j+gj@UFgbJ9q;@G?pZu9pYm}2WhDe<|e+vHA7 z5h~)*NG-22sJS zCrG+@o$dTTpmWqtDZD>&mWA`QYYnvVuP)yfEv5sdt7!XW#toINL(jBVDa306?ZP~;VT_dM5-rR5ur9utWtOpKf0ahs4S1d!OnCh zc^_G(#ZyjshVZ0w3?{3((X+jq5VG?IG@JdY!~PAL&neUF{-?00F^XJ=Ovji#O62@# z3u0!E$0U(GitNWwq+vX=57u&F_hun`>O;IfU_iCv+mI@s%mw@#OF@o*=TpDZ--JAFy=B zMm%)9%1f|2<=kL(a#{8k{9|wMj5T&OdQtf=TOT@Tuo)>9%P6dib+#2>AXQO^-neU$ z!KN}OZk2?2nLSCw*-+-C>4<1(;AU1EQ==1`KlGc!d)e$jRf`L?QV$HCE``&|7hF`Q zhj8G>V8(H-5lspH3bk5mzWGWS27T{Oi*Ih^hNwK|EX;#w-c;u1^pK(bx4T8pe|gaj zWm)`_?%=vVuSA_F2^-gXQ;zsG4xLb;ti#>#7TaRUs8z@@d+U)D1Df%DEoQkq#(9a! 
zG<#DO@-7ah#+A-wlC%e=<+?OGrX0g!0-(L)47!>7=VO2-IlmLI>_!#-gtg#Edl`gz z;W!v!%g@htr97#Z{GOiUT-@oqSYg=0eXzBpmFzvY$LbpEZ7}~+VK(L_g+Ot&5$)?8 zO-B|savd?pMLYD4!>zTO`Oi*?-i{KHyQIK%6g9zN|9lMgupyNt`S`PWG$!k86XL%I z;F7lveT(Zuy5FUQ;c}|fhdGTpr~v_2((rRV%LR6L(SkWCyjougy64vmqq`2A+x2=_ zjFYA-Rtkc%?h=}cYSdnT2anEEyvdw=K76<(wun=(+{22@e1_n?tOsL4hS0g+{b}f@ zR~WlHkyn!Wfw9lBneucE&VMtY0~2hKpBqT3%nQ8e`Z7|C*$C%7A4Eg_7gMmxB4Lx- zFBEAX6Tc`pjVmi0Xd9D0{`#d!GsdJ*W!nTAq?#v8@<~Vc$$w~?o`9OE`B)AI+IZm~ zCVkYSI!%^OkeW@E`l0aIIFO{cbSMl|!ue_Ekowu5<~&wI(E%my3*(BfsdbZ{-s<_C@$>MiY1|T%x8M1F_kTLT=<{le`r+?Y(y5cjiM29N=wc&dOt&2-XZ*>}Oq&$Fj=w|GcbRb2aB@~%p!uIMPVe%*r*Qew|YNI|kfBGa! z8np>){MJ%M!yOc?_>Sk1s&sRRCf(|3fY*L`1hV&9dLiSVMr|OiwFTTZ3kNEG^#u=> z81feF`(apUL&}{>0!Mj@vML4qUR=ZHXKjV$qsnn{*$vm!;AonC*%8svDDx5BW46hQR$d>P-^wS zME6O!w9cN-UgAPG2eLc${0^-7!@4fsl_+;%bD8X4eDH!4gxFb7NUInJ8FvI*OVE95 z5B|n7*ZZDl7~ozFx0*%>^(U~4&AyKt1@d_-m}_JAT3+$qX6|ub6J$(Hu~EX5GB`&X zp-=(2QghlKw*vC9ixC=aPyfz()3n_)$yi333wpU1s_e~FB0GtBuLseA+@oCILsgg; zz!)?094N_pHx8+*DggY+sVYO9@AiSn5i8#nP0-GGzBdk|;#VkJPSh5_Ai4 z5FX!(>VXLqU{``^yIrXNfi6r=R;EwA(sZe7;eR`Mgsa+8(%c+0XPL6RW;Lq+_|w-d z!=RpZo-<(#veI=GSUW+FBw}ju{)!bE8!Nd)=47rGw4tokg_5&>Fk(w3mn$;_M|3_y zY2a;yWXjW9#>O4x589i^Gn4A2Rq1}8 zZd|)>fTNMS(H3FHC;E<~7ruLN>eUjGZ~BD4eIH@ngT5ro{M}^|MOaXm$<;p`OdVIP zDPjLG7;j@c_4(SA$L>NgmiKtyz=IeN=SGL0vYF?mE$ES#L|(PI@NSQS_?Ub@zNz;! z7L_gJj|Nm=n1n9H>lliHZ{~APPtKxU!`iU>V>{A{CU9;o;^WH48T(d6Wt1B=n`1KUn@P z)PQON`wI8vJ7C(GjT1?eNPX)#+O_>7uUhZ}repshFZ?05JhKLeV&tf9pSMsHxRfM2 znsNB*7pzVT;g7I6?cVg~Tx?`4-sTP?*@vcR91pT;pG#s%9pbLEF&DvVvFAK#GM>6u zsF}6{N%98N{7w>lLX@ZVqvhZQKX_yYFm6^DSDs>pzuwb@n*~qtX3__7Ad5yeBb|%>`fsvTUJCyNkQsHmwwJxs=tuXj#?hm1?xc53f(*X`Oyu`u7t0~A+hQZEE?mLy#ZFZCxsAI% zRw1C`2EN=f5HsHmqsZ4AvBGyA9g1$igF60Mk2R!IOO$EH*Q1=lAIEx3=XvPV?liZe z0)`=uxL)}WBV$zQvF%prhdvJI)f7pQ+;|WRIJX0kP&&HNNh z$I_~~*R2ojTq{Foc3Pq2Q96d)^%A=3_2}c*7}&R{QP_Y$s`h$@yumtT@(#SmxfGoV z+*7lNchIA2aW_^(%3T|*_a96dxj!-T2In;?$Re;p3c=KX@9uVq7v^z+W~yk>j$w(?|2U7sKZY|1u7`{Br!1m=t)Jk< zxeMl|&)B7xD+R|RW(d1@6q~Yo&|UIDkX$^q+b2^3sZ;#feK_~`p`dS(fh_Ahanq-< ztb+StcJ&>`uALY{dd0=?(aWaRk^b1Qyjs#nu|M_xutP{#@e;FlpC91yu@SO-rg1^6 z!KPSCs`-#d!ic5hYnLqs3>k*a_Bym}^HDtgn1?SZK@@BF8dbHNk^J;7B5yCI2^Xdy zX0`>*;q25v6*)AF@_^OiiTwRA3Kdm-@$_;b9=~{s_ZimYaAXUVPnWRrJ-yiqjg9PO z$xWulJJ{nAWU0I5S@!x!1-@0u(TLsqFqw1tyLNGoOie3$w9uOzd8a@(&xZ_a6FJXx z2VzPcC`Tg!2f34S^{jNxu;uySI~`aVvI#y#*HQlXt>AlUEHx~v#pE{$^vUQLEI3zk zLvLf!oc2;*lN&~A(E zC=A|%zdbFebD0h88stl51Kow}+}&8JYDdONjQZZ={oUAg>=NH={Z$TQLYx!zn{))8 z!*XCY<&jAJ7U1{_McQ&li9f4t!k7ymq2d)TE>CQQ#oQ?FEeW6p+1}Jo@w*WJu>}$z z&P1E8%#2ts+CD**k|Xp*SCb^VvhXezShQlBrWUTJR|%uu`GE0zZm$Yua+vOoYmM_r zMNjYn& zWlS~apZn23pQo@$(V?jaJ|oE`os7qiN7FTZ5+@hp+2+^mjg2h)c`jn_YbU((wZ@g+ z`FN=K6B+e=DD{gJo_DS>ZwnRX*=rK58M4m<^!LwuRKL<1DBy;-0ie$^`1FPZ#BvHgXPFHLL&o+;2NZkjui& z_@>{NM*sSO|32Mh56Y^bQBfd$DB;|kYDd`L&?d(cY^(18YWGopy`sos=l4KWE~Pe#=UE zf7N2zR@RwhNF|8_sJ^fhNAL2^SN1L(?fwWk0p`@?YCs~hqx!u$5-p!gP~!Q--ve(8 zKlD`L)j5SCM!ZC$Mv0^?lCv{|jqEaop3Zy&qD z)b9+Uu%*K#R(^xn+N&9mm01Co&^b` zVydYv)i;!o+>aDOO11dF&>7M6V4m~6QJ;>l_w>Y1|v&mWfa zPDU$?6qn-PTXpg8obe>a>ylFXOXTh72h9hiSh#9);|!$%G&Cd~x7wD_l(&1a{N5|D z{i>9ru0ylKk6^!d4zmy9t|87+8I?oW9;<}k936}(*yf$tJL4i2Nui|@kTgEL)&bZN9sJF9;bf$B}4CBOJiEMAUym%#C4mbMEIN?mBlU2pRC+>ed?`8Ai9!nZ91W>kb^As?}&HuS#n4(8ek_#b;~ z^$(tF9J&-pd?+lx>Pan=WJRg=7S^rkJbJq~v(+WNXf=1R&Rf}ldDFT7*#9PWO&Ez% z4HLQ??ZeqX9xTW^g_UgGj3MD_MBl@uZ$>i;9C?7y0FGz<>DJvu zax>?-q^<|(G&=0FsrOGj`B5Ui^_`3Euk9(FbD!7NjikWi+?}nHLeY0Cuw$n(n-dhz z-K>`4rfc6|bk$RM9k&;&-1?Hua&+0j1S2SDrj5}y| zy!!t$HmB#v=z7C6UY=niGL?nqLoT?V{1wsXjA^vVRmt?(=~$id0$&C{z*K&pJGkx{ 
[... base85-encoded binary patch data omitted ...]
literal 0
HcmV?d00001

diff --git a/source/tests/pt/water/data/data_0/set.000/force.npy b/source/tests/pt/water/data/data_0/set.000/force.npy
new file mode 100644
index 0000000000000000000000000000000000000000..10b2ab83a233e3e9cd9d3ce05cda586295edadf4
GIT binary patch
literal 184448
[... base85-encoded binary patch data omitted ...]
zzE6$plui^cH-9i?x2z?LUB_8d??L8mS}D3zumyBo;>oFAnYJrcaNSw=_}jX}aQ32L zG7^Q*&4rg}s`MQuW`nN4R%29H7qU`y1n}H zlD(c)@O$0?T5dcDhMrfUk5(rn)62>!^jjlum2r!z7fFc?&CZa{?c-cUkR6kGo`-*e zGU1d@4p^1wNOn~+HXx^fMStB3i7!mWvlsTViQ}9p-AhXRtj<|rF=x=EF`qb-u_<`@ zPa#(=?j{d&Te1GNVUlI-iW2pVSUyhiKIB{9p|TNc>6Kg~=*J7^Hg>D>MKG%Pki_bIC>f3j%*j|_KTz7q>p&9=p0n_ z`@$IwQl#eD@~mCw4*mPw4jwyt=<&-~e$bZll+YAU+6p<8VI6@{hsBa7LrqcOrm=B% zO=#OLV_H8upVu_XVVBZY!$~W zS%CUVxK(@rojotZ)i4LDnYDyAe7wpB&3#8h&im0l+qd}Y?+2E+8^HA0bIM&kA5AUH z$*^6P9>i?~so%{wN4EnP>K^Vcqa0 z=(?<$7L4d)DoLkEHZuiH^*%s;xB^}LaFt$*HOR2`F{zd&u`k9ONpYgkr|G*$GdAu9 zRsCd09d?wiKbS|WohCtArY4BW)9LlO`OveG!4;EPWG(EvM}3mvx0a2RJWVL2)$Yh{ z14|)bK{%g1Jdm>+f0!vuN)UJ_&pFEwEtW0#*B0lmr&F)5^Ia`2Y#w{cY4km1 zuf`5%CNdkL^=1HU?=N&S)ZFpE&tfV{-9!#KE9vR7Tr3mV-8H*1$h&Sb|90UKjJIrI zlCe9*e+EB>o3=~o#G?aHdj2_mRab(#wWXM}R~=_hJ4jyZHq!R>o^(@7O5AYY2oI+9dHq!2Q4FQXDd)$eg>}|9?WaV z+p=#-KOy1ka(=Y5C!0HeAgi2i!|gQc#?S+|tIwUPW18AgBt2V(Me9%G-yYUt8M&8H zV{JCA>n?^8?i*WpK=8s$y$7?;9p$t_-=pJnRd}$bmznw+L4tW1JP0)AZRYt!*%+n{R8WXKpRN1^?`P+w#Mzf{K`UJY1;>K^~myI)sXccni?F1f{z zD*lh2UJfObJAvf?`@6tW*v%hRs1mYE5j9-NRpy)2%yobZ+Nkmv(icJwE2m-q{c9-r z+JCS*v4Fi0=D5T505wLc@F>(N~{G@5&-v#W)BUrv$qd=Jv?^li94b0X|= zokJTug=>~+Rq)mL3FgLEixU3($+A|;OUzo9@m1MIVl~xlT(x=`+g)$QwrqA{{u7V! z$^8=0y=*KWnlXnnf(hL3#zB(bmYS3vR>6h*BT-qbEl$4efc@44!8F$^5U5g(>4r6I zWq>91KGUV?;t%|(g$_*Ui;&;e&ft$*yud|k zWEJx-l4`&YPPZ`=evEM;r#X%=FGCsk>{TYO(L>?GH9b~h*TRKQn?YSsOX$B?vo%F!Cl?m1NRX@zz~y#s07reDm+bfDNeOJG)J#<25)XYncl!?>waPP1FR zJjtf)#hms?sGWQf{yGm}Js*T_*_bE1<^x9zd24|s8Q)-2TQd5o4u}1P0|bWDC^%l1 zjLCyifU7EIDwaB8sZW&{(_~7QEvHkDt}O59T(Y7M06LtJfh3! zI${v(Kl(VXc5($9{l`%6k%uO09;+mlY9#%Kfj7Rn(SsUg4ZjOfzr!+gc_US@gsF@`>mVsFapS%B+%tkCv>=kvUn z#WSJvKS7zEX{B*P??v-vxdFJ^cO3Rh6f@nDsdV4<9g59t=x~LYn>1OTosM{hn^Y=A z_1iRr?1vKMntaFS@n?BYo#V7yV6q1(u7ub2s+5xVhu1u{iroDK-uC;^OjA0V)x9tW z$BgML&QFh44E@cm(+Pzeg^J=gfqlGGMike4FBCfN?q`>)zi>ezqq&|@9o){jW-K>+ z1C@2XQ)3BG-$e!qbUho#w};4ZMvHXS;yR?+M~}$+lozpHF?3#^8YK zDB{~uaycDOYu1jX-~pY?U{)$La>?AB)OLR8bafEG8J1IJ)oBbCQPx`mY z*=+9#=RIalf^Pb~^HOpbxOHF7Fs>6xXMzNLlMW}T- zuBP*f16M)p4-t`&$_LOIob!e*l&~ zb)u)`L$K~-5^kGZf=m1!quiZ6bp66*aK0lZC@rAv6~tZ5vPBnGfpI>QiSKU)Pl9z| z?xqNCc6Tx@_iltQx^3Wmdo1k!vWe@OEo5YR@6v6bx%5Hvgs5X|0aF@ykfurw)0{K% z==H)6ucr(Yk5@1zKFk%C_UTDx$c~`5Gr#h=-C8ubxE9;xqv`o&3HWdk6fLk_k`<5B z(Wz>1_rwR*Av2ogYsyKw@5_nxW?XY zJgz`(|3oM|_8fhhD#7u5J}g~YQq?q98mdea;O2c78v0rfJ1(kF!=7N07uW=C`-d^} zz&t!PG!i!)%)>RSWa#6HUy!mihNvtY=Pi;I%e38sJH0$S8@Q33d~*y+!b4e&y)yNW zUWMgmskE{;g_KB9V&cG{Z{~h@_@$B@_RGL^ziu|+S_51DJPflh$M6lCoJ1#C4F1~l z1kPPMgfaXUEFEfsv~?m~+L**lRpnrbd@h$@B>{uy1tjfL4%;-!u`fmp8YeP&x#^EE z@`(aP>I{doM+@-bXIXCf23c{mRX+U<`AE$cqo}PankvtlgTk~PtSqDdpND9MS)lN@ zXce+R$gYf3C2?aC3zJ;pP%i>JZut>*m9S!r0r5Y*ml1FKR;RTq2OeDVNJo1?;h z)I8;lG8HIq(k_-AZVZO(JKJ%(QuJ=)GVcB`Th6I8m;dAxhj(+V*n;@?sPOahZ7M~1KpW2ZmH>svJh zHjy5=XRC^~oPEJGoRb8;m59CD^c3{QA7VCBFLR4#1d{6-E6mz{lkYcCn*x%H+27hF z_~(oVQ|%V++UKjntb!?E7;_98rn$kgrgxmbRwU<}6Ud#uFqI9sBnNt{8zFYW5!$U2 zOZVo7(RN#TdQ8vxnwvYg5J@7rG+xH$>o=L3#U<|3965Nlcpu%;Zsh|`C1LvaAA{*Ejz+S|fBb;5DP-=h@uIhuuJMU&R$edwP4 z0FB(=^5W4q1us%8uuft*SF4zs<~bJn{u}pv%vbDij%9x=&f?)IAvOCYQEc*sr&NDC z0@{5;pxQi+0xwRWabE+;-Y6R$rl#P!npk{%K?SwE4`8nUy_!`P1(>iV7Q!E!(h!q) zn&X$t2KFXnrcWn7RXDeIWC#w)F`4}RUkS{*@Ca`k{0cV;dEH0fS{Yk9kaB{T>-#Q>TB8H$^6wbYy5lp^;<&nS$LPyiOJ1;ruD4j#uK6tBhdcHu z+0sHY6Z~6W09_#faf99HM&o8lM%|qmu14JqAqmE?` zlFC;({95lCHtBO2nwT7fZO0RNt0o<0F$};wM~bB{Ri%2xE-pq|c$c@Y!pox%)qISX zBlG3qyxWIFJXJmx^rsJ`3m@;XnGyX-p$+k8S}NM7&ERVVFIe6Fv% z_uu3PZg1qrR514V_5;rGa5{PPNOQBencQoYa<*&h2*zwSp^^VNkh&4ebTh{=d%58_ z&qSN~To!&SPaC4^n@Qweb&cDzx|)CJBuAbp9n^C2mqYr}C-}o~7Jnxy9WsYyvgJ)D 
z*=t!HI+3hMg?CC=k^emQpKc#>^H7!aM!Ud|@2AjB`X{_})`CZWhhS;sGdAY4ENf9r zWxB;>INs$j_vC;FOY|E9>ji&%aEuf6r23L%=m+NiE1CxW`OEh>S@SaE?C6M&F3GM* zfD``fIj6D|K3H2{@FeC>2w@Z0qwU4qUZK~q(&{?rD6kYFLacH20}EbdX+M7N!Q*%| zMT2aV)g@&LpZS>rr$Ob^Af{LRiq+f1h}w%M;gEJOoV6yBe3CP{SHrL1M4gpbp{$Cv zF2`y#u8qO(fy#h`yV=FDQ`k<~Fmx9(UxlCluvhZ-ObuMBpF?BKg^6AG@WLEQe<~*C z^#K3-FpXxnx8Oacu{B=u6Cu5DKj*pW2z-2X2(*SdL7taCg2sQ$Xt5nAI*!EBZB@Lv zZU*D8wX(r2PE0oUhp06vm$|-6rey`WcJTdG%b`(|2 z%ONp&2TC`VvFjT>F=>pz=KA~>*T0U&nk5V9zp$lzXLb^+8#ahWT*-g~o$ajlzXE3S z{TIwP*2PQr%h`%GK@{?H26(rq(B$4-@GaVz&Fag94UGdJXmbSm1vNo-`PG_|0x$Ba zI)M-71ahsnykYA41bVi>iC(;pWCq*(xv1L@_^|cES*pP>aQrk7Q!Je6?o(^7=TkWJ z3k;!v#siQc@G?)Ot4fOheu85w_Q8zt3n_KOALf*D7&nwBvfTaepz!WE-db5tT;g5| z;nM9a!e7EPdx>r)chay_70E_r1xPJj!~c_>3z_l>utv(9>a8bnxvq1eq+%Yt4?9k4 z7Q9CBsQI*ahYECeU7(^PAL07Z&ozF}60ZkXb`H%w<0S>KK25#vo?uxQTk#>|&4q7JzK$Yqq^P9I95n z!4nm;;9yZZU(|7t<~WZ2M#wQX$wl5a3u#)%Xs2

Jbo~0y36oal zV{%*=yB1XdUf&~N;r*|o%*oT~km)z-%uc6m**cQ+H_y4o#8&fe^7kIvohR31}o);&RC{sc|4mUIYdkoYnywC_kkA}1Vvh|h4!2>Oz#E&+FC%r25>g88#p7+MeB6`0)Lxl`P3lv` z6<$Z#)dMNCS~d&a?GEDmfq!wS)E)eCH3@?MU7%Wb4Gfu}A- zxV=q@I8d0kT1J26$|eXo^>H28ZwjK+pBrqM=`Mcckk4q_R)z|b#!+~pF*xlxA-Kg_ zC~TnzU4GR8zGrXo<2~n#HlNGJn1{8rV}T}IY}rN;kB8B|+xqx#WgV3_s8P$j*ZdpB zYXE)<=v@Cw*sVLz36*G?CB22;|16mwke7?=M}NV9_(HV0xtS?$bApBJBYysP4vq~A zBkzL8Ec}Wp8?eKJ#+kLV%sZdB*!;o#lt+V@gLEz}D?iQxOBa(4KL#8oCBQQeYe-wX z7XzO*)!3zsVH4BS*q%4er00`GMqlFDfX*oP_U|#gXdA=Zto?*jxBcKE3Lavp!1gWB z&=!hr$zV~^pye}njOiuDXGwzeMv0yQWCsm z#dvqnIrc-@8tn2_VA+ZfIQqqEcqiV?st^3c!H;j?%}Wtz+Pa7BiTlc<)(Q4_{0(+< z!9u1HlgbvQ$-t~xwXC4P2zRgjO&XS=tYK^&Zn|W{RHMXfkk@5?^@g>~W?vKyb2-M; znjZ0!4Uci157x8myA?@5LKZgN0}PySpy{i`Lzzq)(f$_P|ky-+mmq zm!-^H)Xb7cyu*{#Ls?$V2>~cp!bA@O;7Y9reH7d*)~3;NOIIisdQGIB$XHZ; z=}ao`F0%VaOIe-TZ9e?_T#Pwt2n`nckSCSKrl!5BQ8}ytTl>kth0H}P{`wGbvwY28 ziYZ_}`;TfH;n@z7Z1upm(;r{N{3CJ|P4u9>31B;tLv4U@S#{4+k z<)MtOcd~(P^F^u2gJ_)XP<-I@1SE&eSZEt#zjnXpRY{MvwzuN#veWoOVAv`t45m>V z4{-fDx3UX%=UK()c$BZ*#*`j@Ws814VN;Zr;-C)UnVD5t6F#ShzqU3K{mfK3_wqnm zJYBexjg@CY!%Fn**E726^@#PWe1g$2yEjUtk1d!4=S&wQ9+|?KI(kHUkfX^sv|>5e~2yr^|X*@d$fsNc|HQ3zEPzp z>lmi-_CG4h-^)A{=7QnplbnXq712WnbDa3?9lNQZUvoe$l>hN4gF3e>vxJvrf@7>0 zPMc&x;LK4Flvab!HbRU^Ff+3A1l2*qFxPzn1QaO1%8WAtV=n`z`xkMRZ|#Khb8^k8uiJ6) znW^AqUqmJvtym_T#xFA(O$+oVvxf&x3ZB9U%4(Yj|0%e0o2TTFqpLN16x)*l&SaX) z2chk_QWCw}4&x`a;u@!=uw3C1%C8;8+ddl%Z|Wv<+}j>dpg5mp89-L*~u~g z{%_g+YQjUB_2`n($Il8BxN>%;6g|5i-&%HnJ-^Wp6kf&C(X^$|<8T#S>au8j={?fl zX(V3YXGOzIY^iXEEbQ?=3~M?ZVTY)mR~aOn+YYs3*eGrGW#?b~@-6_rt5)GmB^%CX zcPvyajbVLq((HB5FMRNO2t7?WBno`$N{Uy+z^D7Mo4Rgz!6bxLKAA|r!GdgW52D9Y zWO-ZFQK;S+#GI1^#@XyY!nx7|I~;^={Dc8;-SHzzukd54AEtrYcLQGM!49_hmI3@+ zph<6r=+K6A54bt!JU4!bnk0SmZ*uE@j-73DY z|GEA%PF_=wTYQ@^pz}KD{#Xq8>vd??B!OS=x)7#E%Swvp|6|LqxRCUdMYLECcz=9I z`|E;5GpZUv^XYkLjR_)~pn>Gm-XyRnbkT5IA^uY1=(h1Y>{_;irrh%-%j!TFQvHyM zo;$tRiT^PYuWCG@J*|J)y-!zguDv3@ zjvNisugHSQNik)uj0b&PFCM==NuWhcQ*o8kIJmxxc)Qy6JQPo z5j>P%O(T=s*I?iKj*e~K28Rx$u&%o@u;{^i-237RpIx&LQ>@-#=K^1tIXaK_#l^r3 z-Tq{0kO)hSl)+wJjg%%!v-;L`xIq6QKT*iW-c}na8E+;_`xMv1oKR)9v3nX*ozl&Z za7tncwId)lP9OgG9HP%{H*0(*YqP!fFW8&)AF(LX2p7iruwcD`R6Y6=`}Vel9sZHT zwNDZjn>$|M(3^(b5%VT?vv3s^3LV9>b8R8}$#l4LpM%+XVW<-tN@s4gV|Y$Fm%rQ! zGis%&zEj9%xs7Mbhug!Q00aJ0)O}Hw#GICnDPb>L-!hNP>6rDk3qQU+%U^vVINT{WD3OTHKaSrdUCkxY69q>_Z z6+SH+&9vG&*y;I$(Kw?-N}^w2=aDVoa`7hXoA4jxG?+r%k~Nh2J`v13>qNJXy`i^imxP|? 
zC^mVN0sAdCuDa9JiY=-N;Z2_GNB8Inl<>=&1;@|gwjb%lwufgy&fm6Xqs34XPaa5T zs{U~+1z70A8|NUtL;1HM-ex0MJE1=iU8$FE?5*U_z9OKprierniWIF^I3J8 zaOqDJ%}F=lciUC7!jD^Em4+4>EWgWU{>h*Q<6QP+)F4otK8(AYuw2w)R?nSqwx=^Y zU1(*A2L&1F!i&D)RJ-Ocg}FZA5=#0*T+?IJZ*5_ffqDX`>jIvd;Y5GuFQD??SU6KT zAM55xVB69_>Jjeih8+lml<7zDPg597>8)k07qa2zEjOlArwwD)YE$-?bgWuki10TS zBmOKG8QkB4;dXf-9wu@rpaHs8Qig>a5AiKJ0DqbFcndOda#DNO; zpmg|qT;_9v^XtCF54W$DiyaLa>D%NP1Kmm~4Dfi^$( zy*De&@T5n>P2fpwCl~%=9c(>OK(jSJ<7T%N{1|0>dTygkMPu*c+T3#dUEzv-n}BXe{ovEA(wJFL5P?Z(@6gm7rD7W+vnF72|I8)>tb% z=51On>Ct&hIK3c3_#LyxonE)N0SCfCDd0NGUml8*&whMhQ!T5pe#4G$Tg=d4An2TT zL#I(SY*A+@oVrmAYqQJ&yvD)(NeigHMV%8;h@yS**%*7ZlwDE(f^!bUveN4#sU&M9 zJo(z6DQXA8Mj0P!zv7LJ*Hf|ZnGd{cG>5qciZE-BlIY)-4m7nJOeu9n@a9)CRcyS$ zoCY3*ir}5Fv{{dhT%iEY@6K}8Hwtl&>ISwd`Xzo*-a(r(i<$n|pF%(4ENQQ=W4hZ6 z;ac@WKJ>mVe01*>x-k}P>85_=F9z4SL6ozimA}_B3X6~L!)v>TgNls`*}PJaDCLHd zaISw90ALf_b&;#>5&GU-x_IsSn{Rv(1%^L4n$-(fU-e?0HiyPx_^{=y7%)giU&HG3{SPvm<(2f7X% zgG#UA=+^dzJACFNZ+^BvtTS-9fr2%D~5@Ey4>->`Q z8I*EG6HF`o!NAX#K97<>{M}EEO2;PRyqnVElCfrzIUTC-;jb*YOi2;3QXN<{!i1(S zl)#9J+ks0c@>bgpcPO z0Ev=O^)vOathaMIwaqD{`@$Q1@9<)BkW@H2OU8iOuHCF@i#!|k_$24&Kb|ezh1`^; z8KUXaw>S>JV8~vTyv8WQO!_;ckeXs-`Z$CPfL5dp?a@8=XiWqe}4F%>ru6*Js^lukv4XLvcsJbarAv6#n+iL*r?L>9TUn z_wgN;Vd=<=cRH|9t5>p{;WaFKkQv-6sG$7*muP8Qa`iL)M1EdR2Q+sZl2`x?bjBTn z)*?f=mve*ta|^_9*=MZd^GQ~0CMEf#*93t>8exq|07h8~b8EH+9bLn752roB4b{i+ zk@pLjzibZ;OV|bvGoG^hm(Q`jTLM?UTBF9w?lNo5)`P%IUwSyvnr`bnK?4OZru=aj zU*&d@_kNj;5zSBWYU+G4Tatpln`&!}bfl>-SP{R^H{k0XQ*cpGgTtdw2jB)9gYh#> z=;;zEJf^#hq|eFWBz+DdMaq&VmUiO)K8i3zO&(564yT7l!l2dr3U6(8knLB|qm5^) z1eaqdG`OcS|K~0w_;**MG7iPlZDN4AzEFUz6FM!vxWN`HBSXziP;pAF(-1p!W zdzrA3$uI6l3aMw=rd8wN-Jg@-B~^>9Tf9+mXcGE1h0yz3qoLr3@DCT{)`LeXqwnPN z?EBBVkbO==TrKooOy!qx<3*W_*S3-zyb%Le%H%1%XE(QH=jECTfsH&%a}l^_$g?|Y z2az{crjsIL^!VMwS_bz}OV(Ysvb2uXEztzA#&67g)DI4s_GivFi0eEkP5-^?`yWN; z9Z=){#qowTNF}4JhLnmZQPgwJMP$oJQRGW@c9E3^(vCFAEGaEXr2Cw6jYL*5Bc+V& z5!vbY{Qj*!x~=>9JmSElhBnJUyL_2yh9KT5dlf|D(l!gOgUxNH77(Isypj_&2bGNr{F z+-)noUe!*M#1m zJ@In(Qrts{Xy+d;D7%>R`n~>q<-Ww0yR(O$eSHK!yRQa|ZBqX3%M8g~w?YV**9A5# zs1QFb4Tn>&Qk~!5?JoW;9!$EmbHMYGnnEM03%8n%!SNPrsZ*5^wtRmN$F`dZ-qMaG z&iy90j_@Hf!yIApNB%D8vA1;nQ?8hB;NiL}g<$`ZX102_W#k)A0l#ll0wHMT7 zxhu}$+7SXCO;A-FY0j4ROG}-H?&z#2_z_I55jQ|Fpc|SVF-EtlCK_vChyJa_@`5Ko zE0a5t`oNF$Wb9#Hc}GKGWqSfcPegIg0atjRm*mR0IT=1bai@_NBE@W$iPT3K=iMay>|9Q3cp3uYG3j1 z)Q{kzyPb8UbL7kiT`INh4RLn)+?;Bp*p<>w_Vjai+!3kD=3Vzgx9hQ3DYrx0&9<=k z#cEzYlfdNYAqsVjB7?E>>BWm=93=VVvVz9&n!5S&ZTdgxWUCRsv$~Hx9h-Q`ty1o0 zeS!lV2cg%0w`lW1=^kWU$BT=W;(=i%;@YqpRvI@1ODBI5-y zuL4sk`xF#7t1~0YP?oF&;IZ3FC+U<8_|Vd}pf`T&S7O zYcD>***)%w3G)VX(vcpl^I#A9KOc*^UQ#xDuNq-Ui+HzW8m{pR=OND|yiQ{Tw!fG` z2Zrq7YRfZ}HC~G^_H7be_hyPK*0xjNqinJ2b9Z6Qf!E^8yALGB$VR*}Y9(6wL~vWo za%#S4M`KcBxM}26u_pbS(^c1A-06H4BuU8NEXlQJUy!;f+1nz=Z^^eJuL zR4d+W(wCi2(Z~DUJ5$O{4aj=o1j!E@1%s@6;>3v`V7QGmL-I?84H`aTai%VYp1Omf zJyJpQ-VwBz9)bTpH$&vP17Iecqm2uO@{RS+;Py`yuHSNl=DFkxHEhb`-(7R^8{&dX z-UqX;olPm?lxLCn zrpyiFJqsbB#h)^p+r-7&o;n+^zD&l!{osx`3br=5h_indfVH`%(AaY?oYIJe*6=-) zw#S?`Hhtl(|Lte(`r|y$_aye&auoY?ScGmTj*JyM`;VMb{8K!otqau{44#^?HLqOL=z^S}0m(oSg_cec3(cQ?Nl zuDM3g`IN`vx3A$aHlP%XJC<_Pe*qw1ie&t~Z8nD_x{w-$Q_RN_T=aAWEQ&Kj&&U&$y*q~rUbyq2f%aH9{I#H})m;=Y z1RTa{!Pi~|yh7EGkJ@Ak%NoOJQKKBLD>05%SL2(Ff8o7hBBbee7bH@)!6W&ZmWH6Iu@&^UNDidsCSaRUjoGF$ytH=#%EH{i zZuLAE)_fh!4gBSk>V8nc5Nn=4qz>x+X42@)c>W%8Uz|0-Mo{_`3hTp_(c?-gnaW>K ze!abz3(2U_xgJjIO~mP5H#zL#9bWeDHoWM19A`;;jM}S5p;{+ahz!0>%Kz{E-X6#A zHD+?t2R{rPXvFI`=bJU$$!5Pw%oGn8^8bhhk(y`@~V_tEp@Zd?RwR{F8bcQ~7HSCqL;W1+_c zr&kurm{r%8XIzTGhz$>L^(k+R{8K2@bO;b{6-j8D`?|79$zeVD-eUUsGFM{e^}*f~ 
zPKxpCa%n(W0jamvP{6|%&`X*(KKICz|GWK5%+!@~WN&uK2YCm>=h4ODe?=W}@s1OC z%WEn6^?db=f&E~wqT3vG3dGRm%9Py6p4c6YI!5LbE@ul8PZ0lG=mxnb- zKCwqabKz}pKktaIHRsTE*U7AAu8PHu1No8aTH(|5A!6yKZ22RTKr!I)IQVV$7^o(M zygs>b(3uWm{?p-D9eW2`Jy$Rf+l$8~Zf(ta5q4dRhnQ*`@C(x5{LM+UF|dHz+^U3` zRu(iu<&s!j8i&)5r10MFJGpsKm=IKPM0io#OldPdON`J=SQ>ZNS-X1{tgbl%=ifBK zy;sj5aOn+nupP-t_G~6-_n5`PK@q{@{v=wU1yxuOWPB@_&McQX|b@b^=C9 z`_EwQC7iypl9lGyab2(#76)#@FMlRD_Z##b?NZA@S7{2SNAKieeOrnCJ`|%>!f9>B z3?7~5p;*6v5?$HQ32&a6gfq*-@IgD2bx$45D;Iag7&k3mk=Bn?XBUcPzYGMQ-cRK% zFRHkoW32pM*nIf)p_X?1&7}@E|G}NJbJ*wYCiF4TfP!U#GQ(B5;!J%*-u)nk&q=*; zjZSabwN*pWyuC@7klL9aCdHs<+Dm!JfF@p9yo3Lql~|Db+Hs%t#tNHfOW|M3WB%o! z&3}wD$XVKB=i0QR*Z#+7xyN^QA8{Jsc+jptxUvJ!Y?=K8F0IV_85xYe^6oTR> zV3J_X!Nu*_)$s*5yzj~t23s-Wup?gYkRqLrv#B!2nb*8>=9}_ly1p@(roZkWo4M*C znl5ev>WgojkbP)zt`@-_{}V-NR=0Zlf1^SwLI9H zMQ?oHVIPH!b)kgTIhc}t2Y>B}XUjtAPE~&ePM|iF`kvyAe{SJN)nTar@+4mGsIG8- zz7OZ!NJQ0zajdU44!?~J!&Ji=xNu*K+~oCUNKomH=L4S$W3@&KL5^=Z+oA?OzL?3U z-xkr+gb7%6B%b}>Npt1t#p0<+%|c}C3{X?ZVfPXlYiinXkBz<1A>lBWdr4fhPjgYI zNW<%8ouJuPO{{V<78jMcP@mmB=;^i-@W^xn;q0ZP68s*jr;P!(iV$%@coq!bk}Oa+ zBYZpA1P}c_B-VDR5`NCu0{%5?;QXwYsFtoR#tcu#%omgBagmZN?MNY>v5tXx*8REt z%=PR#`yyX^Q_60g$KuSQEqvvIFZS^s%OxI4wA!>cO`YRG8*08mVmAlg_%2O|dUgit zpDqJc<8m&adzFI5DsiW#1LU>023~)uhw9PNj9c@LFv2yQ(z{$f5*!ts@6x{z>2M{@D13$I_53*MP3tU9!(XzAFUvlgzVuCJouvGEEXkk=V! zM-Sv#Ru5=czb}+iumpC^UWi8rrwL{`!4y(hD_%d_ftQ|*=HEZ14(6JD|5rWoqvx}E z$nZqr$u<^BC1z)8<084E<|3M?-HCnIPQu`elZA6thxyI442m3P%!M;M8n*KrSrrt}_A&Z!?bXBcpLU1f(}Aba(c6n@LQzQliFyx>LXbA zAe_F5s^X6~W@zxLH(oxmM}AkjTQ&uqgAemsX`TC1=a?i<{wpLRf1kn+R=;H3O_o&u zWTMdf!75Uy8?gB#T{71_fYUAep-!JR@ExtM$f>^0pJqpZ@BBy_0EdLzDTm3&GzT`l z%n}o9bD_PfJ*7>)gm<@E@wkR=WHS3aSx;yLzkiO9Q{zUyYk;>}rOAJ8h!ikZn|%_V z;0cq?+{r(kigquB<37r0AZr6-yN-M)xd&7Oj98N_WkgI9FZ@v8B8 zx;5QUsJpvc@qae&wjR?l-^B}W)|z0nRV+NO?Sy}`f-&K@D$Xq1OD_+^(;$yy5_YFO zE-}gE_XhT0VWxy}Hr`xo9N{#`TZA5{205njtX%M!WNaq*=(Hk5g$! zc_7WLn~Y7DTcJP}4F;}vx$~AbGD)f-HxGaC9JmTP^;1Xpje-14S15}KlFpCi%Q<(} z3HKuBQ($843Dk;Mh!-*$_muCHCe&F%zRyz{+jNN+EgAtkUd_UQ(FM3-g*6H} zFKO!7Afe~c2UPs77guD|QuC29;^eySeDc~-JpOwEN7}R#LwlVD*WTwr|7RagJ8h3K zGjmYo;9kk^ejWQQT8`K2@6*<&J}95zE;!{sfq?^BDLtb}NGo5&()ozm`AS$h_HIh8%K-PpC_HxN8{JYnhJ)HY0UM1C;IM5VtKDtD(I?)4=$y=n&o|(m zM`2*)(-Y&0Z+2qI|i%akJ^vWTDF}FbzRs^Lvmyu`o$)XDIZG&cNS;Ftvi0ufJ#^X zynPo1W`oRK;%0vu;>AmcCErj^sg7XMepXB zPGN$Ya|YJE8-)9A7PDHmoc`#i(2A{6_iue?#jOF7@9O9q*zsiwYxfD}7Qb0|!KOR4 z$xo2)Q*Z8FX%B9x=g~eafjjQC=QzL7SoEY9=4qW4I@PVfHf4K7?WYm^rOk=E>ZS^d z7Ej{swIy)rzcO0$doIr%*^i4CnJBb7zD3CCB|E3{42K%5qwron&h<*JJfl7sb#%)h z>(P76e(y~V_UY*Tw+AoV=EL%>dpJb8)-DP!rpUIjXtBeByLT`X_DMM)yC6%}KXRTA zsobYGikITn6T#R|V?6&I*aSOkYlVb~-f(H!J9=n$*m+%FEycO<$rScO@=t~yVD)X= zp-+4i4~|hsv$$pqs&(TI>iU$cX3A#SUZhku4eO!}6nYj9;q?q(_6k@IMTdRPdsz*_ zEa$a6y6!j4x+ie`EGOY@X)})O7R~FFrz`GwXA4t9Qt9GxHJtLzlqO&$y_oeD2G91w zu%=^R`e`A#4~@e6y_fL1qFM}Xx0H4UEQP^gDIC9im|~d4btqk%4Og_|@N4h=xTt9z zf5>Hid@q?D`~z7buEpx?e4HXL#PL6OkjKdq*gaXFk32dLYX=q3HkXTVY}Rh7cvC{B zVwcdT8wbH|vAz)0do)eo7eHSNV!8bBI#{tX2Ir4HBF~q4e|82_ILjqU9`XGoEUaxu z&Vivcs*3}i_}&-qPs_B%kE$LpM7UoS1qu#gcDWdqp`OS?T zzhF)|GZJ_tPhOl%mXsjG|~sk8Sj1XD0YuY#_Z2HzBY%i4_BDpnuUp zp0J`Tc0HwyZ*%tu`|<-o-zQ2)lza2BrHPw`;t`)vXGmrmn|Aa?w30yp9 z9Q)2q5I_FxObZ%gX?brmXkESlJ~WIFj5c2uGY%hx=a=Kc2&r4P-Zy zP3Cd2Pm-A|KG=i%mss%Fl!t8Axe@H+U*e(0Rpg+kq3UxZ`J=@nxYrsfn3{LRe;t%? 
zLWmQNKDdW>e)=nH8QB>Q6h9R@_t?Wf#+6Cg1zQkRdeE_DPsNLS8tK~Lxm^CY3x4Tw zh&^Yt(7!I;!q5m?Z2fqKCNCIBZ~Ffh;*BC$v+GY7a9N$a{_O+JbL&_uU?|`5FQc~b zofszX!?wOTG<&KAe#z2B_v&cscOX`qthCP*#Eaa%#!!!4G!0!`a>g36h+7%vH}nK zi5yzr%DsnA;-6Yi;i9q}y4yd1-VvQ>^|&6K_s5kr>>6mdsvn5!mcrvF`^8PK6LFA3 zFy2eelXyDg#3{vYI92tL5YcFY!$m-~{e$u9)=Dv}Za)UtNDQ14mN+5(tnkIGPU!Hl zjMgi(@S9nlxMiNPOy!v`gsy88hfYgDI&xKRkZ6SWJ*526h|S!2eE@#f9td{nr@+rY zNbDeW@lPk^8=XheyYO(m63HiIYBdLF?_i zuI$E6jr{(I^Bd^a6c;z#4CQ>UG$Bt8KZwX58HbrJ9C zI0n?WWPz&0d(#@>1V)Qj;cd8r+55K2Z0DR|cvS=X)?HY2MX>0-KonL>EMfNx{&>|o z4Qrq8Nqi>%f5V!jPMAk3I1Bo|a-kApO zfAbPpOWv^E$qs;CN14tV!`)Nr1=3WYS%abtBI{;U@yYTZ@Z{bqZ zeDs|E3X9Hl;a+~H$z`8(Pg`(K>=tlPV(%Xn4(P|=fO}~e#9?^gN&uv(#>3&`0VrF$ zPu%x;GL^;`!xr0J==ymi7ltQNIqaZT-F4`a(hnC!t>&j7j9WJQ)THC%ECcF;u)r!3~pFP|cL3SijgAx3t`XLhn4- zv{>S}X^*15t&`B@g$v%;dIJ_;%7KB)|H9xoS~3m6TOl>-kkk4pK6G+p;Qp z(v3royeGGkCT_3MOvcl!xVTQ2=VWHFPmmhV5nhq<#7XkIrFyc@q$OLwq=sUpXY_$@ zpE$S6u;;6f^`ZZnhje)FSyml!0Y?rTitf8_kfWEn;?B-=yl&A~u_2`wUv5~AI}Xi8 zwYVLaQPo*7!sej(ad|P+O|};|$0yPUDU-fmx(8@CX7IkUZTS0CHBL{^mbJ|Dhf!l! zN#3x*+}$gScK)!S4Ot^#WnW{?hqZiZ-84?S<$&8u?C^%fkP0gHrvsa^q4SxE{5Bv0 zyANB<{f3lb;V6B+s=Xflge>@Xr-=@9+)ZwKbNKUd%=w$KqDy z(R@S4oT77gpr_?FWXBb_VX7-`ubPFcyd9R3L84c_JKWlxEGh4XD&GOteco zfqI2Og5I9ZY;!1xX75m9C#e^ATr&uIT84sE&s(Bpzc5-kyjmbPkrvJ$%`22#xNb(7 z@WF2leGQP@nq8)|S;T5_rZquDNGrY6T?}nLJNV@0P1O2Sj}0HMlOK|eLyw+&NwYy> z{rB%ozwD>+PUZi=?;bgM@g{LWsS387dIDXRrph1duM-Y7^~Cfit7!XO2WUJKN5>Xv ziov_0gtv8rc}DL9I9d|!e5fV^Z2sDDXBQvZyHgG4P!ua0E(gaqTX}I*Ht2=8L$l{X zP<=lK%Ad#4Nlyh0oc)5WwTH>x+FYV}o>sDnj``qfkciu)8AHLf_6j2%qFP^dJ`(Vg zZOl@@@u@GGe0v4_s8vu(j-s{^ePHdO-WWe^DF1GXqrQE%VM}xZ{P_?pocg_yj?|j7 z^1gbCZv7#ICwZWg)dy$CiG49M-(BocKc1b;U&~K+@`v^7=ZJGU`wJCPjQt6I#0@)ivELC>CQ$C zl6Ckcqq`{jSlFrJjSzTgJ1xr5AU3p(*h`=CWNwyr^kE0d4-I3cK@MxmUkG z;$PpzFlFjlSlc>9SiiV8bxX0M631f5y4jcJ)Grl=Syxi8B16U8heH+P+J}PIH63^; zu@>WIYw{bPYG`V=(Agp8usrW{CQUvVg*S?FF#l~iq%ChF(s0JJKXvfswOD!Zk=ydB zGi7AH&R5#;nSrHqd(bSg!7A?)G`^f*znHt2Tib!RTN$Fko@zQaYnm85 zHA4cFIP>?s8?^C(8ZY>Gjb2&)gFah2a68Y_bh#$Xd5ZpUyz1~B;vc*7n(r^+hm$G& zQ}>{f{l$1(ae+&Bbmd)2A(YUj&26>U$)?qnd%jJ>-vjyZ^qDVS&o@eqb@6Aae@cQ z-Busw!Rb*by!lEmzDnoblYaQpvKP1C_n2lzZI{QXcHp0#9JxbD8XefXgl>ITh3k(x z^Ows9h47b7oEo^0R;opF>g*XHt*hwMoPi(^GJKVYtCSF zdpdnE9EWrN9A@48_KGtt4tWy!hF<|G?g}D@8t3LyZNO z$nyC^x_Y<0VyHA__R4sLcf0E2hd3jwadKtyZgH-jGl#dNnR8=cD^4;Lz}s;IEBBp7 zh6{rEVscO6_?slWcKiiu$?UPSY!Z%h?!_7@0b<|d-DT5)!g1QHD74vUNE^bJ;R3}; z>{v5JoLRD*f|L8BZ^23$+FzMe9FlRUUk~nhE{JbpVodpP6g$?tIBR8MxFdJMC);SWu1whYpYKcm!0A^v7E& z`qVp5lU=Ty$J_d|smqii+L_&x^eG*Il(zc z2PwvM8;#!UPe2LG;2N6|u<&1RVSZf#s#ag%Gf&K9u6weu&%B%5^Q-|J$TsBldqPuE8C8{baXx+40q}Rq`QjU2sxI6NO*q3_h{1i1cHk;q0kzFzjx+^!Xj!-+C+^ z+c--YXp)3+nLgZ3ntvKw45q6ew~^6?COXzN1h3cVunG<2lz>W@{q_~Lm?gqohhnUp z`GGw3964OSO-Qe}j+zz|s7T7%7e-G;=NIavIo}W)TZ$mZ@FD7S{R^jNDe#8cQ`i#w zf{&L!qc!G{Tppf7op+34k2Zfkf29kizscYgnZHCAk4>~UUYTQZrz&veJ|WOVZNclLK!!5#2b zi3`JkM>I0GJ3Rawfir3rioM6xi2GYjc(wc;={z6G_u74iwz7BPwsFQ(*}0hx*~@9l zq}?1B9m&lb)@C!0jbQ#)l^rY-*i~+xxh2i1fG8 zHcSR_&WqvFvEPD`|8cgGyoguwc4B^RI&5mZ4QJ&S;eAmmdB0i@2fH69|KdHo@WgVY z=WW87uBu}B`#<6;b62*TbWE_1en)m87G#o=$C0^Gf7MbKPpn=Diks<>TNFtaYk$Fj z<2j(;wvaFS+Td3-MCTvTU^-p$!A8&G2eYe%l(wbxcz--q&J)l~9wYd=g^9nmoP}D+ zt+1xf3O8pMkg>NBTnt-|EALi|13zbp&mFX3;GrSd_~aCgjbF<<_En36I^LvR-*?gj z#dV4@n~RJ63G8!kL+wc|VO3rtj(RH1EK>Wj_OVZ}$gmKP?n@7p^`Nrp5xge&5U#p*kH1)jQSt~S*)Xl1>|UsfNsbk`(RLwh=+uwr`x+=-oK8jM z96`7i^paL+$?>sUB{qCkrH1$ts1MYq;T}?7eAQXdk{_fcTNA_&?!J8cd^Q*x<>K_( zJi*=Chh`OMV4{yQTOOJwDiyopwT@e9slPNg=r|6-=A5DY3VCTn*QLDZ|2cZ?XPVxVN72^JxUKZwv`G2Iw_!ZF-yMpRZmD< z^j)~-|4j69oG4B@+Kw;WuOU6@^)8+#`85(x!nC>1u{paNHhD@+ttm?Mz^{O(?+74| 
zK^o9YNfQ^y&SI@)DW8083+sFE5ysU#RWm+nt8+Sffso8>#|M4EzZ( zAKIa%pCMPYS#V>>PjZM$qki9P_+WDb`5ROat@DceXtW;N7?d7+aSF2 zI15jI-9jz*QfMt6p(zuFaE0z5)X=nqwyTEpv6~xRF6aUA&%4oyZ*R%Ks8VoDse{GP zi@+%b9F}hrmmVtRypKEKN^OPwqfRhAm?+W!^*osWCxn*UjUt=sN`Io zpo^{%pBg9i;)DiReR=@3FUfG$tlWVX&3)LxQ^ZweM2 zwsQEYIh?RBT#OK^;ZtrJbbQ{63r3pLIm>1I-2NFoEEvUyYTTR-?Jhk3juzpTGG{(A z#{oT-yd@4;K@SJjK)Uq=Ea`U>Mhz+z0#!eYo+aPJOy%Bu`kpPVU*-dr11>|#)xjJ& zGC=Ydg$Zkq?4|3o4q^F*PHevK2*)HC)0}G;@O(!jTHor$!@J68`j}LFEcG_qJ0@|~ zh7p*4;w~9hH}X59qtxw}24_ZlaPsPG{A6)Se4TKfMr}nLXx$%HUOh=Ic8;7szYKeM z*OL0)Ef9Mv34K>jkllEhMOP{}g0@93raQLySzBT{dkW&cQ%1ZmFAWTb^vA1S71-{w z9qUb8!Is5_oc2PAKeXNA13wO-e)(<;?^lj5-(BL^Iq%U-`v2-b)QWrOuMmIj>V|i} zpK;F8%qN=Nj@Np|VbwNmP`)=yw%7SKUa*Mc)23F`W7;ve^Po3&%c^(!(5nJsl79#r zFB&KYRS=l|?0^NP+MNC968JZ7A@$eQP$RLwpK}2}Fjc{AC!=6{nGxgpP^@yhK)0m) z{@{wUV#n+?{7S45HmaZGOAAbR@uGH$u_Iz>+rS8VYp_g;$)@q313|pD(_LKPx=HL2 z`j8`fdvfLMgYGJkS$cAB|!c*RF!D)B{+a zm4xSy+v5aM!7jhsal*|^{!cZHXZYR{-}aBkcRm?w%Xeg5Q8%(H3@8|bJD1$&Z7y*vPn^Y{dbO812U>G+$Satv z{uflfs8PeJSET5pMX&Ff;&kc%JMH&bIMVepC!RYk)IRjXefQj1EjN$H{kMA3jmXExsgKo(Kpvd+hOt8$OJHwaIlu-|btF~tHE$ephga1;|KX@n}F-nB^mG&^V zXC(R#X%)s>xYC@MXH>U!A6A|@!Bv%mK~>5XCRyJCx!xu5LP0)R{TGWKlV8HEg$F>} z#8mv4W}rCrs~bQ26o}t!0H1xyDqYGBb0Y5#=iF=@R+j|c5$n6R#+?H3y_Y&n8rG9X58Ma(fkVVkoksGjw|~iMc_@@*?WOm9cEh^r zSmfpQvU8hbF|$}%_C)du=FFCMsJ88><%1fZ-4Y73+Eu}$i-&l1a4x@?q|0t?+CoOi zC@ilJ#q`bNFmqih*j^flAx(`!zq5|m<$3`RT+spMs_qwl_unT@h&)V1CzRRu#al6U zW*SWDYK#AM(cys|-RSwo-EekmoKVx;MO0m)%Y{G9xL}eCW~_ZjjxCpj##2EYn^Y+p zEgL|e)4u|f4tJM0dDo7VgWI%pbntG6&p$+?*Uol|OZ~^-i{v?EG5x$aZU#Y>-)0&d z@mVLBIz9dIO_?z+k(e@KZE8=1FUL^*D2dlpT~}wqvdHf+^xz8)ZwllC z$=VW+@Edgc*pX+r3}IQg!z^n_B3+=ANb|`%b8z1}=FCWvOkj!G9f8i=7ZC{KJ2Hh1K>kpvs z%9*&VpRySFu^+F^nt|P_e*(t_@}iCBou|0=cE$4!ztc=5mzl8~8 zALk@}Kk4Gz0un{7ySTsI9BkCTO)qK>&`O%9q+@!=haWej*CY^42|RmQa`cx@iHFN_zqGYAA*_LUm)AG@m)p`7JWuDqI7ftFMJ5iSzSlM>v`csg%o(oQBf5>tWWPV9`kJC470LO-ena z9#Hja)|2ul-pSJ6NSC8jc3w#<%BF(+~6R?{OG^J>F@F-9!#F3+0_lVtM=oeU2WGFZmrjK}+n<8hRS+-6hm{ zq+$wf8Y>D7MJDw9hXEF;I-}&9qKhRVe9Ms0IAjaw_sFKZ56YbusOj*+L?d2P<%r_m z#niRKOq^~ro|fc21#yZ@%sFUFiO)Jgd`mdGw%dS5RCfu72N>~j`i0G2Gw|C@CrBEg zBCgwdm4X^aBovZl>4FZGZA`3EW{&2n) z8ZM-8_kmqxZ=ZVcr?9LjTTv5nVnsTv24|7H0@3VL_Rq84J)8Mk}UHHb5J=Azi zQ!(dXI6nBRk0q;P@RP|$dOh+WJ%72A!^ic&amnfYUVE_OrZ}ACvre&Tk^|~&enLy6 zyPcK7i^mRn&xH-$owl`G00-}9^B0VxgZU{^Ph>bRb&-kxmPowEE*p8ni0y2vYlA!5 zn)p%F1Rk9rQ~0;2;#9MZETwud{wHC-IgV&&kibA4SsoGvnrr&=>Ut0P_l<@4J-v;E z&Wq{$*Nxmb_#6(b(j$i{@$}%wLCC3c;N-r2S&~MQak{hfQ9T>}kk~{jk`H0@sC?)& zYdW6ZVu90ABtGD=C6WhZAZhvDq6NL9FzT>^qIU;VW;pS|jbE9PJcL=dHu1^leeu~I z3)ab7Ep9w*O#^>jqP2NP`9M+!JW_AZ>y}4wnRj>oC)X$EQXh%;6z-f6m_=UevdWe$ zwE$TLvcb1%@o2d@w^#c`)lC)jGY2zq4H15z%`_qzZMIX}#5DIF55?*MpAJaE!CHrS{G$xc+J`ludnI zR90i{Klr#7Kgv(>W}ggEN$QQyZaOC#ejJ9al`qBgO|vNTW?QM{pI`E~5sT$= ziJPU^`~1v)A*$#gS637wk0z$wI-WO$(IdSvYf~}b9{&<|x9f}_a{t4!d6#)o(NYL- z><{y#xtxVf3p7054cC?%^Yye{w0UQtXo5G$b40$hdngwChi>8=i4ESEogg-9KLr<3 z#^t`{Bn%6o{Q=!EzV@6vXq5rZ`FU0BccqWqdjA4cifbi}uuS-8e_8&aIsv?F`h&;z z-5BF5<1sZxczx+=YE*dv8zq*~u8@6@v^JXpJ}!g-NR=Za)-uo#3E%6&LR>nP{!5jaLI8ZwiQP*C7lw9O;I zWXEK1$o&CdrW&H~&zp44{2()n0tk#R;6pc670N#}@ey~RyZ|-4zCq%J{<%#pR-xRI zJccVxpUHE6zJUl5ozIoCH;Hp*dPVl{R)AZ@A6^n>PwKv%4{#I#T~0kuxf50 zjP%=u-V3YACd`&T!Ur)<;!4%exhEVuu5H{ah z3hO_0!S}ac)A3p@`P?a)P9Y5?v`#FbbK7bJtIwCkSA`jH<-L>}{Q44xc95R?J^SOA ze-UDrfT1`@HU@2y;~@Bqy|^s!mwedSr8M7QHaEKHLwR%$QSk4Nym<^iF+WVrqq9Iy z;yKw_zrbfgAYEM-N8oo>__#a?vgVzK4Uck#@O$UP_NS!}FG>g5e~F~~ISMVUUqG2& zs&lgQSmF88{^;Oa1y8^40MGsFsCO|?vW~gv<1Vp&q)fBhj4QCnPLFT)+Kz8@6Ul5_ z6jTjX7h;=-Q7`L!vD~XageJsNyO>@)gfm&Y<{fIRSjummw4|L_SMccbRGi+F4izUR 
zQSUp~A+F;D5(EVm$^&3i#V{d0>m1zg6eG9!9zx4huY=C%@px0Z2M4)_;-{iI2pH)O z32Ro6g!PiRxO4e?whx+|vE$;ob{t$5k9V7a)?Q1dw`qo$sv)O4G9PT2+7(LneH2_~ zETZc*=HTpNf_lr&!M!Cj=#p_9r0ZS*v;K#nZL$vb9G@(9OqIix#Hm=BGK4owiUq%j zI&iwD;AeXahI=-C6b2e4ib7(JIHZ3o9mOJp?*~b&=qu^Bb zRLFUB9hRP#x^G`Pus!U9a--2aWRsgrE&mO~1{MnQCGPjBzn7t4RU(Jaj^}r;d^qX2 z2CwXX9hOhrC-}ZFmrbhH;-jhC(8XFV7WR%|eOn(;TX%&FI*j5mXD8zT=T*4bX$AUu zy(2v#jXfp0@yI_GY|P`@%WTB?*5#QI~Dp;)pq%NOC7N z?3noyF3Ki=_L2>xdD|3Tt!)`Mb>`r7izUMdS8R z!ZQ;}FTdm*@?Rf#b+|={mCn3@N|F4kWiJ@#yK<&hB(088Ht=qm>9KKV7 zw*+aR(O@|R`OKi-BMjJG*_y7{e1JFOC&H=L9>V*rrShN(X-BDV!@;R5Bqn)1PIO&` ze$A;Im0-hC*^TGWVO&-`7DsLKXZv1N@Zz00MlJIYH$DC)dNeJ8!4jwI$)znUyFL*^ z3U^9At9X$cm6cb0gi0Vj^u>Fcx3^y(nd}zmr|>TKcfgm+RVE;Qbd( zUM=M)ebz_PruGtXgxngejX8?lE=dlmSxb5M-jRsE^zH1Xo`s}^vq-<^Lzw*TIDBpE zN>6qaa_n~LcGLO|BJ2(LQNcW{Ez3c(ysl`ia8Y;~pv;4eyk&BC6>!|~z4$1|*1Kdw%%s|OJu@EMG z9?3h`1PeVg?5Nq!fg&e8fJd64TOFpwk;KRIv5s-5&dK<#CB8cGsHa_I{x67pjEOn+v$_TrK`zf2ya$ zeoDWi$p%{M#5!+%9yo=-XKofb=J`SV%xrKQCeO#Rd}w>rTe^b>ICPi3Q0LJDYYkpY zo#zvz-Y1Cc*LIM%vKifoEENNk){yAd1#=`m9(G^BOwYM6^)a2tzQ(N-*A~AWhiaF0|DjFEM@awQ-ERU4f z-@4Plynl*JXU}(A?R&38&&1o55!6R!vZNbYUpp%FJHn#Tn>`TM+yt5d3b?8vU08q; z1F|w&;{N79$cD9|W{?_J49*izCCKRC<<~&IU+Kb|!4$mGiz0fDlbA9&cN+9^KfA@2Ghj}rn_YwizF!BQy;{#xxM zj?L}Q_NNrEe9BN<3Ih0lyb7ze_lpC}%Ak8|GRy0vu|v}Zx;^hKTt9S*c8v|AjdMrB z;U3*^9*iM-gRXYhruXA2K3_fEC(g#A{0m?9+yuM*mHCNAH#F{e2fw~tCrwyNb9>+X z|Gl=0j~)g^Z3Mgaf%tWK9KCBT5Q>|1x#z=q(z|0fw1269RS`w>;B#kMeS0rCUbca! z{ZiSa$PW$v+Sa6fzf849KhU&5s6Me}74=!v3)bvgfQ1@4!UpAA^w+PL931Rvls(dvF66ZRMij=PFpb_Cr@ zPnxtmmpZPNl7sGMTz6!ka8TlkuKFR+N{L-*@x8O49H+tB<{L2jo;q#XQ^)%+=flT} zG`^uaU8pbUONU$I#k|fEXGt(Y*FZzuu)mY|clSlxbqNF5uW%AwJ-3LQg420b;dW3> zn!wvmgrb4^YhnIIZDIH)2fSYKfnM$#kI#A>gvuH2;4?3R?!_C^*QphBc!dUz9^DVC z!Vgf^f`} z{}U9}Bx6fZrD#3B6Fr!GntNSHg3c#C^TdfJxZZz*B()sDEhZX#r&0@*)LpTmN`=$) zBW=nxD@Cy_j#^*crD^ka;eZJeZ~y*Va$jCU3ab^w^Im=VcApbCS{6+dH3<$Yeu`)M0+qY&P@W3)Mvf*hpS-+-%)Sa@*73nByWa>taG%hMa>tJ3omR z&Hsr$lm8RfZG6fd6Qg;=i~e>Y^&f=l%tDFRu3A%CQ;u0t-D+CkIvo2gc>=q25f;Zb z@SDBgK$a_``F-teV=i82y&g$8<9-S!78cR`1E)|mFNHoAxk7{EMb5jnnnym-sHuH) zk<$8I6?$BDV9#5d;MKKPAln)!MkS1-AQyG)d*~eg=h|5~+jTh3c)A6=-!6k{kIsA|{k*gbB&T<5mk8(8{&5t>M_$b&A;e*xv)HqgY z3)?hw<29e$dFGcn@Sz*i89GW6XL<6SGGk~^Val$4MtOG9&ShT)-MzP;MoC%lf;bgS zf98VlG?>B;$V)r2W*AWT$TsD~Q~WL056^Xb24@Bj=k5^7_L5Vk=Y3Bc;k+BxCoZOw z(r(iGp)&rwd<5TYS%IFTHi}6b6>&#z8~XY9vBV^4mS*&$661dbcy$RQ*OpA~ZtjZa zKMgp^VKR)lKZ3Vrg}?@jLEIPfh21~);+V0+dBmg%c>a4VmT%C5+_41^KCe~C&ELXz zr_BPZ<|CIJM#y!Sq)03)S$VO)E%<(1jEk;Y%ga5)Ax=B=sX^>J6kcDEEi6PP83b| zMdQ3RU+JF28nHZb9NfA}dtA4p;Jrp)c%lA=bc^Ou+d&OZ_*7yu?%yZt1=jVj^j0G+ zXqTfIl3OP0$Qzm$;7lJrPm%JsQcm7qOWMg53v~l+VaoT%aO6-=)@pLWw-WEkXMZdw zwoAU^v#nfuDHz|IBycUhMfnhEZ#v+tc(d8jI`F;~JnbR*OZ^d)6-VPv?L)Tf0_Di# zuqplCMf7^kQ`sFCU3xWcqxf*g6;3?-tGeogC5!(RkiWMgOq??r(kq8!qV!I%U1o_R zbCh_sVm_T-oX1~ORP2nOF6NCDwK!1hfz+Ui`*rnEZTkzHc3=hXF{u?3UKp~9#GD=x zP{n_5cw(-%9?TrsCZ?E2isy_ngdcC7WAV*y*m+bNw#=)Ls!(C{d)XrV)z^()x+}r$ zmx?&7&tp&@e3K5B`tsxCPOPGNj7>ErvWk)}t_v18&NK%nJ4AB#pJ(`jMYb5m-oof_ znJ}+nCJpT#CCF;DXn3&_h&GontVEOht_tVGt~=;}|94QY-bi*6_kdA?FKzd`42nTN z#8sJ5FycuP==o)0@s2QXIem~8pK1X8`jNO`fsd7R%(DJ`=5x*gWdQ*W)HG0J^)jO41>lEoiHr(DTNJLBYxfC zh#?EFQnQbpY=rkSzO-Qu)o-~D+r4*^jn5tO{oM;!;2fH=dIDa_dna5^d@H6+7%E

=WuLe9@KPd6GJ{!P^T7ch#6}NYPGg_;C&68Yv=?rgfvu*SJ%#4)mg%al5yEg7`zEAN;NTE?d}pgz*6$#k)A=PVc)bLMZNGv8M@zf+ zCW+movlpx_&%@3tCbJWclCL!zgTCa@iA6bdVPQ739aY0)Kf?LqK6~yjv5=N^FM|*E z8IW$M$-6$e@*VF2q36≪&xS=qN_?*Qqg;%-=enExC_$^Syc@3$`^C;yH75ysq|8 zR%`Z45E>?2oqcTVPg+HV&;^4>&FwCPk~m*3-NB_ThRM zCf!-Tqcnpgl|sDy0jl(ihg?@(xG=Fo*ab^$;8Oq%U92d0&C{gc3UOlKTt%w;+y+5! z&qK!C@to^Yf_IbE=)%_ZV5ZUz?N7@f)%%j*qR@|H#{}D|SlFPq@^knmWhR66-a~so zK^&(fX%z-76jua?;r+U9SXdu}cKcSqf0D~3ee5`)yL1ox92N!^Bd*e!p=tEdGy!&~ zrqQQ61E96&tN5>^QGBrA7`dK3N%wXb@b#Og={-J$2XQlCS3=`O<3$I#ndf_i_`> zwa2r^s*y7JKu^q3w}iW)vv_qzJmuzomN?r2E;^{p-=q$zmRw)H@pv*-_n40>mV^pb z!w<6k=L5od*IIG#QAK_-LsTbFI5Bzf^4h_^-LZl9(pj2`(vH#$a%?m zoBvE)*+muh8F$Z ziFe~@IK=r|2wFA6AYNiNUuv#KUoxlfDcx~Jhs3e$XN2#j^@jToz2V=&TR2L!A8xk1 z%u`O@Mf<<4;PP9A?{|`?b!%hraNcy>5u}b2BAa1gS!DHJ9YZYJG!B)!{Nq1c)%o?O zShnfCk38EBqDP;-Flu=fD1Y35mnS;&)2@{?xwFJ|cwvRkO{qjz|MuuDzK_|#AB$Sq++aI59PUBR;Tm{vv((crxynmh=du65 z<2WcNU-Nk2;5{6ymGH4PGc2c+6|X}71GCvSy6 z@}+`tyrb>W9(gdoTRNNdDVOz_@>o#vxCWDtz2_cY3U z5DyH_qOQxwKzKn{-e=vJ`>HG0eRDnslZ{g7XpIjV4=uoS!@+!dK%H$vNCS?%&>qJuq`gI3`55vZqxS{wVd30?+zW;WY<5JSy@csrPvGPAb1^)`8sBJ2-V+ z7+K7*rG?+4=wf~h+jlddmNm=xgQ>QZfAOGUf6KwV_!idfRl z#2;~5JZFeG&$wK~TTT#mx{`*y?wrJiao$W6jcS93!`ttVF+Sc~(I#KFa*W^Cpuy|OOqyVSz38Ho;?ye97mxp9d&Lp#5Y++|*63=H3v-Jr0ZPf||Brv)VdR?X`>4;`UR~ zz$P2V1tGjZeJ!XBHs{|Ft66Ps5R5;1feK4(AZn8ztEcs)lXHS`hNdD*#vHnrP(;%U zyOVH!6^}ACC$-nD^sus)$LmeQjJ@5(`sXWPn(QQ+83lv&D9O`MGEGQ`(`L_{1@yA5 zH|`7**rn4r^qY|(tTsD`yx^MTD9i@E!5_sjr$52Jx!=jF^B!UHOmiG|xsV@r4xk%b zF2NT+Gn^znYjS&4;;xGWuvgV38nr^|1ok)qorikh+DIen;C#LE@5_i7hzAwYT=A|J0A6^hQ7X`a9v5j=;Rp~;I|0- zXFU{ez5Gk}<9B0E)x+Z2krhzA_A~!oagB0^eiAGW^%Ea@sqx@B=3nbKI8;qKSJxR4&>}JuEda{lOz&3>ZGdamV{IaJ;-*NNJlPc8Fhvp3xKO{MQm|gVaFSZfwJsH|ue$ zJffD1fq3%wkLrMJC&d!`A$0xOFOJYnp--C94boPLzYf_AahCgdz_6*TA2Us?@GOD8 zU-E>pd%N(){psMX{E{#yiqDNqBlj2IpyW#o@3E3&KJxd%H(%_LwjMz2uEyS7~5VS5Rvu@^iA!Y4M@Y-3-T37PPo1g7XWC=Nkvo_AcF>XcD`vr;q4qe2x$v&aJzIHUi=Bd!FtuA!i-&9d}j;ah&;e+sgmC9 zA=-XkoljcXaf@0%&im|1nOXg0&n_+G_{ML-xJgoH`}`?b^ z@W6##cu9&9r)r16h%J%Awml(O{_rKME8h~+@=V}@mmLm0p=7rqK}3^xG34~68kLOp z)4$JZoH1-YrU&ikjcsFakp3cEKTsc?wnqpJ0SEcwLlg3sKZ3m?k}*!YJ)K|t8uqVG zmG;yL;8Pnze3156`tLrh8yck-)bf1h*oBI6_O zd)2k(_mmg>aq&SuR-Og1>Z35zc@%c(xQH)mDrr-O3+^m$K$SN!l*cq0me&?|IGFBYx#rbbOfX zf249p#zHYsLkW*m>hiH=sx>ByB_H|W{d{ar56=8YSf}z7hm9&?t?iliw z(h!HqUCTxF1V2jukOsLWeQ|n-Dx2P^pl7w#HD_(>uz286Y@Ij|>oQkz#X2=%u)iAx z%!=kr@5{K+epF3X8q)0rHq?;rjfUOwF?r;6@!}eF{HLfO`sz*Masnvp7YnJ+Gi`I4 zBf%!xj&-dH#L|FQaC*u=xNu6E4bQp4Q|ZQ>KH!z0a{at;Wz!B}-6;(Ye)fwBmJPuF z?Mp8o9Sd9L--a=sg`#5Eh|?R#_vNFr)2Y@{MO^#6m6nGshQ9L(MA35w3DuF*=Y=NT zyjf2n8trt#^FL8`(uZD$rpu1ZRbzeozI;=d<#ZFLm`UaRs~V;`{B@6K5rFTrW^ zV<}H?8V1cY z7q&d@AkQv>INEbIE7gtwJ&$|h&1=tv8F!87Veu7FzDoq`Z_7cWt(mauLp-cVG(d0n za(+I@9*Pc(#52xG)L@hY3$lv%zZ3t&n8ztxXbYh z{OFF*tJy~!Tyg{x_Dc6HP01~m8HU^KUGU%yKRD=Y1rtW*!OYeOdZagw?86G+g{La3 z+p1#;+)j?Qzis|jFQJ}etI4r{fw;P(6v8CW$}F7|vgw-+QJ45F zoVff4{V3>ZH?{c#+bBiBm*_OOqCE*p|9dF0a1>#+pE{pjB=Nwl6j*aeJf;tI=k`_i z=z#N1I=#)1cNp3HkGse2Q_s<-uDfwde=n?zkUVYfiCFz9nB=-$p`<}Ksay6O@sr*z z%y;=}Q!!vI`#655Qz3Hr)j*z8j@62{*I$OFA*zDrnQ}-i)PN@wY}o&2G!IfO!RmE! 
zDBbn3lj2%7A9{+OHJ$^_swzk+kr%@j#?kljdYB$F0RwC|LEZduFyxE@Uw)fNnc+F` z_>VfRUay5e8@tnjghgO8C4^mbJw&hX8?i3qEw1*Ep?trE{Pp7}@z;#r{PCqW-})XX zv|PAN8o~9v9Vd#VtDnP|o3rCR~+_vlHL4e z%kYrL4SW=kLdqxClATYXY_9)U?t89Y^!xJ^?Dn=`r>Pa#@%J3=X}TthO%D>P{H-ZH z?H}BIVFG81J?YlK8LTzAPO#mohZ(lUILEyfeqGrK#@m*n#?~)5d1t!t@^~1oRPyD) z5(i}G^<{i5*bZ&LiYF{^2F17Ug_)((c-rp`xaYMGy194bl{3eHisBrpZ@-PU)g+*` z={frE$9`@;DZLXcG{OPt%EI#N5p?@on(d~kLumfdCLDTbBS5#+xMOo4yPNd1doZsMZdGXE zjnAvVWKxSzl~9f8OSAZjehNgYJi*Gjvv_;m1W1}HZ`XI(JB+isi>HUF+Tppy#KWIc zVq%UkqCt(HoIL@0OZL%}o#W8#rAQ}V{(p|wEib`>%Y%8@ht76+8q;yck=}6F^(Hrrl4gvq{cBoHrgQMl z2H8t53$9DqhlL->YPKq2_9e4wpRgTxP~iw#ulj;d)7pje#v0&vW)b*h8nU9-Xr54a z5r2O;!>&`#aa-&tO36CRVKFV(XYDe~C~6ljCR`O9i)V1{>0iRC<3Tvc#u;_2voUAe zNDjEYlygT{im?NG^Mj{dF)=s>llx@S|FRDM9-2~d)kXT=wHoVH)Ufl7EJ5*mmUPqo zg{MEKaog?amC4iiQF8{Z$3Qj4F$4<{jaO}`l@KNf8y#g%QVw5s} zNGKpn$&u+{GMXPmT>!nJ-tfRk;sUs&!Vaq^6gIgpU;b!>)8eK5)+iQYXVlSu#TB%B zRxm3^En)o)Bgi3IMauIF)L89;7t`a>hW(xw?soD$?C^m*yXwKs3sp4;P!xeI&3)b{^q!?giiI;jt@dvYGW zZ8#!~(S0X`L>#7^G)3O2-68wn87jQI^Fw^uH-P&czDz^TNw>D{$4JG!frigZmtFd+ zSu=3nCfGAZ;=Sh#hDfglvKpkq${2(F0ymL*s3OZBY#^U?e$?jFmHpLBdE-4x4o_c8 zgSP@Eo$N%8efywZb{&1Vk}re?w^83@7h2VvKuwdA=|r_IZ%_VBO@0T^qs$I9Z|vs7 z8m%;8%{5lfnJJjIj~0@%-_y$_{!p@ZD)sJp5Av7C(t>;k-g0j!ELM?vg$cu1YyEw` z|8AEsLEDoTT04s7MLsyRrGO$2_s1t=oLD?&3a*dkxFK*l#yRWr*Dt{k)xM2i!Xr2s z>o4&KT4<@VGdzvyL`_d;Q)!R$P&#c4Tr|>xuI@`^6J6EVLc_2o8{}(FPN?F0W98sZ zLz?i?JBzW`XneQ4vDz-&L>wpAAJW2{1wHLST)b=p&sjGLhs-}rpPHtL`MD<4wiP8*ul^QRFpzhCUxWXcV z%I+CZVTcG-V;}IUi-RcO&~b5li1ZBey#gjt%8>P4frlO6f%oTIKo_MsID5qiJQ5$v zea()N-@&fjrlX8)NjlU|e>}tvsHB0fUxH;)DE++u3MM?W~=zW2UFk?#{zuXMGGS7H<l=?9@5{zHa+l8m(yU7dPa8Q zR{_l3Q!K_m-$Vv>d#UW_EwQ<@7-qi?L;c2S7W;Qj6ag7U072ZkTI$nGafA#mNWK(D>{K@l5&!iP83keqJ-D zgW-8B8&rliBiq?AV>h24St~4enudFCJc70%cD$#D74*1ML=mcssBGvByl}=xoR=jN z7B*Q@@INV^dDxok*DFZxiQZtPeFKMiEA#nX{p_%(2McP_{FMD3+-BC3`W!F2h(7W- z;*~ihxupZIE`*iSJ@8D7oKVp(8WRg_@p7UrR_zPL7DYord*c=8EbU;XCXBGL*!O^Y zb{>LrhedLfnap{(n1gZsRKZ?AvDgHe(nP%(wmg1fSXr4CMd zreJ5ZXCdhS_$g};B52*tSo(9y9VXwtiRMS|z}595aG_xV&hh&QI|9byiy584rQbb1 zcHXdN+?GK#cWhd@<@^mW@YSKY{r0lLn|$$8@@}v$>42uq>$z)I9vXRk7JaRba?8}t zXqv2SH}lq39P_-xX0(*2TvE{urGm1UowSDIm&w2wRJdkSPwts*21&z?(SM$)aQ|cg zcUD^lyTWJH+*^1Gdu{aMhl~5#l_aG@No*jikI*CkybZi{&Ueu;Gn4I7pW`;=543Rh zdeE^*q53upn+TJh?0c>n9&A>nA5&ELZq`rHsX_~uHAmA0v&9ouXQ`DwodJm@nK zCwh;8d0$skU0@{Gu5g5epDU>J`eB&S@E3N-U+2d$Ysj$arEE_3-|);q1mg*{u-5Jz zEj3ZZS2u?7r|iC{KFox(?_IW0%ZTNW#4ujJK$}(-n$}dW`pTuf;`mZkk+4BYyE;SF zl8e{vV)5_{n8C-Oqj)s99LuNYy632~?@{W%v6twyM2@TH9>v`E(L9l)J?KwM_B3iB zzeTQaaqk&8sb2^^E1y-*|Eoqm9sOx}wi4Pao@dnwyRjhIcXaw$UgyZN#Y`-h|9QZKr(^c4gxO^`l^jgWKr3|JKx z(vNH7!TI+unw!-i_`Kdn$36T+{R!JZuVJ>VN2w9|UYBOYsx^G%rV(eq+e$wBK8WS7 zpTPNPhe1twFl-VA@QLlYV#DrktWhFm&=QtGa{VOie%F+nDy8pQ#yafd{uF#J|AZ-- zN<4Jy3VK$#O|ZZ9#V`(8L5x12M>o9a12`lZ?2nAbIvj}ewYPkB_`~3={-8r$rESJxd4|ADv_d;FuElYu$e|h2hRdMjjDn_(gc~>0VGM*E@ zB*3^QA0fJ9J;h4J|DpBfxT3t6Ymx}QJ_e{C^99-(f@Swbf%ivOvr*649KJUW9%x&m zUi>>YQ{BkJ|IVewhdy)J7Yp>5mJBWr)i5;)g%4lP!o!&#;l`*(^p6_oj!;MjVY9Hj z+hCG+?(Eqb7v84HVwljn{Mp4`6z!pt;0W0 z*mKVz?cgK%e6D5X;$#hdoT=Rtm);+NA>XG7y(M1uGJi*Y@mS*3UX$a1Lx*5zaDTqK z%}Utjk_XG1+)3GgIj)@iN$h%U9C)oP#oseF@c7YR;APWHe!CsZ*l}ZR6nw^TR)x+_A?L78Lmj17|?Z#gBh-MDm4k5w2!2<_E(-%3;_~>T4Qi^?+ z*RK<98Cg)SgPAyH-*I>ryM?Dqxy;|QooRztBOP}bhVDPL*y717v9Uyt1I{~=v(H7S zy_=3%-Dil#?^dz?%|nozr;q)wwSfKSDYjEaEx?ZGL^QnAg)0tSBO})Ve0#S&zVTq& z$HVk_SdJq1U!VX(Us<4=qZU4VIRkzB#BtUYGs;qJrE3noXm-OXv9tC$8mN`Vn?I$B z1_vu?jq?t+8S;fzPE6oa7GrV2gC6wwz){o>SGL=1x(*kuORAQSZGnV;hw>?C| z7F=H5pV$0nCjD*27;)t_&NTf=S_Yb|cK$2+x_lA0>dis7FnP}DYKkXo8gRn-TQo`* 
z56Mx07Z$AN|F+A7zaP$v6(I|R8OLU_y0M_*^SG4ZYI_|esO?bP5 zU~a@gbPanBtv7G*nwvu;5#u79pMHoeT|1$6RWss4C1HJ{CT)-$;~O{q$BM(%@k_(X znpy7IIILN-CaY#VYj#ZKUP|oF;u=x|WZ@ z<)?kf$=L$pW^G1;@Av7HWh$Djzk_)nZ*pd;wCmW_)2^jq9`7hC#M^0kyz#*qSeWx( z7&)V--QguuaQgfp-q;v|aqU-eVZa!$n4Am8-9KZ%#14LHvzWsQ&)D7_wU+zlmVl+F zB{Yuc5W3H9LbrBJyH?2qvdO`OilZWB$3s*3er~X6@uY~$T5D)?j3VYd=CeSzB7*tdVRZ4jZF;sDyi6cGzVZ0 zxo|#HItshBoW&cu70^3TMZ8dG&$?-zSol!tFJ|P?j+23W^sQH zbuMe@W+A!R;QWZ~{QW;S(O~saYBjc?v#y4=En}~eYkLtmCf^knkMByeZab1*zd~Wh zj=w_0r{jYD_!H3VwO4fhnS(V82H}zhf6l!w?U+?&v-NQeFdp0tKU8D*$*kT~RPz#k zmF$8_+6afpf0&Gj%o1+xJ)(^GQ}?4kK*D*lek~-7dREv7v2q7iwf6Ig4VVN zP&4AAaBz`1Ia{k@(i$s?1H6SgUAqF``>)2}#H}cou$``qo5ID7BcRyR9CAb7fXr7kb%hjvFeZY*4VB9=H*d8V~Zer0dwcbuawc zsEiA~ABWpc56Cd$6D*P#f?-|`>{HuH{<)g?^YS@H5`fT&rb|O#U}^4;CfAWKGAqV2y$61)(^GhtoezQs&JHioWtpn+zHy> z?-7M3lt8A#I&#_9MNl2}0-HulbKlIXl=meE!J?k}6h45^?Y69W;3~B{ZQvti?;%?9 zAnlLs3^rwYv_7(q>LU#~%~x_dy*&k|+oG|d@D4qAB<1Kgv` z<-r9&VLBxgrSlicmgfrLsETcm>hbrqkQ3x9ziM9PtqwxSoC-Uc5L(KW#O5&g^b5wrh=8S9Su1TscA3 z>e2{m(1U8$q)~J83OI3P85}>|kMttC02``e#n){(uT`6KB>wd>(>oM5as$0{3FUv| z-DM`L=kk~afBM1V0e}m2E!(fpR z&#QZE5If9G2>our+vm3^#7~p5Z)@B6_t(OYm%PYwK@fg=u#bxk(`nnM0cc<6#J@Xl zA-B12gkO!Ds^bj1fwT2&Th0AlDWkX=gN@6jZssI>8F>{326nLRlri|}+#|75&nn2( zi^Arm6=ds`g~e+p!jJ7eab2<^oR88b<9U_h{jF)DgBVV!b0<;IoQ=Zz{1P$@ci^ED z`*ZuD49W=o0RcTLWX{L*@VQf&;MR2)9@zJuug*V3@eI)X%6E!=1Bq9Rd#`x z{ENb^ONur5i*iI&IW@lUVi`tj^u@t-_O@N^R`Q^SIc(*U0`iH@d@+1Kmrlr|mj|!H zCW*;aWnF|O_Jc^y=o_p&zE*IOb_%aUJ1F)^Fkh=F;XIXa_VO&H-f6>Shj>H>8?PSMD3W#pSUiGANJ;x}@Ey!c==yju>OYpabAGGB}}@WuTHw^Qyd z56l~$MA3cw@xa*5sAXj*7|BMl^Rs4QxWsOq5U$1?b%Pu0Q+VQLCvcQLhs?>Lb_XB* z5^lHXQ$N`xc>1lA-5=!;yZG8T?tCOscGDsZWv)Kp{4SH@dyj#(#!7mUVTnIKXhOy_ zM>rI-i1pw3(DhA9`2OoxS@ZsK{*-rF>P~)UXAA@EHkWj6{G~_W&JD5e;eAUb+|d-k zQunk_ox6t~^)(e`*GyP9qV@#nl;+N$`yln!P%GK zlGH#03R6*UpqyRMfmbkM=Va(5W%DwQ=5R%_D(%axgX{H)Xs)G;n}_t( z8zUv2;u?(X*v5yv3-S8J-T1fHTgd&HjCtewaSogyl?U2v^yW6nPEQc(&Md+?gB0!7 z+)v_;JNCj-%LbvR&Rq(eycgb2FTlA;%Q&%n2yE&(n9@F*@X_fDgo_gwVvBmVl)1>^ z_0p{+EnN}*t(?WxvCrvvlqq|-9hN%%oxw`l-&xFNS^lI5LG4dh=sny93X4I?8L8Fu zdMWTfg|+zRR|#%*yNgNdOlk`H9F!RG#`tXP3AF9~4$~(|xtJZ#;FHQ<8@B=tuFpg~ zKQ@Tv-O};eBX>T(a3QZVO=W}N8&D9Lfi`OQ_~xN!v|{~WHW)gD99&L9a_5nPR_}W9 zXbZ75QrN;Yp_H6l_h3!&2mIAj1{1o5lJ2Ssv}$=PgxpOOv&<$*ori0@ci%2fKeG$w zhsFqosRwDHot?Dv9nR}~YH)%|6I!Kc)!dh6DW&jP=&-jhw!Ybf%jOpGwYFJ!g3{UE zuuc5@=n7T5k^JJ{mtfooIcP3zuod!m2*SY0ATNlFZ zP3HLAzAuDH43XgUyTYMO?XXQfNXThu6duJ57Z$I30bOtcXKH4`OWR^Wz2*d6-YDgj z69+-g&IGo;c2_hwq>16GuGFW~4_N-~HkqFGlTu&D_=vR2N;Eu7N6)_`^&#Kroxd~h zsw$_q_7{cDrjxkfRVr!T{SBIZI&3?wun~5emy)TpBQ6Ux=f~re@z1te!K>1mhwdWa z-MSE?vm0h+2BTd6q4-L=S-yJz6r6N3;QZmMl=SZ+4J>>_2U~`m$?E<9ERXh;{mF5I zh6&Yh{Kiu-sW;}Au{}_IdIbHqo(NO4FVHzJLr^o?Lr;BQ3B^03=;XUY zwB~6NuDROq0++}IY?tQ(2mvmN$j?63KqlaJT zHzvBmC{Ht*`Fti*`tC=ic}3WA!d>7p>eJ=C{`{8rrf76XTBDzzg({~TQFqcMoS(du zpVcuF_zO{39=3`(9Fj1<+z6<-+s2d%C$Sj@vf^oX{;=M^iXgk8lqI-nOJB(66AsW8 zd@-Ndng^=V5%oen?R5nlYg~w}NA7S>4tcW;5v!qZcOh=@?PXJ*D3H6mEwheH>=R~lG=;((^S zb8Qw~8<$3&ciPBsGtchqi{>*duENg+zqupcA!PI=iuP3Gk-@{+=;hQOcZP2TkMUoa zVby+G7ZSvb*397QH5A}@WCeaPQzpf#ZE)oZ2&{%+m|Q!8ng;o)M+?A(ay z``1$J%1X|p>OM}qe~mXi6Gs*=&cR{FzO1M3d6r%n30K^TF~u^UdTvao=z<=0@ckgr zlFvXht^WK))A2C+ZvZ%G%cIh_XIT8HiqjZeLleF+t8fi!MmzG^~15l~72OpUjQi+>_kpf$Hd%z%YT)3PWZwUa;E!F&qz46RI-5am$R^d92 za`^6%IlcE0W;(KSggmjBd+4-;@B2QLb{QwLIfWm%1?R8e#s8ccejfox67tyAs4*;J z-)Yopw4|5fOfSa79B7Oa|g&o=%2*qiSaMf#hm08qYUuzbRCY5xk|4DX6exD&)B@nK5SNjz~G-B&a&Ot;L`CYC|-J&Uo|jC z)TU4iZDsDH(QYo5GaD%0A#Q@mkkizN`27U-(50`$kYVrH z?dCSL()$RN-a$B0#~S9i3NyGoed&bmG&H!OFCAW<2E8j7oNt{D2hN`3jtlIc%JOQu 
zvb!(zWMzU|`eQc!XB4a&Vh`q;wyY)J60V(|kE+Y;>G{$^oMFA4f%an@dTBBnUH+t>2e4j*)N99#n+gDFpJu_!v)r*+QGJ{%kXWYtT=VGp;$8KJ^byX!qg6(p+T>D z@%NaGG~RF#SuBm_HfSBi!;)I~b6JaqB`?Awp^mh(G>s2=yOFXV2f?}vW^_NklG=2n z%zMUgP}^C-uRm^$3PQf_#;rJJf6mx_Ax9xm?*wSrHsf!fX)ruO5zL0`fZSFK+WftWm41K0es3s*xx3CVC7&?b zt*Hwd&#gs4w*vX&ySwo3JUzV8zL+Ark$XGhlt?~+FmsUustg#4*9RVj@s|?#3x{26 zb~_EghQcV`q$Z0SJ7Nsi8MUA74POlp19xykF1VBVi^H@g>nqNEQx1D_6>x1!n#k>7 zE%zi_EN~8*&~5xR^!0v@K|i%9s>qr~D$GQ8zkirgqz!lWc(Qwe$5`luNBH7HIGgbJ z2mffoLVA596*ozJP_Mm$4WBTTDxO5c+Y>$%m#xBPJn!Lr*Nnm`rt{JKRHf*tT`JqL ztB{{9IAwxAJ!IhlTBI7X3R^p+%rLz_7iJ;sX4Iuj^MoTmP5ujiF(H!oabJ$;@dRaJ z_fpaP8#J>g6dp|d&A-$4fac`;oadE&9H%1#6fH1J5|Vg-&oH>%&9KZ}k6I3XkQi-n zrG-%mLe5+h$0$~cCVsa9ue&1j%1Wjv-_zXQ!lBF{IvusnM8eyKd=_7mC{cX5g4UZ& zr|#SLv1HU6$?@S!X@b2WvzasiY#wZa^%jq?K`nyWhW&@~X9_7uDUsz{OcAw>FMyaL zS@P3s#qxhn5R>A;^tONH1HyaQiR2=V?(W6uq4QyKLLLUMxQnv!d-$cY0(-!9i^NUc z2uq_6;ikmll;~pwKazGc^Sgqlc+4;w<$M6DZ9Pd>$kp}>j7PK&ho=cIXm+2sxNF%I z`V{g&GW_;!3IAE2FU?mJEs4BA$s-8|?eitqL&EcIUCMNA1&856d$i@F;2r(J9LqGJ zi(bMMwkp9LM-R~4$HBOdcagWciG~aHc?atkY>#9m3~J#;BklaDQD7(=&M=}D6>Uno z3^Z=k05V$K1U(IB$@6J2AGh!b7cp=sTVZq&otqWunn-G$xed56&P5N5Q+grX89TZlPOUD%x!p3IDsz@xqfy@NcEiRvzWg57aqE)^*OX z{nr63yS*NVxlWX1Ip1K5aT%v;nuDx740IJ#=(@#Sf!F;H(?Sere|R67p4tfK%m%}m zF{?>h62{NL=sM;fN$^`b`v!Afy z>pEDkP=?p%2zdv)To&`v8H(>&J@e*p+C|2~)-KjT>>Jwl_O{ zEvd%wTpF4fKar?a+rfETOO$v2jNkY(v|5~ztNN+O>0Tp_#nTx5XOYzs`i)`RZCzj;-mu-6S4~NbT!jpY%dDn4w znCO@_TlTYr{nQ>zXNP)$$HCFkPcIFnA&ab0jkh6%Dj8Dq$s>QE1La#CCS+h{Gk$sw zbe$0VV{a`mv2QvwPXmhktA){9Mzu$5Y>tfdZLA_J-hKzR<`z=(+w+vwn@DPN%C$!Nt@)D$9@hfbSW5~yxETL<$%;pL?}FYXN3hTCW>MV) zOSbagX)K4;{MiMWux_LU=YK2>xBu+mT!v)`PVH^f*e4!8{1g1mjd!4>crN%3KEa*0 zet~*=w#@OmIrvXYr<1RT!-$)sgqh(Fk$t@xTb(>w+$&v9%28pEW;#Inp=>(a)NTMP zHw8!rdo|+{C3`enq%Hldn*_BdBy{-71{!_Z9I7Whofz>|U=HrLhszgKp!;|q`01ww zo1fM}$TJ)2|5+1Kht!clS{6H!e}EtS!34_vyYa!ZNEW&AI5XN2z^0C$3a%-`;qxj} z^e6+W+NUHgTxlp>xHumAMcu$Fzx9~Ls*N=5*DW@s&V=$!R6#E63`7p?0M+QlP;S}` z)6_VceZ&hl{jnEV4r%0X90V!T&yoJgGBmh18I%iT>C%E;cKc2o^c@-FIOk#`zbZiZ zS>j;YyXYV;Sa+B6RT?IKXLW~jnLHO3N93`*!4jAz>^BAt_QZtxv$R&)ghodtc-2f` z3a^MD-HMt9fzpQ_6_i_Yg%zyZL#e%5(yJ;**c4wY*yoVP-hT2B z_K9UQc}g7f8&}QGbyX7!YbUl;em=g5m7)IAazqy|ZKR&01ZeuV2+FzlkXM3HoS*1^>k zcUfbHm?GERU~hxZFe9zG{MQ-2q9WtI^z?->nM9ppvwzRxAgw?Dd&YY{bLTxay>u73 z&7K1uisxaVW*ZKglES`oXVIoS5vBPzQM~>nuGZMjGO~-8U)P8J3H+GhE$gxQ+EExa@GKiY!+UZ6P?9*W~p+1@K3g&!*Q3f&)8vG)lfi zgLnHm>ZeeT=3n$U**_y;Yt2Zs-7=iA2N=ra)!Qilg05(++8#8kt7Aq& zH}q-OcXp(^RB~HZkye>YQRXE`Iu(`SZOA-a*YZGM)l|@ih0#za>qkj7J7C{kMR>JE zm9iGeO5MtVd3Rr>bx$?KwmN2*)%yTnZ8|IJxtWHKp9f&v6XK!_jp#?42i!<~!jE&F z4*GY-Gm{=0*6)%HP7buh4c8kb|J!g$Wi|_uhl#w_Rgp1AWm1S0i2@T*m&ha{{A< z*|@j4naei5i|cF#Q&ZnfWS=;OUREEbc@fK@;6^#xgbsi$0xNgV9ZTW{3cgJt&-YE> z#&v!0tLYH>Y&`;};YofU3No3?&wTleGjB2gm%{OEu9YsF8f_qOn%9HlB|XviK!E|< zwwFyB^PVgCaiS(dM+N4lPiCtVUGc|%Az*rZ04$F-fy1>Wf`dr#CtP}mMs=^*;`QZR z$=}K9BMSN8j@}Akl+P{fLL^W!=oE-(1-LaXPMa(Syh><1zM;;F?^uirYU< zn-<&m@LOH)L#>c08?t{oec2)STs&1t{f{HNA2SjBGJRR~k6iAW!)mU~(heG%hdWNW zRlzB4EhLH3JNCS~9*Q>^h!`a;^xw?hHvaa)3eF;GStU8^)U`T-mPPG1L1ixjT z0$eCkBEJO(@z6L~W?j^u>IWsU`QBSt@7@n=^wIUCXJtjQovNaI*K1_u{TA*{nZj-X z(Ys_t=_cKW+-J{FA+x1T{{7@IQJ9M*jg}=&UV}>)=9xZYvth4U8Qd~zXOkA>;qXU} zY?{!iPs~0diB*rM(pBd$=u{?%?)%ZORu9%=AMN;H#5Ydf;sDIMT?t3UFLBeFeH6rR zWBs(%q`}@Xf@8x6IKg+S7t~)oSnC*lZn)16w4Eq^Q~e9pn#Ry0Uw63rGM}~GFow?C z$zW^hjgx;Hv!&RV6mOW*6NP8YHN6T){`f>Eb|77C9Ye0gXK3jMU2yf!fVtme_+I^T z=yaL^GoHDDoq+U_C`{nTg*k91?qz^q%@ww^uOeLccBJ&Dv&5axylCL%Hul9+hxBKL zKy=km_+_*jBbPAxZ6rh0$BuJuqq^Akz4G90{{q@Bu3!mi7b#UUlP$e*5`Hbor^`(h zu*J%lzFsz>)!luh%d#Isz&|Tgeffc^9&Nxic0*}frY_{yo05X!I;zsY2u<%zfJr 
zUgroLwwMIgPhmPf>|;dnyGwYFPio|GxtDiO;qi;VnBDy_la2cC0OpTK#O+s0>5-f} zR?lr09Y5&>$(mPK(ZUScHZC3VUM162w5})vbv4f8@rlahHqRP@&P32D+hF=WcM=x(z2Y3UFW@`H3INf%U~R#}j_J?Q zPh$-Kd$S>$Hc4or)_t~USRg4Y?`64*YuMp{cX%;ea6VaF$DCPCaL7cFvtUis<7f{N zH|^NwMJ`Nz-)I)2oX?E2Vwt?pMfS_M2s-t1XsjK_*0}n?`^Wd;{x!pD6RU|RQ=iN( z=ch0i8zVOO$y27dyLnU(?EF8l zcTG=Nv4bI*r=7%4TCwOq@-Kh%TM@Q)cyJ#LbKs%PSZH^T^Ke5-gh4B4|H=c_CDuM z2ux9Dfr)$E@g)`~ZU*mKmKw5RJ8n%gLgeFD9j_Aoj7~||4MeT z^>1BSaM)rxP_&Hf@0}n&_iKEqOgmfBp-OSd&62CCbD(fH(8Y0L&c5Ik^PAnvl{IGI z!tr^0T(JhLc8X&oRR>F)OJ~!cyiGp#|seo_GG9Z7F8+N6RLIcAM7_xmi`xWSp_mlpxU$m0$3p;*K zqrFTeYbmRIm(C_V=)m|r0Ti8+N>~0FGuC{aS1p}`wx86cQ(YX$VCFZb93IE&vxeg@ z({?6%@Hx}alw~*V?LgllNAf1vlbxzpVqa%V*yVN3l^4tVk ztroGLgNLBIof3Nvlc+Ex1qXY1a=&!?LxSNtY#XXiYCWm!eOo8$Cm-g&1&_qv6K?Q2 zebdQ%q=Z6J?U>F)FPb|mlr89=!P3GR`E`t9=C2QNlP1hT7=N5HBHn?+RVQ9+eH{Bd zMTa%N?n|HgJ*J?OYlL^SB3LOZaU(-6vHszwFeOI|%1b4zVs|;!Es5i@ce_B=kO5K; zvr2(cf-pZJo1TVV!0p42u_gaT@Bn78T>{Qek=>c5lu^>PEb)Cn4J|ByU-R$WC(?BZX)(-CH`i|L%_ z8Gg+Aa*%nI%LXhuLYnPMnMP>~?&>v!Pf5RU$aoHak>Ql8W*zU4qdvnio?N;XPGWc#dq+RO5Z2J zqyx^>`+O24J@tal*0XpfWF(F9HNaXqYjQhO#dhs`NpreZpxfnf(9$oD6viH9218pM zuUB?3XZPFW{B;YQTy~fG8cWI9YdgLjGm(PJITm&`g_=$NvZKZ4dF{eDzO%wi{FP;c zti1*${BWV+imuFEM-B(y1xSDC2~mo3sM40P%BKOWctH`nxN{*jjXcEcHw=g6QN!?L z)jPh`{S-Y{dq|S?xh#t+={y~-U3KZ2CO>6ffz#n)11Vj%cU7HhDrTMVdVst;7<7H}!Ff{)U9 z!gnbFXRdCFI_XEy{Nw?C^#L39w^s_kcVEHS>Uj8;lucW8!??*qjBu>Et$18jCw)2> z1sRv-!w4%A8e5VHJ@t3!;j@S2|8yg5><~D2{tgU)~Yj|I2R`6KRz2N{#?}Om>=@#66uYnaWGh?^+ErQpAcaDh< z!0({(#uyeed#&SPQCZ5XJP#|%nx*kwIU z82>7fYE-Rhol_s^C-Ve?doJR+p#y1_b0R%f&*XZ;7ciT6J4zL}jUzq@x#~6Q_}}Cv z{50?zwtGHfp|9t`=(oDy+$(e~V&}tx9nWxl_$~6Cr$O6QGW2BP5Yn5Y1|KF| zVBeQN!?0&};N9q0RH=5L7t>VeU+E1vAEPh+{Wb}d()v-Uf*%Xtd7Bk{2TGJ50J}E( z!$pG${G?a^L|ea4l^&B-rm@$wX~B${sQ)-v;DA=tz+HQMb4yAOAAM$(ezV2NfzHJ1 zp2o?iyrFj5X8NfkWa8>|!7)pp(@ndMQ(mjm)8Uix*5hq#*RR)(gKwyS$-_HLu6s9l ziMrTB!^v!rs~oKORxR0}AIa>do#kemD&U4rM(!Qy)X#1R*$X?tY@zDZZ~9$k@;aOS zXFCgz@0>{m{b#}H%XS!aPv}_*gI-I4$>Z}Sk_TUT5<6dIrf2?&oGylgb6_A;yY2k zt!MxZ@o=M@=iA|`>24O1w~iKn=mRcIzF<5vp04`#hiPXM+4gn2*!1qTob{f;Y(rTh zb69eOn=*F?U+XX%i_+D=-#e6Ss>0x=_5rqXY%LpQlt{N%x4`;`byR-WgRGiVp;Ebm zqV1ZXd+T$E)My4y(HgT~RWUh%3-)d3TAb0NK*L^HlYY=vvPrlOCEx$D45McLRlsD{ z_itY|aegH{Sf)bkn1s3~hEe+`Lo__bB*%ZpHbweZw2Pg)FbzhP_oF8k-Tb;{Io^FQPh-1du-N=PpYwMu<@g6c^vTVv*d>78 zeH=ihTSwrV(H2zJ+6aNNH7Ef2u&1qr&(s>jCoGvp%7bc|ep)&@?YYZ6^0(oo)hXz+ zPn9BC^qA(FIQmg83l6g%!!Wxxw%|Yw{#A2hpX!AiTtx~QW|)&&Ybx6>I8CnBJK{#U z6qd7NHWy{{hK%CBQ(o=~dK8xquAB9V)F;u*u6T@Ht&Vc{WVun>#_(6})?&si9mxA8 z2Pfj3am0odX#BW~Gk;%?v2to~PPUKureZS94{xWja}L6)?~(k)h4<0eKNO;jDw&5) z8H`vPfPD>uaIMl~$ddc#=yT$=u(#Qd3uUvQqUR8$MVw%Jmk))Vv!-$VRoAf{(W|i^ zTMBcM84Qyxk21qrLf!ek(KKjK$ew(uo6+0b)UV95VXgHaiMh=5xbp_1LVFrEZe2Q~+ z4hHROvG~K+oQsK-VSP$c`DqQ`C7Pik_9SQ=PJ3>Tky@FY?Q1!H>Z1zOzIBGY?@z^= z=>@11-oUO2pXT$R#ppEu3RQ{kvr?y1?9`s2XnoX}UwrZrn|f!zDB5lsGxHuuYo@1= zvY{uZyz3!1;QV3S<-vn&jL`i}9zuE90o<_Pr8M{K0xW%Ck5iTyLDZ@ac2RT|<64B@ zbJaCbyT^FQapd{w_s8*v0#8t*#ST92qc&Z>yb~2n-Jo^Xa_)owRUD@Bm~$A>fQ_ay z(3oHkvUgUacHV7_IDeIs^dvLOk3^=$#;_}=pV;Q1iiGp4hod1&Xo7qp^TIvs>Prn$ z*Ri4AKG$&S#d_wN{2Y^vg?re9LeAu6I!kyS#WKd4FvXGTcgX%f}H-sKn~bP&h(>}2z`*TUowfHkxGu^HQq zsYT`yCu;Vfwi!oR?!Y=O>5C`0$ll|7JUP(FR9LE?K7P8ff!z#^=B>Dgiha}OZb-; zx~MM>afSc+K=y};)H&1#wrxY0rryTXb4QW={1GD03w`ME>LYk%?Ld%kKgg?Vx$?!q zfvEkg1{OSCVGFn~zGKYTu%|?lu@&lSbQz zMS&=N7g}t#0KL&|Y@GNli^@GJdFLZy^A0A^`d5N~Vni<m4208{8LF>cIf`@V@mQPQC!0E?XVY)r{Q++#I(y|`zMBNg-6S`$8mErW# zb`)@*ezBuf!i>e@9S$A30-|{@+@14>LT%GH&AZOx;1(ko>0baTG>eLA!obyKB1?Z% z#s24Him_P*E}>g!ZKDoLfBh4N9eThUhqd6jz|rE;;sCH(6^g>Fi{4HEkaVo0xWjf( 
zap)&KYw;6%jxcAQk&)PDRZgb!w?IY7bgUaxfdhilS(<(r$Y>m;GVN>7XdO#`6XQ@v zN2p$GwSmBs3t(MG13X%OlO=3hj+dt@(-FaGaMs9Kn5S5<9^0>=W@|@d^HqeN*D)v* zoO=nP`S8_t8Q2})1yyG8*r{PjHIom*ztSdFV##Ak#5gu|?*Ob6m`0vovq)+84i=(* z3r?&*NgBeuHeq=smw4@GO&;lrJ&*fgKgaMExuT;^s28nIQ(zrmOl_PpSgwB);n`L zTX{2^`MM0ng*O)C_KH{B-9}f~DKIsr$*yJV+~#A!j1I>dqs6p-q!Smk>nOTw1z@$q zW~_RP;AUCPngXie(pG&^mPVmmS1Mdrv!dNs1^=&#jlko7${7J_>QJ2}IDhudYo6N4r9${1Y6lVILiqy?mh6+Z>!SuV9tbN>EyzjUH zes9#{&sEz)q(UmLvYbGgqK%MwVjM2>ZAYJ1>$reh1K?_l4*RTelGUCN(cqL>aBXTe zJGMd{^W&c3rL|qSa*iC!o|A#`Z@-B~UVp)-mFwX5;Av#L^cQ}fp2eg~izQ$C2%V+! zsm$@(QxNX(m=n03uASUQV}f(hXoMQsux1kfxyf$aF2O~Dlh2>;6*!-p!05(3mbhyV zn)I|tN{Zvy%8SvY?Y9Io8yAyJ=5FYCro}fL&t%oEgQY(YT;j&-{iGEyA7U$~4vKQq zAg$SptxO1K*KiP%Ia)03Y9 z%dWMZ_rIUb%ZyZGul2XFiqH8tS87HdZv{c=u-|Cl_KIbx&Y%-k`KVSko9dc}lBrQK zdMadaca+j;oUn)6KeULsFA*HF2VL383mF)dl?N)$UF_Q9L-cscapuyr9vw2iu_3>t z?6_(N$E~oh*-`S8`|tTp^e>8GeLOy))4+7eg;VNq{h zxwkgEkfBD#U*Ge#LeBPCbbLW7aAFA7fB+ z_AKTyvYQ_fC}x*N#0zf0SN!{F0c4|_fNOl7;|(Djy7p`~i%Y9xHv1RzDms}Wm*(@V z>){{dzFB}U)1dzRdoraEZHlPuXWzr^U|N*mDbk2@>{!{wH>(~L+)s_nyGj|^d6ybljqlvZ){`t^x2801 zmljxd#L!y0^q67{ z{$R+gAkg#{oYRgr^qZy+X!qsw4W{S z@WPyf-V~_#n+rBuO>X78uugdz%9NR5HkX11oiY4LyK!`OqBU7hHfJwd58-U<99H&h z0xUjff(|#cDQ3$v9OoVizT8dvRIW_7^_S4A;0)BZFsADh2f(Yr!(iIU+pPKdF)m=R zJ>R&Y1%IvY2a3zO*qMd>B#(Q>QP%A)e%+MS5TB|FQ%79Ip!-g^>c9Td+rfX)%0UY4 z&CgibzB-(+E|+$nt>tQ@fi(91X*Ri$6V4vp*k_RN{SHc_SE1t_N2QLCURw~x0=Efn z#U9{aIiKf;TwBR%tn>uV`xr=jr$>A8$3X9${#2!^3U#*US@DxAZ;?EEQ%zvaPUaI|nPHzwko?7}J*H z$p1(pq!p*IytB>paBK?x$yb6a!zQ!ePSLdDtHwm1?I-!%{xigrZl}@<;qbOPQMfZT z+2fUva4NC;L%E()q(iqT5Ibi~c^ThOT{)T71Urz!i!n5RqbXTT-NJl3`iQq>4+2H` zd3gKJMuJW?`dU0tI>sXoH(LqW!+w2edEHdX{gO(X8}+!^(8iu+XTUYtKDaN!36?46 z&>%M%xM;Li;P-E%<4(rhfid~)|siuluP1K z?aX7ioYe6BV7l?|0G_*80JaN1)kN(*B6P~o~b z18vELuzK8QX8q#=iKU_ReTD(=x7C4`zSE~aOP0cKYhCDBsv&)!D+{_B1ElgwE+BRI z0W)4U;P;oD_XbalALUZ0zWp-x9Q$&jzuBpS$DKIn`2I~r(qzzq2N{d-MV&lvc- ziig|zM6+2}MDM?tq?JK;*Lc!}+0_&=u$H~d-Gc54ax`mO2CsEa zmt?wxGs%%Zd_vYG7PjFi7*BpGWC31r4YvPKwzigA-!c-N6gE*>;TamDc!+93`jgS0 z^`Lg3hyQt@FIZ(Bgs@}%@MVn~{eBt^K^-H(e3d5~C*|1YKtCa0c$E?|?I6uf4K4*8 zMEjBJvHO7m91Kz<|BJielF4E+cvdU0@-*<%7@mLlTb8X`tLxaH9>l7xJ^7piv0RPr zSZ1j3k^dK0&$p+B@9CkEWuzlO7lk@UW_Hx!FY}VBRpP#C{>NqPF z61E52?s>3_Ie*Y@!C_II*FC(Nl#S0TPq6DN{kU!1Rjkp_g3tO(;p({=(0%_8T(%3x zkL$*;ImNdn{kNaRieIv@R%tmq|7SUuIBpa-=3o@N88;agny!JKJF?W$F5{^J|eo^>4>L=I3sHXr<24T&$WXAe`|+0FG6rM@5A zS#G8R9@p$6s_s9M+kJ_H1D0X9$?q!uy5k91xi3%0^7Khp0{0oEz)v&9>GMvsa%83fZ#YwdWRfEPG7c-Ee#&WO_v` znGexXV%{}hxl_tb{H@sIOkOI%j9wYxEItt<|0L5e?+hwa90+Fb4{%viAMly0!`S!M zK~lMmK782Tk4*CCDZAv?Ry}0<8DfU5BrQ|Ny*;P7(MxkA$BfQnXX{|fdASd-cnpQH zHMh{!%N=Ro0g?Dy7vnC?fE~~JgWN7<+Og;?3mYEGYW3{clT@Ksk?R5%`|iZWs|y|D z?e8!zYZ+Kk{vTK9oxw*}_(8*v05}_z54=GXsNT}0r^5z-lglmGUbq#CZi->kTfv7F zyb`1-j-;VAlunM9(ypDtJTtkK8{fDd=e)_RDG{Hf!t4uZvEn@3YSJFGU)BX37p|%DZJ`l2B+aPm%xVHZyD6P9daYu*K!qYKqux%U~`%Z*p{n>1e#Exap zxX*`VgH*dQn`vd-!&#GDaJcMYe(TIrxPR~pJTXa_!I_U>lN<#`k%tTJ4NPZw=T*_u zc_GHT%x32l=73*u939Kt2kYL265%+Ty;qf%Dp;(Q#SQS8Am0F?5F$ehJF#q z>J{vY;vPgc&J}w`7F{r@8$qUGS@qF^ulrL;PxGeDr7%JNoe-n`$RG7AM=WKQqsPsAfM@ z2c2cgrp3&AYAmU~uBGuRpP9M)I_Sxl(mv+|xCDJIY2Fwove>?lEpw=5(>K)e=J<{&JpTX# z1y}T#vtRI(rVlP%WX`L;Fk~+Rcyd2FLfX*%2D2-y@zu&s=!wmM6*=}`#5sTu9%MTl zeE8~FWz_n*olg*U4>eEc;Ox%LkTq)_H1~hTmTY*)EtEflMbVE~)`-1yXxS!+n=_L9 zM=W9)_Cati$N@yl_R^BRJX@k+L_JS#QplhV7QD-soOTz|S-vk^3_p(#B)W8ulfk&% z%{0|}A0@YGk^gczX~F(0WE?K$b280YYsp1+viUT>v}h}vKq4;xZVma1+`-Cjgt(y9 zl`Y$`nEnLJpyjnGV4S#sOy9g_N0YyD>dC3Fy!ZmIQNM-i#>?T6V|lQ4_iPd`6uKCm z?zC&y4C-yGVKt%)+~ktkbf+qe4LEKA7Jf?N{2MN4tlGtnJ(xq{tDl$&w;unP2}~-1 
zX_@|G0{KR2NR6-OP-VFT{JA0r_gccaUSD_AEK?_=xLY)#irAa}^7va-N2=DSCtguJ zf|8?rVNd2%tbi0||K=7`U$~L1`@F`1`E3xZl}+7?MNqUphppUGOs=q%Pi!+p<#Q`> z$F_Ma&8Cc*#opi&G$P<;up6wf_{u)5S^}Ggt3mo3F->@KjJww-R+L$_kFV4;g0g*s z*gI({EZ%1Ws_U!iY4-t9{H8!Ct;_f?_XT z!%fR0kn8Q{j&1dZQ(Je@{c}k|_{1E(#(L2dl@zo+6~k7XuIJX|SBnaxchk|si02ee zQtIF!>L`r?y}ylU`D8WQu`dg!Qmtb{hb)|4Kbo4AmtyV4O%#|{$M3foD%yMNIde~K z!b!K^V)S|)n3MFIZ{A=9&l^_LU(Ga7TDy{XTN4}-c?3%ihqJz`^O$MgcvO6x%>CM5 z&Be@(#Ic(fvX+#U!rxzuGu@a3wz&;BeT*G8P4VLA>>Nu``CfT%~hJBqya#9)6IQkCdPWeMrv%qlCwt)hd zWNvhO8EYAM05?^Qqqc7`crZv8^My{0r?d|^O8u$1do7!lN!U-hkOuTSkIys?!qg4j5_q(Xox3YIKT1pSvB4(L-gO^;jDCP?g)Zr#2azn?qz3-8&SslN&XP=>v>L58 z@NE9a3wZqDLLB?C78j{zadTcwW*5?bAS)U!bO?JymFG>s;GPQ2J3n8tKv4ws0zYW} ztvX8Ia9w!+%w^kk#hjnMhT!tz_J^ zEf+8Fd-j~eIg$+$leZS+lsy5ww+gK0hl7|4tL4oUWN@TSHE!M9&M&^9LJ>(owYvq@ z-El)Kl8L~b-xgBba(!^NGNptq`KY<;4r@6dK@oN3bh2VMTk@a~x2?#*KQBx1`I4ui zxi|0QtGY`p!_k;5=6JJb-=m@P{tz%zyT+C-RfaJ?hQjGwfB=^iES$Z8-^s7zS9bca zoaRE@o);}~6YdoYGFOo+lcCpd4C(B-VAfrI8Ks)h|L@Q~7cv(^rZ`E0eyo7``Iqpb z1&{St=YZEVUp8{+X0p!HCF2b_)Q~-fa$RHTltDc~;R9%TP1N?vne=_`bo0(&=K~D3+Gqc?uAbW`iW1B`@>8c zo^qL*WdeVD9eFy)K=6JCe){!z{uJK_?td1zJ6qOrw>piXwd)pZdOw~9mGL;YwuCk5 zZr}xPC-l^WaWfVUg9&T);Mv7`p#4M#EUN`KbwMG<+RenJyQ;37fA7367FiB0*U_ea692J?S1uu zYxwsXm8HPC<_uyA53jPFk{r@As>7o~?yw_%J4|bSj}e-4@O62kW75Kl*g4RIcyC!K zN$%#m?iH}f!|ljYnE7q8UqWLiX5vN@O(3mHs5ZEmjk5d0RF(`9cJYEkY?6ffbuVL; zeSP?ePa~n{f*Oi%-{rg7Sw)4@YDe|j8i@JT|i-5#KKxF78r?E(2VDt z7^|nI;#XtfDR4hLXYClG^j#8h>n#q7yUF*zU5;^v7kHCDLT2kz78koz;PVFl;-E6%hea?BE=M%0UdMpME z(&LVg7SKZ}#7WKU!Edf#pzxp>)OD`FXP43;X~943hw zb_dB|>1T*>e;`c>N8!%99{6sRHt&Cv3#KdfgI;F`3h@uX2^OuqvRa>S_|3<2kCh#} zuZTknp9lQ6eYKEo++G2dhxmkI8jR{O2hMG^g}<`~VEus&Xe(BN!PgO3pXL(lA12x_YpQ;+XPqc_Wu zwl|8aCu!4z3unR7KO4h;jlyTP)3LbwC$1=t5?qu|a;>=$_E4;6ALHFP+gn@VrQ3_I z4D+JOmX(5CuV;d%nU|o_>mFQOcnp5FJ?3!+JMhZEGSN+OgjIj3V1<)q!0#-&up__KSFex*d$GVn6bjbikx{*GaWK8D;=g;vg z?U$lQoswepv9XkV;Sp|rdJ2abY~YgkR6J5joar8kp>K+CeB@AxZLOxztM%iBK}x*# zUKp>>59RTnOVH37`PsP9*mrp{E|qrucWocUKT|XvPmM`odD2{*UK5I!EgT(Z*muI! zpCfr&S3Nv4ER60?8o>L{OFR05eA&y~NAB7C4#IL>uCVB&xbN=*>dS+{wd#O)B}GMX zYgRNT<%RNu^IDQy?2dd#$_egwzY#9Gy$8dGQ~1rUL3rn~z4*4IKVFz^3C-%2qNl4X z4UV1()gw>P)2Iw;-a1o!W^Rrn3wj{bYe7QD3>^GS;@n$UI<|9gAuIWQiiUMufBQU) z3C*L53%_N}Wk=<*g=cB0O}e1&H-dj}`A6*!o1yCE$&~N^m69ty@%-BE+}|!8XRb5n z7h_{F;N597Y3PVOBe&DxoquSEls_oS&ZHhYqiEiCGx3sQvbgAHg%GYN5TngUV1Lb8 z__F2@%$7JB22yTiL)aQz*QFG#Y&v4jAmE&QQ_0gEEL@sl$x}{rC6BFLz%fz-Twe^N zR+DSuvA#L{^S&!B`Iv|!G!o(eGmLD>eW#*hiB76^>{@sn-0MFIbKYCf?v@O3LEdcP z>i93>k$)eAJ&D`ICsh~3d;gVDbJ-M9?tYDY+pT5i0%@mWL4&Zi`*OM&8ptY}jCju1 zI`F@+2^L=;A*_sQM>D?d63*-mgm?d*(wXsdWSbsz!it*(X|Gfm_I5J34e!zjmJm z>$l##VZ|RfW_g~!o^2DxMRej#H>JM%-~>K5{kX*3s)T7RgW>GvU*dUh4aI{#{jqmK z0uS@D!_EVgQF37M7mFA&QJyJg{tLwlM_s(hd9$wJ(o5RVU6{w1|JeI5Vn z{t)_=tY!Pcr!fD*6PWQniW7&O$EPiY+_Sr?qBZgqJzPcDtJ`1jjtiC-sn-au=N^FD zp%N~scENr|x@@cUS&Z%w4KsJ$7b1rxa*pm4j5@xLzoo?SgloMN>Appf8Py99E^8!J z=b^B-#1=iIJp3PvdxC%9bcvg+#b2Yp!I%FoqH(Wyem^UOQpGy>-dc}iCw~|SQ)rTG8*7+8(*Bylcd-`DY*Cr?)^F`P? 
[remainder of GIT binary patch payload omitted]
z7=GnHi5n&e7R?NouAQcBg{RPEiYYF;dWlA!+YAM*+ZENB`(fr)iM8i{13T_A<981W zAm)-cv?_;URp)m6;M+?Y*|QUEu3U!);TLtQvBfhvu5>S{9f$M^MgN)*7`{-_V#M|+ zcgxf#V?Avc_G2D4JE}0+Pf(~<3V6amOSq`jDCYjkfZFPw92lM{x+NA0lP>(El@&p> zY(SjgeLE5~OwQr6m;11N&;>CjbU%+>y$R;WIg|3nRM;Tb!NoSIFw(CQ0!${c{*pax z5*R@DE-m0NDWMPo@Xc<#wIsID}}mxJd+zQmf{ z-k2u#c-D{q?$1O`X|{E%-Dmh(QVFg%FLQe9e{lMZIfZT>OeZUq;YDHj~f?m(F_+-cqO5bFRXMV)O zH06VW?$Vdy)~^oKsYZYm;r3$H@9n7F!yRf?RZ*GsaEN-oNo+dm!K>}UvB+*ere9kP zADcD`0sW*sgmw=eqv(qNx$MXGUHb8!4_>@h-4GYqt)SMTXttRcMxntD&?@O>6-&19 zi-*%;>z6aoZqYD|4r`OuDWAhX%ccJRXakI??!ZYKrg6vF!%(R(gNHqS%32e5@(t4y zn4($=ZWCi<_RllO2T06sZO^9X^2GGX*TnIGJINt89`3pAlQglV{7Cjy%DU^u4dJ1< z=AUHe9B6=j3$1xaei(L{+80wHk`CzEz>xLRNLfvdeQx(Bzwd%Da!V3b=lvryt$}PV zWaHesl^`$d4aJe>l-0RHaJ~10w$EJ-c~%|ien=Fg)k<~TdRJ^Kcf}@)8Zm2%Gt8+j zgW&AnV0o!8%zJ;H-mEXAAbBykZ3t5&+4=LxKi_06XWN6_m)CHpb{Y7I{=DCTq+|0{N(z}RrK%qxao zF&n74(w0G9^k3t#^;APf2c^HYY`!{O{tM1} zTOmG3e^2LjeiF?vS!^{}hC*#DruzOAY}LI{FW&`sCgk$$5w`ODQBoiOcL4Ui-Ap@O zdX}qC=~y1WWW4lGm-N<>Z${EP;CQp&AY2^)t+T^LZKo%s6BLCfzbrx33l~|wJ|3+#!(n{VGeJXlJJ4I4Kw(8_g5##tep^Q5HB^w#zZteT6tQZLQ=tZ=#WxuAv)`#kle% z=-Fcoyq}UnMIo2@#>_}YnnUMu z!m)m^=k_^TADhLYKc3Q|m3cfmSD$?z#e#L$o~%{m!0(oK=Na}&s5>eIi-!*89jb*w zTx1>fSa${wPYU2#zam=yGM2v&HZI4OgFN4N3f9+3d*0|u%)8#T{I2pdbk!dzE*@IO zR)3Od_6n2o5ynz}vczyOmHO0>Uq?7;U1xSljKJSJJjL))4r282Srih`TT#&vK&!fc zf`!}E%ikJV$VYTMOV^Y&>A2Xx+(9#+v85ifwNA66V;rmWHsJQpPJ!$7;TY6S4ZA&g zOp}k*(wN(lXQ{axmtS9lCyaW)2{ldB#Uhrub-)J4Lh?+T0M)kA+(Bm^zkbjegT&bzv7!L=&Me1W zi6YxtnDO(5?o#fnKwGx8-2CkfYTpV64} zPbnvK1FHKr$_#HDVpAz^^blP{v)JR*y=8@5r%xy0cFt;+nH4#8-yRJwF1hf(b*8M6 z@lHHgZNqCi28f#iMv{4nfif-w=2BhbNvCh zYdcXKpEjFL1neZWDYM1AC2sInyC)s6zbc0M1o7}DQ;10$CszLSok!6wSU6DpS{4M{{gh`T`V@v{#2&EZX9=ioCo8}_Q0XiLd9YoImVg1g{%%*81L;M z^#soR*x@>=XddLi^Y3YNxeI;2l7dB=E@YK+Qt@Q|IfYs8TH4tsM7Z(M4wmifMHBaU zk=dG3*m7kx-MM2zF!MTmaUa6kD(O5pAW|{1{X!lo&f$vsZXoqpR2Cr_MkN6y9;i(uP z$f09y;M&f{_*AuqVm4&K#rPZKKBfosSel6*?Yj$^7q7$L$g%jhTRfZaGBLmEmXOeU zCAY0n#?MNkVqaVm{BRh=>s&oB`h1+2ea(wHjXp$hQyb3>Y=V)(Q#5&3FYG_1A%5Ex zDRBb^D?W@@Vy&B-0LJyAS5ay7Y49u9a`^x`Pi�(Y@f_>L0XCV;4>T=`M_(*(4S# zCP3l%8^V9R3aI&IPqNF^2X*s<^flED6JOsDI>zeov<4fljc0yW_0s9l+y2l?94Yh- zIYBJ>Ds+Z5LZ`6;esVQoU)7b=+rXR8AD=J2m>x|Z!*rB|pZ8*MsTm=`C_Q1%}&$9eEP0-CmnyLKM zfIqpr#W6pF zt+;kn0ffF(<^G*?uuHWiKU~y~@2_})d$X3{hkP%r$sQo(w55v{e*eUuu3`95@=K08 zmrkdZb;zs2xXdqiJS8kN3XMizob%4O+|6O05ZKZPIk}TDLg_MO`FG*J z6T0)~h(*|K%~ks9ZvgG@h}8CJ80)V*0Yk63;VQ?|Sli8ndV5q-cVji~|65 z%cXR?gC+Sch=wCgI-u$QQSmM0p5PeQm3$XwqV4!jxKLpt7ToI#aCR!r4vbZJo!BTG z?hr|zwFh$D)3L%m(;2wJd@(wQbjC+K0H>}hhk@!3$X5DJ`rbVXQ*&~0)5eQ@=7gv4 b&MKUje>noCsq0~*vKHCg%qvrw*9-m+vO>aZ literal 0 HcmV?d00001 diff --git a/source/tests/pt/water/data/data_0/type.raw b/source/tests/pt/water/data/data_0/type.raw new file mode 100644 index 0000000000..97e8fdfcf8 --- /dev/null +++ b/source/tests/pt/water/data/data_0/type.raw @@ -0,0 +1,192 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 diff --git a/source/tests/pt/water/data/data_0/type_map.raw b/source/tests/pt/water/data/data_0/type_map.raw new file mode 100644 index 
diff --git a/source/tests/pt/water/data/single/set.000/box.npy b/source/tests/pt/water/data/single/set.000/box.npy
new file mode 100644
index 0000000000000000000000000000000000000000..65897e0f9c5ec79e1a3182fc9b9bd6e00799244f
GIT binary patch
[literal 164: base85 payload omitted]

diff --git a/source/tests/pt/water/data/single/set.000/coord.npy b/source/tests/pt/water/data/single/set.000/coord.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6e0594a8030bf6c9d7e3e2a4d1c7b9e5b421af1f
GIT binary patch
[literal 2432: base85 payload omitted]

diff --git a/source/tests/pt/water/data/single/set.000/energy.npy b/source/tests/pt/water/data/single/set.000/energy.npy
new file mode 100644
index 0000000000000000000000000000000000000000..a0a88fb78ae09feb17e41593d6d8f60084479320
GIT binary patch
[literal 132: base85 payload omitted]

diff --git a/source/tests/pt/water/data/single/set.000/force.npy b/source/tests/pt/water/data/single/set.000/force.npy
new file mode 100644
index 0000000000000000000000000000000000000000..d5b847a86e3a5eea0476a8cd93210965b3cb44b9
GIT binary patch
[literal 2432: base85 payload omitted]

diff --git a/source/tests/pt/water/data/single/type.raw b/source/tests/pt/water/data/single/type.raw
new file mode 100644
index 0000000000..97e8fdfcf8
--- /dev/null
+++ b/source/tests/pt/water/data/single/type.raw
@@ -0,0 +1,192 @@
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+0
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
+1
diff --git a/source/tests/pt/water/data/single/type_map.raw b/source/tests/pt/water/data/single/type_map.raw
new file mode 100644
index 0000000000..e900768b1d
--- /dev/null
+++ b/source/tests/pt/water/data/single/type_map.raw
@@ -0,0 +1,2 @@
+O
+H
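A consistency note on the binary stubs above: git's "literal N" header records the unpacked file size, and the sizes match single-precision arrays behind the usual 128-byte .npy header, which identifies `single` as a one-frame system: coord.npy and force.npy are 2432 bytes = 128 + 192 atoms * 3 * 4 bytes, box.npy is 164 = 128 + 9 * 4, and energy.npy is 132 = 128 + 4. The fixed 128-byte header and float32 itemsize are assumptions (npy headers can be longer), so the sketch below is a size heuristic, not a parser:

    def npy_frames(file_bytes, natoms, ncomp=3, itemsize=4, header=128):
        # Estimate the frame count of a flat (nframes, natoms * ncomp) array,
        # assuming float32 data behind a fixed 128-byte .npy header.
        return (file_bytes - header) // (natoms * ncomp * itemsize)

    print(npy_frames(2432, 192))  # -> 1: single/set.000/coord.npy holds one frame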
diff --git a/source/tests/pt/water/lkf.json b/source/tests/pt/water/lkf.json
new file mode 100644
index 0000000000..4385d02136
--- /dev/null
+++ b/source/tests/pt/water/lkf.json
@@ -0,0 +1,79 @@
+{
+  "model": {
+    "type_map": [
+      "O",
+      "H"
+    ],
+    "descriptor": {
+      "type": "se_e2_a",
+      "sel": [
+        46,
+        92
+      ],
+      "rcut_smth": 0.50,
+      "rcut": 6.00,
+      "neuron": [
+        25,
+        25,
+        25
+      ],
+      "resnet_dt": false,
+      "axis_neuron": 16,
+      "seed": 1,
+      "_comment": " that's all"
+    },
+    "fitting_net": {
+      "neuron": [
+        100,
+        100,
+        100
+      ],
+      "resnet_dt": true,
+      "seed": 1,
+      "_comment": " that's all"
+    },
+    "data_stat_nbatch": 20,
+    "_comment": " that's all"
+  },
+  "learning_rate": {
+    "type": "exp",
+    "decay_steps": 5000,
+    "start_lr": 0.001,
+    "stop_lr": 3.51e-8,
+    "_comment": "that's all"
+  },
+  "loss": {
+    "type": "ener",
+    "start_pref_e": 0.02,
+    "limit_pref_e": 1,
+    "start_pref_f": 1000,
+    "limit_pref_f": 1,
+    "_comment": " that's all"
+  },
+  "training": {
+    "training_data": {
+      "systems": [
+        "pt/water/data/data_0"
+      ],
+      "batch_size": 3,
+      "_comment": "that's all"
+    },
+    "validation_data": {
+      "systems": [
+        "pt/water/data/data_0"
+      ],
+      "batch_size": 1,
+      "numb_btch": 3,
+      "_comment": "that's all"
+    },
+    "numb_steps": 1,
+    "seed": 10,
+    "disp_file": "lcurve.out",
+    "disp_freq": 1,
+    "save_freq": 1,
+    "opt_type": "LKF",
+    "kf_blocksize": 1024,
+    "_comment": "that's all"
+  },
+  "_comment": "that's all"
+}
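lkf.json above is a one-step smoke test for the layer-wise Kalman filter optimizer ("opt_type": "LKF", backed by deepmd/pt/optimizer/LKF.py) on the se_e2_a descriptor. Its "learning_rate" block describes an exponential schedule decaying from start_lr toward stop_lr in intervals of decay_steps. The sketch below shows one common way to realize such a schedule; deriving the decay rate from the start/stop values and the total step count is an illustrative assumption, not necessarily the exact formula in deepmd/pt/utils/learning_rate.py:

    import math

    def exp_decay_lr(step, start_lr=1e-3, stop_lr=3.51e-8,
                     decay_steps=5000, stop_steps=1_000_000):
        # Pick the per-interval decay rate so that lr(stop_steps) ~= stop_lr.
        rate = math.exp(math.log(stop_lr / start_lr) / (stop_steps / decay_steps))
        return start_lr * rate ** (step // decay_steps)

    print(exp_decay_lr(0))          # 0.001
    print(exp_decay_lr(1_000_000))  # ~3.51e-08

Here stop_steps stands for the total run length; lkf.json itself sets numb_steps to 1, so the million-step value is borrowed from the longer configurations that follow.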
"numb_steps": 1, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 1, + "save_freq": 1, + "opt_type": "LKF", + "kf_blocksize": 1024, + "_comment": "that's all" + }, + "_comment": "that's all" +} diff --git a/source/tests/pt/water/se_atten.json b/source/tests/pt/water/se_atten.json new file mode 100644 index 0000000000..8867e0db41 --- /dev/null +++ b/source/tests/pt/water/se_atten.json @@ -0,0 +1,84 @@ +{ + "_comment": "that's all", + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_atten", + "sel": 40, + "rcut_smth": 0.5, + "rcut": 4.0, + "neuron": [ + 25, + 50, + 100 + ], + "axis_neuron": 16, + "attn": 64, + "attn_layer": 2, + "attn_dotr": true, + "attn_mask": false, + "post_ln": true, + "ffn": false, + "ffn_embed_dim": 512, + "activation": "tanh", + "scaling_factor": 1.0, + "head_num": 1, + "normalize": false, + "temperature": 1.0 + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-08, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "start_pref_v": 0, + "limit_pref_v": 0, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "numb_btch": 1, + "_comment": "that's all" + }, + "numb_steps": 1000000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 1000, + "_comment": "that's all" + } +} diff --git a/source/tests/pt/water/se_e2_a.json b/source/tests/pt/water/se_e2_a.json new file mode 100644 index 0000000000..425ca3cbf5 --- /dev/null +++ b/source/tests/pt/water/se_e2_a.json @@ -0,0 +1,77 @@ +{ + "model": { + "type_map": [ + "O", + "H" + ], + "descriptor": { + "type": "se_e2_a", + "sel": [ + 46, + 92 + ], + "rcut_smth": 0.50, + "rcut": 6.00, + "neuron": [ + 25, + 50, + 100 + ], + "resnet_dt": false, + "axis_neuron": 16, + "seed": 1, + "_comment": " that's all" + }, + "fitting_net": { + "neuron": [ + 240, + 240, + 240 + ], + "resnet_dt": true, + "seed": 1, + "_comment": " that's all" + }, + "data_stat_nbatch": 20, + "_comment": " that's all" + }, + "learning_rate": { + "type": "exp", + "decay_steps": 5000, + "start_lr": 0.001, + "stop_lr": 3.51e-8, + "_comment": "that's all" + }, + "loss": { + "type": "ener", + "start_pref_e": 0.02, + "limit_pref_e": 1, + "start_pref_f": 1000, + "limit_pref_f": 1, + "_comment": " that's all" + }, + "training": { + "training_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "_comment": "that's all" + }, + "validation_data": { + "systems": [ + "pt/water/data/data_0" + ], + "batch_size": 1, + "numb_btch": 3, + "_comment": "that's all" + }, + "numb_steps": 100000, + "seed": 10, + "disp_file": "lcurve.out", + "disp_freq": 100, + "save_freq": 10000, + "_comment": "that's all" + }, + "_comment": "that's all" +} diff --git a/source/tests/test_adjust_sel.py b/source/tests/test_adjust_sel.py index b1cbdc5afc..9bed3606fd 100644 --- a/source/tests/test_adjust_sel.py +++ b/source/tests/test_adjust_sel.py @@ -82,12 +82,10 @@ def _init_models(): return INPUT, frozen_model, decreased_model, increased_model -INPUT, FROZEN_MODEL, DECREASED_MODEL, INCREASED_MODEL = _init_models() - 
diff --git a/source/tests/test_adjust_sel.py b/source/tests/test_adjust_sel.py
index b1cbdc5afc..9bed3606fd 100644
--- a/source/tests/test_adjust_sel.py
+++ b/source/tests/test_adjust_sel.py
@@ -82,12 +82,10 @@ def _init_models():
     return INPUT, frozen_model, decreased_model, increased_model
 
 
-INPUT, FROZEN_MODEL, DECREASED_MODEL, INCREASED_MODEL = _init_models()
-
-
 class TestDeepPotAAdjustSel(unittest.TestCase):
     @classmethod
     def setUpClass(self):
+        INPUT, FROZEN_MODEL, DECREASED_MODEL, INCREASED_MODEL = _init_models()
         self.dp_original = DeepPot(FROZEN_MODEL)
         self.dp_decreased = DeepPot(DECREASED_MODEL)
         self.dp_increased = DeepPot(INCREASED_MODEL)
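This hunk is the template for every test-file change that follows: the expensive _init_models() call is moved from module import time into setUpClass, so models are built only when the class's tests actually run (not during collection, and not when a version-based skipIf applies), with the artifacts stored on the class for the other hooks. A minimal sketch of the pattern, with a hypothetical build_models() standing in for _init_models():

    import unittest


    def build_models():
        # hypothetical stand-in for _init_models(); imagine expensive
        # training/freezing work happening here
        return "input.json", "frozen.pb"


    class TestLazySetup(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            # Runs once per class, and only if the class is not skipped.
            # Binding to `cls` makes the artifacts visible to every test
            # method and to tearDownClass.
            cls.INPUT, cls.FROZEN_MODEL = build_models()

        @classmethod
        def tearDownClass(cls):
            # Cleanup can reference the same class attributes.
            print("deleting", cls.INPUT, cls.FROZEN_MODEL)

        def test_models_exist(self):
            self.assertTrue(self.FROZEN_MODEL)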
diff --git a/source/tests/test_finetune_se_atten.py b/source/tests/test_finetune_se_atten.py
index 3614fcb13a..47fedcf685 100644
--- a/source/tests/test_finetune_se_atten.py
+++ b/source/tests/test_finetune_se_atten.py
@@ -147,67 +147,77 @@ def _init_models(setup_model, i):
     )
 
 
-if not parse_version(tf.__version__) < parse_version("1.15"):
-
-    def previous_se_atten(jdata):
-        jdata["model"]["descriptor"]["stripped_type_embedding"] = False
-        jdata["model"]["descriptor"]["attn_layer"] = 2
-
-    def stripped_model(jdata):
-        jdata["model"]["descriptor"]["stripped_type_embedding"] = True
-        jdata["model"]["descriptor"]["attn_layer"] = 2
-
-    def compressible_model(jdata):
-        jdata["model"]["descriptor"]["stripped_type_embedding"] = True
-        jdata["model"]["descriptor"]["attn_layer"] = 0
-
-    models = [previous_se_atten, stripped_model, compressible_model]
-    INPUT_PRES = []
-    INPUT_FINETUNES = []
-    INPUT_FINETUNE_MIXS = []
-    PRE_MODELS = []
-    FINETUNED_MODELS = []
-    FINETUNED_MODEL_MIXS = []
-    PRE_MAPS = []
-    FINETUNED_MAPS = []
-    VALID_DATAS = []
-    for i, model in enumerate(models):
-        (
-            INPUT_PRE,
-            INPUT_FINETUNE,
-            INPUT_FINETUNE_MIX,
-            PRE_MODEL,
-            FINETUNED_MODEL,
-            FINETUNED_MODEL_MIX,
-            PRE_MAP,
-            FINETUNED_MAP,
-            VALID_DATA,
-        ) = _init_models(model, i)
-        INPUT_PRES.append(INPUT_PRE)
-        INPUT_FINETUNES.append(INPUT_FINETUNE)
-        INPUT_FINETUNE_MIXS.append(INPUT_FINETUNE_MIX)
-        PRE_MODELS.append(PRE_MODEL)
-        FINETUNED_MODELS.append(FINETUNED_MODEL)
-        FINETUNED_MODEL_MIXS.append(FINETUNED_MODEL_MIX)
-        PRE_MAPS.append(PRE_MAP)
-        FINETUNED_MAPS.append(FINETUNED_MAP)
-        VALID_DATAS.append(VALID_DATA)
-
-
 @unittest.skipIf(
     parse_version(tf.__version__) < parse_version("1.15"),
     f"The current tf version {tf.__version__} is too low to run the new testing model.",
 )
 class TestFinetuneSeAtten(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls) -> None:
+        if not parse_version(tf.__version__) < parse_version("1.15"):
+
+            def previous_se_atten(jdata):
+                jdata["model"]["descriptor"]["stripped_type_embedding"] = False
+                jdata["model"]["descriptor"]["attn_layer"] = 2
+
+            def stripped_model(jdata):
+                jdata["model"]["descriptor"]["stripped_type_embedding"] = True
+                jdata["model"]["descriptor"]["attn_layer"] = 2
+
+            def compressible_model(jdata):
+                jdata["model"]["descriptor"]["stripped_type_embedding"] = True
+                jdata["model"]["descriptor"]["attn_layer"] = 0
+
+            models = [previous_se_atten, stripped_model, compressible_model]
+            INPUT_PRES = []
+            INPUT_FINETUNES = []
+            INPUT_FINETUNE_MIXS = []
+            PRE_MODELS = []
+            FINETUNED_MODELS = []
+            FINETUNED_MODEL_MIXS = []
+            PRE_MAPS = []
+            FINETUNED_MAPS = []
+            VALID_DATAS = []
+            for i, model in enumerate(models):
+                (
+                    INPUT_PRE,
+                    INPUT_FINETUNE,
+                    INPUT_FINETUNE_MIX,
+                    PRE_MODEL,
+                    FINETUNED_MODEL,
+                    FINETUNED_MODEL_MIX,
+                    PRE_MAP,
+                    FINETUNED_MAP,
+                    VALID_DATA,
+                ) = _init_models(model, i)
+                INPUT_PRES.append(INPUT_PRE)
+                INPUT_FINETUNES.append(INPUT_FINETUNE)
+                INPUT_FINETUNE_MIXS.append(INPUT_FINETUNE_MIX)
+                PRE_MODELS.append(PRE_MODEL)
+                FINETUNED_MODELS.append(FINETUNED_MODEL)
+                FINETUNED_MODEL_MIXS.append(FINETUNED_MODEL_MIX)
+                PRE_MAPS.append(PRE_MAP)
+                FINETUNED_MAPS.append(FINETUNED_MAP)
+                VALID_DATAS.append(VALID_DATA)
+            cls.INPUT_PRES = INPUT_PRES
+            cls.INPUT_FINETUNES = INPUT_FINETUNES
+            cls.INPUT_FINETUNE_MIXS = INPUT_FINETUNE_MIXS
+            cls.PRE_MODELS = PRE_MODELS
+            cls.FINETUNED_MODELS = FINETUNED_MODELS
+            cls.FINETUNED_MODEL_MIXS = FINETUNED_MODEL_MIXS
+            cls.PRE_MAPS = PRE_MAPS
+            cls.FINETUNED_MAPS = FINETUNED_MAPS
+            cls.VALID_DATAS = VALID_DATAS
+
     @classmethod
     def tearDownClass(self):
-        for i in range(len(INPUT_PRES)):
-            _file_delete(INPUT_PRES[i])
-            _file_delete(INPUT_FINETUNES[i])
-            _file_delete(INPUT_FINETUNE_MIXS[i])
-            _file_delete(PRE_MODELS[i])
-            _file_delete(FINETUNED_MODELS[i])
-            _file_delete(FINETUNED_MODEL_MIXS[i])
+        for i in range(len(self.INPUT_PRES)):
+            _file_delete(self.INPUT_PRES[i])
+            _file_delete(self.INPUT_FINETUNES[i])
+            _file_delete(self.INPUT_FINETUNE_MIXS[i])
+            _file_delete(self.PRE_MODELS[i])
+            _file_delete(self.FINETUNED_MODELS[i])
+            _file_delete(self.FINETUNED_MODEL_MIXS[i])
         _file_delete("out.json")
         _file_delete("model.ckpt.meta")
         _file_delete("model.ckpt.index")
@@ -223,22 +233,22 @@ def tearDownClass(self):
         _file_delete("lcurve.out")
 
     def test_finetune_standard(self):
-        for i in range(len(INPUT_PRES)):
-            self.valid_data = VALID_DATAS[i]
+        for i in range(len(self.INPUT_PRES)):
+            self.valid_data = self.VALID_DATAS[i]
             pretrained_bias = get_tensor_by_name(
-                PRE_MODELS[i], "fitting_attr/t_bias_atom_e"
+                self.PRE_MODELS[i], "fitting_attr/t_bias_atom_e"
             )
             finetuned_bias = get_tensor_by_name(
-                FINETUNED_MODELS[i], "fitting_attr/t_bias_atom_e"
+                self.FINETUNED_MODELS[i], "fitting_attr/t_bias_atom_e"
             )
-            sorter = np.argsort(PRE_MAPS[i])
+            sorter = np.argsort(self.PRE_MAPS[i])
             idx_type_map = sorter[
-                np.searchsorted(PRE_MAPS[i], FINETUNED_MAPS[i], sorter=sorter)
+                np.searchsorted(self.PRE_MAPS[i], self.FINETUNED_MAPS[i], sorter=sorter)
             ]
             test_data = self.valid_data.get_test()
             atom_nums = np.tile(np.bincount(test_data["type"][0])[idx_type_map], (4, 1))
 
-            dp = DeepPotential(PRE_MODELS[i])
+            dp = DeepPotential(self.PRE_MODELS[i])
             energy = dp.eval(
                 test_data["coord"], test_data["box"], test_data["type"][0]
             )[0]
@@ -250,7 +260,7 @@ def test_finetune_standard(self):
                 0
             ].reshape(-1)
 
-            dp_finetuned = DeepPotential(FINETUNED_MODELS[i])
+            dp_finetuned = DeepPotential(self.FINETUNED_MODELS[i])
             energy_finetuned = dp_finetuned.eval(
                 test_data["coord"], test_data["box"], test_data["type"][0]
             )[0]
@@ -266,22 +276,22 @@ def test_finetune_standard(self):
             np.testing.assert_almost_equal(finetune_results, 0.0, default_places)
 
     def test_finetune_mixed_type(self):
-        for i in range(len(INPUT_PRES)):
-            self.valid_data = VALID_DATAS[i]
+        for i in range(len(self.INPUT_PRES)):
+            self.valid_data = self.VALID_DATAS[i]
             pretrained_bias = get_tensor_by_name(
-                PRE_MODELS[i], "fitting_attr/t_bias_atom_e"
+                self.PRE_MODELS[i], "fitting_attr/t_bias_atom_e"
             )
             finetuned_bias_mixed_type = get_tensor_by_name(
-                FINETUNED_MODEL_MIXS[i], "fitting_attr/t_bias_atom_e"
+                self.FINETUNED_MODEL_MIXS[i], "fitting_attr/t_bias_atom_e"
             )
-            sorter = np.argsort(PRE_MAPS[i])
+            sorter = np.argsort(self.PRE_MAPS[i])
             idx_type_map = sorter[
-                np.searchsorted(PRE_MAPS[i], FINETUNED_MAPS[i], sorter=sorter)
+                np.searchsorted(self.PRE_MAPS[i], self.FINETUNED_MAPS[i], sorter=sorter)
             ]
             test_data = self.valid_data.get_test()
             atom_nums = np.tile(np.bincount(test_data["type"][0])[idx_type_map], (4, 1))
 
-            dp = DeepPotential(PRE_MODELS[i])
+            dp = DeepPotential(self.PRE_MODELS[i])
             energy = dp.eval(
                 test_data["coord"], test_data["box"], test_data["type"][0]
             )[0]
@@ -293,7 +303,7 @@ def test_finetune_mixed_type(self):
                 0
             ].reshape(-1)
 
-            dp_finetuned_mixed_type = DeepPotential(FINETUNED_MODEL_MIXS[i])
+            dp_finetuned_mixed_type = DeepPotential(self.FINETUNED_MODEL_MIXS[i])
             energy_finetuned = dp_finetuned_mixed_type.eval(
                 test_data["coord"], test_data["box"], test_data["type"][0]
            )[0]
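The sorter/searchsorted idiom in the two tests above maps each element of the finetuned type map back to its index in the pretrained type map, so per-type quantities (here, atom counts and energy biases) can be compared across the two models. A small self-contained illustration; the type maps here are made up for the example:

    import numpy as np

    pre_map = np.array(["H", "C", "O"])   # hypothetical pretrained type_map
    fine_map = np.array(["O", "H"])       # hypothetical finetuned type_map

    sorter = np.argsort(pre_map)          # indices that sort pre_map
    idx_type_map = sorter[np.searchsorted(pre_map, fine_map, sorter=sorter)]

    print(idx_type_map)                   # [2 0]: "O" is type 2 and "H" is
                                          # type 0 in the pretrained map

np.searchsorted requires a sorted array, which type maps generally are not; passing sorter= lets it search the sorted view while sorter[...] translates the hits back to positions in the original, unsorted pre_map.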
diff --git a/source/tests/test_init_frz_model_multi.py b/source/tests/test_init_frz_model_multi.py
index e5e5733c7d..fc37d82397 100644
--- a/source/tests/test_init_frz_model_multi.py
+++ b/source/tests/test_init_frz_model_multi.py
@@ -180,20 +180,19 @@ def _init_models():
     return INPUT, ckpt, frozen_model, model_ckpt, model_frz, data, stop_batch
 
 
-(
-    INPUT,
-    CKPT,
-    FROZEN_MODEL,
-    CKPT_TRAINER,
-    FRZ_TRAINER,
-    VALID_DATA,
-    STOP_BATCH,
-) = _init_models()
-
-
 class TestInitFrzModelMulti(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
+        (
+            cls.INPUT,
+            cls.CKPT,
+            cls.FROZEN_MODEL,
+            CKPT_TRAINER,
+            FRZ_TRAINER,
+            VALID_DATA,
+            STOP_BATCH,
+        ) = _init_models()
+
         cls.dp_ckpt = CKPT_TRAINER
         cls.dp_frz = FRZ_TRAINER
         cls.valid_data_dict = {"water_ener": VALID_DATA}
@@ -205,19 +204,19 @@ def setUpClass(cls):
 
     @classmethod
     def tearDownClass(cls):
-        _file_delete(INPUT)
-        _file_delete(FROZEN_MODEL)
+        _file_delete(cls.INPUT)
+        _file_delete(cls.FROZEN_MODEL)
         _file_delete("out.json")
         _file_delete(str(tests_path / "checkpoint"))
-        _file_delete(CKPT + ".meta")
-        _file_delete(CKPT + ".index")
-        _file_delete(CKPT + ".data-00000-of-00001")
-        _file_delete(CKPT + "-0.meta")
-        _file_delete(CKPT + "-0.index")
-        _file_delete(CKPT + "-0.data-00000-of-00001")
-        _file_delete(CKPT + "-1.meta")
-        _file_delete(CKPT + "-1.index")
-        _file_delete(CKPT + "-1.data-00000-of-00001")
+        _file_delete(cls.CKPT + ".meta")
+        _file_delete(cls.CKPT + ".index")
+        _file_delete(cls.CKPT + ".data-00000-of-00001")
+        _file_delete(cls.CKPT + "-0.meta")
+        _file_delete(cls.CKPT + "-0.index")
+        _file_delete(cls.CKPT + "-0.data-00000-of-00001")
+        _file_delete(cls.CKPT + "-1.meta")
+        _file_delete(cls.CKPT + "-1.index")
+        _file_delete(cls.CKPT + "-1.data-00000-of-00001")
         _file_delete("input_v2_compat.json")
         _file_delete("lcurve.out")
_file_delete(cls.CKPT + "-0.data-00000-of-00001") + _file_delete(cls.CKPT + "-1.meta") + _file_delete(cls.CKPT + "-1.index") + _file_delete(cls.CKPT + "-1.data-00000-of-00001") _file_delete("input_v2_compat.json") _file_delete("lcurve.out") diff --git a/source/tests/test_init_frz_model_se_a_tebd.py b/source/tests/test_init_frz_model_se_a_tebd.py index 594bf83085..1b282c00d5 100644 --- a/source/tests/test_init_frz_model_se_a_tebd.py +++ b/source/tests/test_init_frz_model_se_a_tebd.py @@ -129,20 +129,19 @@ def _init_models(): return INPUT, ckpt, frozen_model, model_ckpt, model_frz, data, stop_batch -( - INPUT, - CKPT, - FROZEN_MODEL, - CKPT_TRAINER, - FRZ_TRAINER, - VALID_DATA, - STOP_BATCH, -) = _init_models() - - class TestInitFrzModelA(unittest.TestCase): @classmethod def setUpClass(cls): + ( + cls.INPUT, + cls.CKPT, + cls.FROZEN_MODEL, + CKPT_TRAINER, + FRZ_TRAINER, + VALID_DATA, + STOP_BATCH, + ) = _init_models() + cls.dp_ckpt = CKPT_TRAINER cls.dp_frz = FRZ_TRAINER cls.valid_data = VALID_DATA @@ -150,19 +149,19 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - _file_delete(INPUT) - _file_delete(FROZEN_MODEL) + _file_delete(cls.INPUT) + _file_delete(cls.FROZEN_MODEL) _file_delete("out.json") _file_delete(str(tests_path / "checkpoint")) - _file_delete(CKPT + ".meta") - _file_delete(CKPT + ".index") - _file_delete(CKPT + ".data-00000-of-00001") - _file_delete(CKPT + "-0.meta") - _file_delete(CKPT + "-0.index") - _file_delete(CKPT + "-0.data-00000-of-00001") - _file_delete(CKPT + "-1.meta") - _file_delete(CKPT + "-1.index") - _file_delete(CKPT + "-1.data-00000-of-00001") + _file_delete(cls.CKPT + ".meta") + _file_delete(cls.CKPT + ".index") + _file_delete(cls.CKPT + ".data-00000-of-00001") + _file_delete(cls.CKPT + "-0.meta") + _file_delete(cls.CKPT + "-0.index") + _file_delete(cls.CKPT + "-0.data-00000-of-00001") + _file_delete(cls.CKPT + "-1.meta") + _file_delete(cls.CKPT + "-1.index") + _file_delete(cls.CKPT + "-1.data-00000-of-00001") _file_delete("input_v2_compat.json") _file_delete("lcurve.out") diff --git a/source/tests/test_init_frz_model_se_a_type.py b/source/tests/test_init_frz_model_se_a_type.py index 3221245065..b356dbf6d0 100644 --- a/source/tests/test_init_frz_model_se_a_type.py +++ b/source/tests/test_init_frz_model_se_a_type.py @@ -132,20 +132,18 @@ def _init_models(): return INPUT, ckpt, frozen_model, model_ckpt, model_frz, data, stop_batch -( - INPUT, - CKPT, - FROZEN_MODEL, - CKPT_TRAINER, - FRZ_TRAINER, - VALID_DATA, - STOP_BATCH, -) = _init_models() - - class TestInitFrzModelAType(unittest.TestCase): @classmethod def setUpClass(cls): + ( + cls.INPUT, + cls.CKPT, + cls.FROZEN_MODEL, + CKPT_TRAINER, + FRZ_TRAINER, + VALID_DATA, + STOP_BATCH, + ) = _init_models() cls.dp_ckpt = CKPT_TRAINER cls.dp_frz = FRZ_TRAINER cls.valid_data = VALID_DATA @@ -153,19 +151,19 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): - _file_delete(INPUT) - _file_delete(FROZEN_MODEL) + _file_delete(cls.INPUT) + _file_delete(cls.FROZEN_MODEL) _file_delete("out.json") _file_delete(str(tests_path / "checkpoint")) - _file_delete(CKPT + ".meta") - _file_delete(CKPT + ".index") - _file_delete(CKPT + ".data-00000-of-00001") - _file_delete(CKPT + "-0.meta") - _file_delete(CKPT + "-0.index") - _file_delete(CKPT + "-0.data-00000-of-00001") - _file_delete(CKPT + "-1.meta") - _file_delete(CKPT + "-1.index") - _file_delete(CKPT + "-1.data-00000-of-00001") + _file_delete(cls.CKPT + ".meta") + _file_delete(cls.CKPT + ".index") + _file_delete(cls.CKPT + ".data-00000-of-00001") + 
_file_delete(cls.CKPT + "-0.meta") + _file_delete(cls.CKPT + "-0.index") + _file_delete(cls.CKPT + "-0.data-00000-of-00001") + _file_delete(cls.CKPT + "-1.meta") + _file_delete(cls.CKPT + "-1.index") + _file_delete(cls.CKPT + "-1.data-00000-of-00001") _file_delete("input_v2_compat.json") _file_delete("lcurve.out") diff --git a/source/tests/test_init_frz_model_se_atten.py b/source/tests/test_init_frz_model_se_atten.py index 5554ae415c..7889440cd3 100644 --- a/source/tests/test_init_frz_model_se_atten.py +++ b/source/tests/test_init_frz_model_se_atten.py @@ -146,32 +146,6 @@ def compressible_model(jdata): jdata["model"]["descriptor"]["stripped_type_embedding"] = True jdata["model"]["descriptor"]["attn_layer"] = 0 - models = [previous_se_atten, stripped_model, compressible_model] - INPUTS = [] - CKPTS = [] - FROZEN_MODELS = [] - CKPT_TRAINERS = [] - FRZ_TRAINERS = [] - VALID_DATAS = [] - STOP_BATCHS = [] - for i, model in enumerate(models): - ( - INPUT, - CKPT, - FROZEN_MODEL, - CKPT_TRAINER, - FRZ_TRAINER, - VALID_DATA, - STOP_BATCH, - ) = _init_models(model, i) - INPUTS.append(INPUT) - CKPTS.append(CKPT) - FROZEN_MODELS.append(FROZEN_MODEL) - CKPT_TRAINERS.append(CKPT_TRAINER) - FRZ_TRAINERS.append(FRZ_TRAINER) - VALID_DATAS.append(VALID_DATA) - STOP_BATCHS.append(STOP_BATCH) - @unittest.skipIf( parse_version(tf.__version__) < parse_version("1.15"), @@ -180,6 +154,38 @@ def compressible_model(jdata): class TestInitFrzModelAtten(unittest.TestCase): @classmethod def setUpClass(cls): + models = [previous_se_atten, stripped_model, compressible_model] + INPUTS = [] + CKPTS = [] + FROZEN_MODELS = [] + CKPT_TRAINERS = [] + FRZ_TRAINERS = [] + VALID_DATAS = [] + STOP_BATCHS = [] + for i, model in enumerate(models): + ( + INPUT, + CKPT, + FROZEN_MODEL, + CKPT_TRAINER, + FRZ_TRAINER, + VALID_DATA, + STOP_BATCH, + ) = _init_models(model, i) + INPUTS.append(INPUT) + CKPTS.append(CKPT) + FROZEN_MODELS.append(FROZEN_MODEL) + CKPT_TRAINERS.append(CKPT_TRAINER) + FRZ_TRAINERS.append(FRZ_TRAINER) + VALID_DATAS.append(VALID_DATA) + STOP_BATCHS.append(STOP_BATCH) + cls.INPUTS = INPUTS + cls.CKPTS = CKPTS + cls.FROZEN_MODELS = FROZEN_MODELS + cls.CKPT_TRAINERS = CKPT_TRAINERS + cls.FRZ_TRAINERS = FRZ_TRAINERS + cls.VALID_DATAS = VALID_DATAS + cls.STOP_BATCHS = STOP_BATCHS cls.dp_ckpts = CKPT_TRAINERS cls.dp_frzs = FRZ_TRAINERS cls.valid_datas = VALID_DATAS @@ -188,28 +194,28 @@ def setUpClass(cls): @classmethod def tearDownClass(cls): for i in range(len(cls.dp_ckpts)): - _file_delete(INPUTS[i]) - _file_delete(FROZEN_MODELS[i]) + _file_delete(cls.INPUTS[i]) + _file_delete(cls.FROZEN_MODELS[i]) _file_delete("out.json") _file_delete(str(tests_path / "checkpoint")) - _file_delete(CKPT[i] + ".meta") - _file_delete(CKPT[i] + ".index") - _file_delete(CKPT[i] + ".data-00000-of-00001") - _file_delete(CKPT[i] + "-0.meta") - _file_delete(CKPT[i] + "-0.index") - _file_delete(CKPT[i] + "-0.data-00000-of-00001") - _file_delete(CKPT[i] + "-1.meta") - _file_delete(CKPT[i] + "-1.index") - _file_delete(CKPT[i] + "-1.data-00000-of-00001") + _file_delete(cls.CKPTS[i] + ".meta") + _file_delete(cls.CKPTS[i] + ".index") + _file_delete(cls.CKPTS[i] + ".data-00000-of-00001") + _file_delete(cls.CKPTS[i] + "-0.meta") + _file_delete(cls.CKPTS[i] + "-0.index") + _file_delete(cls.CKPTS[i] + "-0.data-00000-of-00001") + _file_delete(cls.CKPTS[i] + "-1.meta") + _file_delete(cls.CKPTS[i] + "-1.index") + _file_delete(cls.CKPTS[i] + "-1.data-00000-of-00001") _file_delete(f"input_v2_compat{i}.json") _file_delete("lcurve.out") def 
diff --git a/source/tests/test_init_frz_model_se_atten.py b/source/tests/test_init_frz_model_se_atten.py
index 5554ae415c..7889440cd3 100644
--- a/source/tests/test_init_frz_model_se_atten.py
+++ b/source/tests/test_init_frz_model_se_atten.py
@@ -146,32 +146,6 @@ def compressible_model(jdata):
     jdata["model"]["descriptor"]["stripped_type_embedding"] = True
     jdata["model"]["descriptor"]["attn_layer"] = 0
 
-    models = [previous_se_atten, stripped_model, compressible_model]
-    INPUTS = []
-    CKPTS = []
-    FROZEN_MODELS = []
-    CKPT_TRAINERS = []
-    FRZ_TRAINERS = []
-    VALID_DATAS = []
-    STOP_BATCHS = []
-    for i, model in enumerate(models):
-        (
-            INPUT,
-            CKPT,
-            FROZEN_MODEL,
-            CKPT_TRAINER,
-            FRZ_TRAINER,
-            VALID_DATA,
-            STOP_BATCH,
-        ) = _init_models(model, i)
-        INPUTS.append(INPUT)
-        CKPTS.append(CKPT)
-        FROZEN_MODELS.append(FROZEN_MODEL)
-        CKPT_TRAINERS.append(CKPT_TRAINER)
-        FRZ_TRAINERS.append(FRZ_TRAINER)
-        VALID_DATAS.append(VALID_DATA)
-        STOP_BATCHS.append(STOP_BATCH)
-
 
 @unittest.skipIf(
     parse_version(tf.__version__) < parse_version("1.15"),
@@ -180,6 +154,38 @@ def compressible_model(jdata):
 class TestInitFrzModelAtten(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
+        models = [previous_se_atten, stripped_model, compressible_model]
+        INPUTS = []
+        CKPTS = []
+        FROZEN_MODELS = []
+        CKPT_TRAINERS = []
+        FRZ_TRAINERS = []
+        VALID_DATAS = []
+        STOP_BATCHS = []
+        for i, model in enumerate(models):
+            (
+                INPUT,
+                CKPT,
+                FROZEN_MODEL,
+                CKPT_TRAINER,
+                FRZ_TRAINER,
+                VALID_DATA,
+                STOP_BATCH,
+            ) = _init_models(model, i)
+            INPUTS.append(INPUT)
+            CKPTS.append(CKPT)
+            FROZEN_MODELS.append(FROZEN_MODEL)
+            CKPT_TRAINERS.append(CKPT_TRAINER)
+            FRZ_TRAINERS.append(FRZ_TRAINER)
+            VALID_DATAS.append(VALID_DATA)
+            STOP_BATCHS.append(STOP_BATCH)
+        cls.INPUTS = INPUTS
+        cls.CKPTS = CKPTS
+        cls.FROZEN_MODELS = FROZEN_MODELS
+        cls.CKPT_TRAINERS = CKPT_TRAINERS
+        cls.FRZ_TRAINERS = FRZ_TRAINERS
+        cls.VALID_DATAS = VALID_DATAS
+        cls.STOP_BATCHS = STOP_BATCHS
         cls.dp_ckpts = CKPT_TRAINERS
         cls.dp_frzs = FRZ_TRAINERS
         cls.valid_datas = VALID_DATAS
@@ -188,28 +194,28 @@ def setUpClass(cls):
     @classmethod
     def tearDownClass(cls):
         for i in range(len(cls.dp_ckpts)):
-            _file_delete(INPUTS[i])
-            _file_delete(FROZEN_MODELS[i])
+            _file_delete(cls.INPUTS[i])
+            _file_delete(cls.FROZEN_MODELS[i])
             _file_delete("out.json")
             _file_delete(str(tests_path / "checkpoint"))
-            _file_delete(CKPT[i] + ".meta")
-            _file_delete(CKPT[i] + ".index")
-            _file_delete(CKPT[i] + ".data-00000-of-00001")
-            _file_delete(CKPT[i] + "-0.meta")
-            _file_delete(CKPT[i] + "-0.index")
-            _file_delete(CKPT[i] + "-0.data-00000-of-00001")
-            _file_delete(CKPT[i] + "-1.meta")
-            _file_delete(CKPT[i] + "-1.index")
-            _file_delete(CKPT[i] + "-1.data-00000-of-00001")
+            _file_delete(cls.CKPTS[i] + ".meta")
+            _file_delete(cls.CKPTS[i] + ".index")
+            _file_delete(cls.CKPTS[i] + ".data-00000-of-00001")
+            _file_delete(cls.CKPTS[i] + "-0.meta")
+            _file_delete(cls.CKPTS[i] + "-0.index")
+            _file_delete(cls.CKPTS[i] + "-0.data-00000-of-00001")
+            _file_delete(cls.CKPTS[i] + "-1.meta")
+            _file_delete(cls.CKPTS[i] + "-1.index")
+            _file_delete(cls.CKPTS[i] + "-1.data-00000-of-00001")
             _file_delete(f"input_v2_compat{i}.json")
             _file_delete("lcurve.out")
 
     def test_single_frame(self):
         for i in range(len(self.dp_ckpts)):
-            self.dp_ckpt = CKPT_TRAINERS[i]
-            self.dp_frz = FRZ_TRAINERS[i]
-            self.valid_data = VALID_DATAS[i]
-            self.stop_batch = STOP_BATCHS[i]
+            self.dp_ckpt = self.CKPT_TRAINERS[i]
+            self.dp_frz = self.FRZ_TRAINERS[i]
+            self.valid_data = self.VALID_DATAS[i]
+            self.stop_batch = self.STOP_BATCHS[i]
 
             valid_batch = self.valid_data.get_batch()
             natoms = valid_batch["natoms_vec"]
diff --git a/source/tests/test_init_frz_model_se_r.py b/source/tests/test_init_frz_model_se_r.py
index 84d109bcfd..fd916b3fdc 100644
--- a/source/tests/test_init_frz_model_se_r.py
+++ b/source/tests/test_init_frz_model_se_r.py
@@ -136,20 +136,19 @@ def _init_models():
     return INPUT, ckpt, frozen_model, model_ckpt, model_frz, data, stop_batch
 
 
-(
-    INPUT,
-    CKPT,
-    FROZEN_MODEL,
-    CKPT_TRAINER,
-    FRZ_TRAINER,
-    VALID_DATA,
-    STOP_BATCH,
-) = _init_models()
-
-
 class TestInitFrzModelR(unittest.TestCase):
     @classmethod
     def setUpClass(cls):
+        (
+            cls.INPUT,
+            cls.CKPT,
+            cls.FROZEN_MODEL,
+            CKPT_TRAINER,
+            FRZ_TRAINER,
+            VALID_DATA,
+            STOP_BATCH,
+        ) = _init_models()
+
         cls.dp_ckpt = CKPT_TRAINER
         cls.dp_frz = FRZ_TRAINER
         cls.valid_data = VALID_DATA
@@ -157,19 +156,19 @@ def setUpClass(cls):
 
     @classmethod
     def tearDownClass(cls):
-        _file_delete(INPUT)
-        _file_delete(FROZEN_MODEL)
+        _file_delete(cls.INPUT)
+        _file_delete(cls.FROZEN_MODEL)
         _file_delete("out.json")
         _file_delete(str(tests_path / "checkpoint"))
-        _file_delete(CKPT + ".meta")
-        _file_delete(CKPT + ".index")
-        _file_delete(CKPT + ".data-00000-of-00001")
-        _file_delete(CKPT + "-0.meta")
-        _file_delete(CKPT + "-0.index")
-        _file_delete(CKPT + "-0.data-00000-of-00001")
-        _file_delete(CKPT + "-1.meta")
-        _file_delete(CKPT + "-1.index")
-        _file_delete(CKPT + "-1.data-00000-of-00001")
+        _file_delete(cls.CKPT + ".meta")
+        _file_delete(cls.CKPT + ".index")
+        _file_delete(cls.CKPT + ".data-00000-of-00001")
+        _file_delete(cls.CKPT + "-0.meta")
+        _file_delete(cls.CKPT + "-0.index")
+        _file_delete(cls.CKPT + "-0.data-00000-of-00001")
+        _file_delete(cls.CKPT + "-1.meta")
+        _file_delete(cls.CKPT + "-1.index")
+        _file_delete(cls.CKPT + "-1.data-00000-of-00001")
         _file_delete("input_v2_compat.json")
         _file_delete("lcurve.out")
"-1.data-00000-of-00001") + _file_delete(cls.CKPT + ".meta") + _file_delete(cls.CKPT + ".index") + _file_delete(cls.CKPT + ".data-00000-of-00001") + _file_delete(cls.CKPT + "-0.meta") + _file_delete(cls.CKPT + "-0.index") + _file_delete(cls.CKPT + "-0.data-00000-of-00001") + _file_delete(cls.CKPT + "-1.meta") + _file_delete(cls.CKPT + "-1.index") + _file_delete(cls.CKPT + "-1.data-00000-of-00001") _file_delete("input_v2_compat.json") _file_delete("lcurve.out") diff --git a/source/tests/test_model_compression_se_a_ebd_type_one_side.py b/source/tests/test_model_compression_se_a_ebd_type_one_side.py index 9ad1970e9b..741c95b26e 100644 --- a/source/tests/test_model_compression_se_a_ebd_type_one_side.py +++ b/source/tests/test_model_compression_se_a_ebd_type_one_side.py @@ -98,7 +98,6 @@ def _init_models_exclude_types(): INPUT, FROZEN_MODEL, COMPRESSED_MODEL = _init_models() -INPUT_ET, FROZEN_MODEL_ET, COMPRESSED_MODEL_ET = _init_models_exclude_types() class TestDeepPotAPBC(unittest.TestCase): @@ -444,8 +443,13 @@ def test_ase(self): class TestDeepPotAPBCExcludeTypes(unittest.TestCase): @classmethod def setUpClass(self): - self.dp_original = DeepPot(FROZEN_MODEL_ET) - self.dp_compressed = DeepPot(COMPRESSED_MODEL_ET) + ( + self.INPUT_ET, + self.FROZEN_MODEL_ET, + self.COMPRESSED_MODEL_ET, + ) = _init_models_exclude_types() + self.dp_original = DeepPot(self.FROZEN_MODEL_ET) + self.dp_compressed = DeepPot(self.COMPRESSED_MODEL_ET) self.coords = np.array( [ 12.83, @@ -473,9 +477,9 @@ def setUpClass(self): @classmethod def tearDownClass(self): - _file_delete(INPUT_ET) - _file_delete(FROZEN_MODEL_ET) - _file_delete(COMPRESSED_MODEL_ET) + _file_delete(self.INPUT_ET) + _file_delete(self.FROZEN_MODEL_ET) + _file_delete(self.COMPRESSED_MODEL_ET) _file_delete("out.json") _file_delete("compress.json") _file_delete("checkpoint") diff --git a/source/tests/test_model_compression_se_a_type_one_side_exclude_types.py b/source/tests/test_model_compression_se_a_type_one_side_exclude_types.py index 5b6ac4e13e..bdf09cf3e8 100644 --- a/source/tests/test_model_compression_se_a_type_one_side_exclude_types.py +++ b/source/tests/test_model_compression_se_a_type_one_side_exclude_types.py @@ -66,12 +66,11 @@ def _init_models(): return INPUT, frozen_model, compressed_model -INPUT, FROZEN_MODEL, COMPRESSED_MODEL = _init_models() - - class TestDeepPotAPBCTypeOneSideExcludeTypes(unittest.TestCase): @classmethod def setUpClass(self): + INPUT, FROZEN_MODEL, COMPRESSED_MODEL = _init_models() + self.dp_original = DeepPot(FROZEN_MODEL) self.dp_compressed = DeepPot(COMPRESSED_MODEL) self.coords = np.array(