From 3d1463394386bb87fe688111f6e0ed700578550f Mon Sep 17 00:00:00 2001 From: Kaushik Ram Sadagopan Date: Tue, 22 Aug 2023 00:53:31 -0700 Subject: [PATCH] Squash for going public. > > Co-authored-by: Ning <7022920+cndn@users.noreply.github.com> Co-authored-by: Kaushik Ram Sadagopan <29103305+kauterry@users.noreply.github.com> Co-authored-by: Ruslan Mavlyutov <133805601+mavlyutovr@users.noreply.github.com> Co-authored-by: Maha <7280004+elbayadm@users.noreply.github.com> Co-authored-by: sravyapopuri388 <51169165+sravyapopuri388@users.noreply.github.com> Co-authored-by: Jean <107696+jeanm@users.noreply.github.com> Co-authored-by: Pierre Andrews <628467+Mortimerp9@users.noreply.github.com> Co-authored-by: Can Balioglu <8310727+cbalioglu@users.noreply.github.com> --- .gitignore | 141 ++++ .pre-commit-config.yaml | 15 + CODE_OF_CONDUCT.md | 80 +++ CONTRIBUTING.md | 37 ++ LICENSE | 400 +++++++++++ README.md | 97 +++ docs/m4t/eval_README.md | 59 ++ docs/m4t/on_device_README.md | 56 ++ docs/m4t/seamless_align_README.md | 84 +++ requirements.txt | 5 + scripts/m4t/finetune/README.md | 132 ++++ scripts/m4t/finetune/__init__.py | 0 scripts/m4t/finetune/dataloader.py | 229 +++++++ scripts/m4t/finetune/dataset.py | 172 +++++ scripts/m4t/finetune/dist_utils.py | 76 +++ scripts/m4t/finetune/finetune.py | 186 ++++++ scripts/m4t/finetune/trainer.py | 367 ++++++++++ scripts/m4t/predict/README.md | 213 ++++++ scripts/m4t/predict/predict.py | 76 +++ seamlessM4T.png | Bin 0 -> 198534 bytes setup.py | 15 + src/seamless_communication/__init__.py | 7 + src/seamless_communication/assets/__init__.py | 9 + .../assets/cards/seamlessM4T_large.yaml | 50 ++ .../assets/cards/seamlessM4T_medium.yaml | 50 ++ .../assets/cards/unity_nllb-100.yaml | 108 +++ .../assets/cards/unity_nllb-200.yaml | 212 ++++++ .../assets/cards/vocoder_36langs.yaml | 198 ++++++ .../assets/download_manager.py | 27 + src/seamless_communication/assets/store.py | 22 + .../datasets/__init__.py | 0 .../datasets/datatypes.py | 47 ++ 
.../datasets/huggingface.py | 126 ++++ src/seamless_communication/models/__init__.py | 5 + .../models/inference/__init__.py | 6 + .../models/inference/translator.py | 209 ++++++ .../models/unity/__init__.py | 49 ++ .../models/unity/adaptor_block.py | 423 ++++++++++++ .../models/unity/builder.py | 626 ++++++++++++++++++ .../models/unity/generator.py | 220 ++++++ .../models/unity/loader.py | 270 ++++++++ .../models/unity/model.py | 326 +++++++++ .../models/unity/unit_tokenizer.py | 199 ++++++ .../models/vocoder/__init__.py | 19 + .../models/vocoder/builder.py | 134 ++++ .../models/vocoder/codehifigan.py | 137 ++++ .../models/vocoder/hifigan.py | 194 ++++++ .../models/vocoder/loader.py | 41 ++ .../models/vocoder/vocoder.py | 39 ++ 49 files changed, 6193 insertions(+) create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100644 CODE_OF_CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 docs/m4t/eval_README.md create mode 100644 docs/m4t/on_device_README.md create mode 100644 docs/m4t/seamless_align_README.md create mode 100644 requirements.txt create mode 100644 scripts/m4t/finetune/README.md create mode 100644 scripts/m4t/finetune/__init__.py create mode 100644 scripts/m4t/finetune/dataloader.py create mode 100644 scripts/m4t/finetune/dataset.py create mode 100644 scripts/m4t/finetune/dist_utils.py create mode 100644 scripts/m4t/finetune/finetune.py create mode 100644 scripts/m4t/finetune/trainer.py create mode 100644 scripts/m4t/predict/README.md create mode 100644 scripts/m4t/predict/predict.py create mode 100644 seamlessM4T.png create mode 100644 setup.py create mode 100644 src/seamless_communication/__init__.py create mode 100644 src/seamless_communication/assets/__init__.py create mode 100644 src/seamless_communication/assets/cards/seamlessM4T_large.yaml create mode 100644 src/seamless_communication/assets/cards/seamlessM4T_medium.yaml create mode 100644 
src/seamless_communication/assets/cards/unity_nllb-100.yaml create mode 100644 src/seamless_communication/assets/cards/unity_nllb-200.yaml create mode 100644 src/seamless_communication/assets/cards/vocoder_36langs.yaml create mode 100644 src/seamless_communication/assets/download_manager.py create mode 100644 src/seamless_communication/assets/store.py create mode 100644 src/seamless_communication/datasets/__init__.py create mode 100644 src/seamless_communication/datasets/datatypes.py create mode 100644 src/seamless_communication/datasets/huggingface.py create mode 100644 src/seamless_communication/models/__init__.py create mode 100644 src/seamless_communication/models/inference/__init__.py create mode 100644 src/seamless_communication/models/inference/translator.py create mode 100644 src/seamless_communication/models/unity/__init__.py create mode 100644 src/seamless_communication/models/unity/adaptor_block.py create mode 100644 src/seamless_communication/models/unity/builder.py create mode 100644 src/seamless_communication/models/unity/generator.py create mode 100644 src/seamless_communication/models/unity/loader.py create mode 100644 src/seamless_communication/models/unity/model.py create mode 100644 src/seamless_communication/models/unity/unit_tokenizer.py create mode 100644 src/seamless_communication/models/vocoder/__init__.py create mode 100644 src/seamless_communication/models/vocoder/builder.py create mode 100644 src/seamless_communication/models/vocoder/codehifigan.py create mode 100644 src/seamless_communication/models/vocoder/hifigan.py create mode 100644 src/seamless_communication/models/vocoder/loader.py create mode 100644 src/seamless_communication/models/vocoder/vocoder.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..4be13638 --- /dev/null +++ b/.gitignore @@ -0,0 +1,141 @@ +# JetBrains PyCharm IDE +.idea/ + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# macOS dir 
files +.DS_Store + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# Checkpoints +checkpoints + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv +venv/ +ENV/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# Generated files +/fairseq/temporal_convolution_tbc +/fairseq/modules/*_layer/*_forward.cu +/fairseq/modules/*_layer/*_backward.cu +/fairseq/version.py + +# data +data-bin/ + +# reranking +/examples/reranking/rerank_data + +# Cython-generated C++ source files +/fairseq/data/data_utils_fast.cpp +/fairseq/data/token_block_utils_fast.cpp + +# VSCODE +.vscode/ftp-sync.json +.vscode/settings.json + +# Experimental Folder +experimental/* + +# Weights and Biases logs +wandb/ + +# Hydra artifacts +nohup.out +multirun +outputs diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..142ed102 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,15 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.1.0 + hooks: + - id: 
trailing-whitespace + - id: check-ast + - id: check-merge-conflict + - id: check-added-large-files + args: ["--maxkb=2000"] + - id: end-of-file-fixer + + - repo: https://github.com/psf/black + rev: 22.3.0 + hooks: + - id: black diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..83f431e8 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..27b59464 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,37 @@ +# Contributing to `seamless_communication` +We want to make contributing to this project as easy and transparent as +possible. + +## Our Development Process + +`seamless_communication` is built for Meta AI Seamless Communication team public release. +We engage in multiple projects internally and will update this repository with our progress upon reaching specific milestones. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Meta's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. 
+ + +## License +By contributing to `seamless_communication`, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..d1bbe803 --- /dev/null +++ b/LICENSE @@ -0,0 +1,400 @@ + +Attribution-NonCommercial 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. 
Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-NonCommercial 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-NonCommercial 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + +Section 1 -- Definitions. + + a. 
Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. 
Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. 
reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. 
Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. 
a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. 
+ +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. 
automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. 
Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/README.md b/README.md new file mode 100644 index 00000000..b9b6ef73 --- /dev/null +++ b/README.md @@ -0,0 +1,97 @@ +![](seamlessM4T.png) +# SeamlessM4T +SeamlessM4T is designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text. + +SeamlessM4T covers: +- 📥 101 languages for speech input +- ⌨️ 96 Languages for text input/output +- 🗣️ 35 languages for speech output. 
+ +This unified model enables multiple tasks without relying on multiple separate models: +- Speech-to-speech translation (S2ST) +- Speech-to-text translation (S2TT) +- Text-to-speech translation (T2ST) +- Text-to-text translation (T2TT) +- Automatic speech recognition (ASR) + +Links: +- [Blog](https://ai.meta.com/blog/seamless-m4t) +- [Paper](https://ai.meta.com/research/publications/seamlessm4tmassively-multilingual-multimodal-machine-translation) +- [Demo](https://ai.meta.com/resources/models-and-libraries/seamless-communication/) +- [🤗 Hugging Face space](https://huggingface.co/spaces/facebook/seamless_m4t) + +# Quick Start +## Installation +1. Install fairseq2. Follow tutorials in fairseq2 [README](https://github.com/facebookresearch/fairseq2) +2. Install seamless_communication +``` +pip install . +``` + +## Running inference + +Here’s an example of using the CLI from the root directory to run inference. + +S2ST task: +```bash +python scripts/m4t/predict/predict.py s2st --output_path +``` +T2TT task: +```bash +python scripts/m4t/predict/predict.py t2tt --src_lang +``` + +Please refer to the [evaluation README](scripts/m4t/predict) for detailed instruction on how to run inference. + +# Libraries + +Seamless Communication depends on 3 libraries developed by Meta. + +## [fairseq2](https://github.com/facebookresearch/fairseq2) +fairseq2 is our next-generation open-source library of sequence modeling components that provides researchers and developers with building blocks for machine translation, language modeling, and other sequence generation tasks. All SeamlessM4T models in this repository are powered by fairseq2. + +## [SONAR and BLASER 2.0](https://github.com/facebookresearch/SONAR) +SONAR, Sentence-level multimOdal and laNguage-Agnostic Representations is a new multilingual and -modal sentence embedding space which outperforms existing sentence embeddings such as LASER3 and LabSE on the xsim and xsim++ multilingual similarity search tasks. 
SONAR provides text and speech encoders for many languages. SeamlessAlign was mined based on SONAR embeddings. + +BLASER 2.0 is our latest model-based evaluation metric for multimodal translation. It is an extension of BLASER, supporting both speech and text. It operates directly on the source signal, and as such, does not require any intermediate ASR system like ASR-BLEU. As in the first version, BLASER 2.0 leverages the similarity between input and output sentence embeddings. SONAR is the underlying embedding space for BLASER 2.0. Scripts to run evaluation with BLASER 2.0 can be found in the [SONAR repo](https://github.com/facebookresearch/SONAR). + +## [stopes](https://github.com/facebookresearch/stopes) +As part of the seamless communication project, we've extended the stopes library. Version 1 provided a text-text mining tool to build training datasets for translation models. Version 2 has been extended thanks to SONAR to support tasks around training large speech translation models. In particular, we provide tools to read/write the fairseq audiozip datasets and a new mining pipeline that can do speech-speech, text-speech, speech-text and text-text mining, all based on the new SONAR embedding space.
+ + +# Resources and usage +## SeamlessM4T models +| Model Name | #params | checkpoint | metrics | +| ------------------ | ------- | --------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------ | +| SeamlessM4T-Large | 2.3B | [🤗 Model card](https://huggingface.co/facebook/seamless-m4t-large) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-large/resolve/main/multitask_unity_large.pt) | [metrics](https://dl.fbaipublicfiles.com/seamlessM4T/metrics/seamlessM4T_large.zip) | +| SeamlessM4T-Medium | 1.2B | [🤗 Model card](https://huggingface.co/facebook/seamless-m4t-medium) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-medium/resolve/main/multitask_unity_medium.pt) | [metrics](https://dl.fbaipublicfiles.com/seamlessM4T/metrics/seamlessM4T_medium.zip) | + +We provide the extensive evaluation results of seamlessM4T-Large and SeamlessM4T-Medium reported in the paper (as averages) in the `metrics` files above. + +## Evaluating SeamlessM4T models +To reproduce our results, or to evaluate using the same metrics over your own test sets, please check out [README here](https://github.com/facebookresearch/seamless_communication/blob/main/docs/m4t/eval_README.md). + +## Finetuning SeamlessM4T models + +Please check out [README under scripts/m4t/finetune](scripts/m4t/finetune/README.md). + +## On-device models +Apart from Seamless-M4T large (2.3B) and medium (1.2B) models, we are also releasing a small model (281M) targeted for on-device inference. To learn more about the usage and model details check out [README here](https://github.com/facebookresearch/seamless_communication/blob/main/docs/m4t/on_device_README.md) + +## SeamlessAlign mined dataset +We open-source the metadata to SeamlessAlign, the largest open dataset for multimodal translation, totaling 270k+ hours of aligned Speech and Text data. 
The dataset can be rebuilt by the community based on the [SeamlessAlign readme](https://github.com/facebookresearch/seamless_communication/blob/main/docs/m4t/seamless_align_README.md). + +# Citation +If you use SeamlessM4T in your work or any models/datasets/artifacts published in SeamlessM4T, please cite : + +```bibtex +@article{seamlessm4t2023, + title={SeamlessM4T—Massively Multilingual \& Multimodal Machine Translation}, + author={{Seamless Communication}, Lo\"{i}c Barrault, Yu-An Chung, Mariano Cora Meglioli, David Dale, Ning Dong, Paul-Ambroise Duquenne, Hady Elsahar, Hongyu Gong, Kevin Heffernan, John Hoffman, Christopher Klaiber, Pengwei Li, Daniel Licht, Jean Maillard, Alice Rakotoarison, Kaushik Ram Sadagopan, Guillaume Wenzek, Ethan Ye, Bapi Akula, Peng-Jen Chen, Naji El Hachem, Brian Ellis, Gabriel Mejia Gonzalez, Justin Haaheim, Prangthip Hansanti, Russ Howes, Bernie Huang, Min-Jae Hwang, Hirofumi Inaguma, Somya Jain, Elahe Kalbassi, Amanda Kallet, Ilia Kulikov, Janice Lam, Daniel Li, Xutai Ma, Ruslan Mavlyutov, Benjamin Peloquin, Mohamed Ramadan, Abinesh Ramakrishnan, Anna Sun, Kevin Tran, Tuan Tran, Igor Tufanov, Vish Vogeti, Carleigh Wood, Yilin Yang, Bokai Yu, Pierre Andrews, Can Balioglu, Marta R. 
Costa-juss\`{a}, Onur \c{C}elebi, Maha Elbayad, Cynthia Gao, Francisco Guzm\'an, Justine Kao, Ann Lee, Alexandre Mourachko, Juan Pino, Sravya Popuri, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Paden Tomasello, Changhan Wang, Jeff Wang, Skyler Wang}, + journal={ArXiv}, + year={2023} +} +``` +# License + +seamless_communication is CC-BY-NC 4.0 licensed, as found in LICENSE file diff --git a/docs/m4t/eval_README.md b/docs/m4t/eval_README.md new file mode 100644 index 00000000..65362ad4 --- /dev/null +++ b/docs/m4t/eval_README.md @@ -0,0 +1,59 @@ +## Evaluation protocols for various SeamlessM4T tasks +Refer to the [inference tutorial](../../scripts/m4t/predict/README.md) for detailed guidance on how to run inference using SeamlessM4T models. In this tutorial, the evaluation protocol used for all tasks supported by SeamlessM4T is briefly described. + +### S2TT +[Sacrebleu library](https://github.com/mjpost/sacrebleu) is used to compute the BLEU scores. To be consistent with Whisper, a character-level (*char*) tokenizer for Mandarin Chinese (cmn), Japanese (jpn), Thai (tha), Lao (lao), and Burmese (mya) is used. The default *13a* tokenizer is used for other languages. Raw (unnormalized) references and predictions are used for computing the scores. + +```python +import sacrebleu + +bleu_metric = sacrebleu.BLEU(tokenize=&lt;tokenizer&gt;) +bleu_score = bleu_metric.corpus_score(&lt;predictions&gt;, [&lt;references&gt;]) +``` + +### S2ST and T2ST +To measure the quality of the translated speech outputs, the audios are first transcribed using Whisper ASR model and BLEU score is computed on these ASR transcriptions comparing them with the ground truth text references. + +Whisper large-v2 is used for non-English target languages and medium.en trained on English-only data is used for English due to its superior performance.
+ +```python +import whisper + +model = whisper.load_model('medium.en') +model = whisper.load_model('large-v2') +``` +To reproduce the whisper transcriptions and thereby the ASR-BLEU scores, greedy decoding is used with a preset temperature value of 0. Target language information is also passed to the whisper model. + +```python +prediction = model.transcribe(&lt;audio_path&gt;, language=&lt;target_language&gt;, temperature=0, beam_size=1)["text"] +``` + +Whisper-normalizer is run on the ground truth and the model generated &lt;predictions&gt;. ASR-BLEU scores are computed using sacrebleu following the same tokenization as described for S2TT. + +```python +from whisper_normalizer.basic import BasicTextNormalizer +from whisper_normalizer.english import EnglishTextNormalizer + +normalizer = EnglishTextNormalizer() ## To be used for English +normalizer = BasicTextNormalizer() ## For non-English directions +``` + +### T2TT +Similar to S2TT, raw (unnormalized) references and predictions are used to compute the chrF++ scores for text-to-text translation. + +```python +import sacrebleu + +chrf_metric = sacrebleu.CHRF(word_order=2) +chrf_score = chrf_metric.corpus_score(&lt;predictions&gt;, [&lt;references&gt;]) +``` + +### ASR +Similar to Whisper, character-level error rate (CER) metric is used for Mandarin Chinese (cmn), Japanese (jpn), Thai (tha), Lao (lao), and Burmese (mya) languages. Word-level error rate (WER) metric is used for the remaining languages. Whisper-normalizer is applied on the ground truth and the model generated &lt;predictions&gt;. [JiWER library](https://github.com/jitsi/jiwer) is used to compute these CER and WER scores. + +```python +import jiwer + +wer = WER(&lt;references&gt;, &lt;predictions&gt;) ## WER +cer = CER(&lt;references&gt;, &lt;predictions&gt;) ## CER + +``` diff --git a/docs/m4t/on_device_README.md b/docs/m4t/on_device_README.md new file mode 100644 index 00000000..6ab321b4 --- /dev/null +++ b/docs/m4t/on_device_README.md @@ -0,0 +1,56 @@ +# On-device Models + +Apart from SeamlessM4T-LARGE (2.3B) and SeamlessM4T-MEDIUM (1.2B) models, we are also developing a small model (281M) targeted for on-device inference.
+This folder contains an example to run an exported small model covering most tasks (ASR/S2TT/S2ST). The model could be executed on popular mobile devices with Pytorch Mobile (https://pytorch.org/mobile/home/). + +## Overview +| Model | Checkpoint | Num Params | Disk Size | Supported Tasks | Supported Languages| +|---------|------------|----------|-------------|------------|-------------------------| +| UnitY-Small|[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small/resolve/main/unity_on_device.ptl) | 281M | 862MB | S2ST, S2TT, ASR |eng, fra, hin, por, spa| +| UnitY-Small-S2T |[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t/resolve/main/unity_on_device_s2t.ptl) | 235M | 637MB | S2TT, ASR |eng, fra,hin, por, spa| + +UnitY-Small-S2T is a pruned version of UnitY-Small without 2nd pass unit decoding. + +## Inference +To use exported model, users don't need seamless_communication or fairseq2 dependency. +```python +import torchaudio +import torch +audio_input, _ = torchaudio.load(TEST_AUDIO_PATH) # Load waveform using torchaudio + +s2t_model = torch.jit.load("unity_on_device_s2t.ptl") # Load exported S2T model +text = s2t_model(audio_input, tgt_lang=TGT_LANG) # Forward call with tgt_lang specified for ASR or S2TT +print(f"{lang}:{text}") + +s2st_model = torch.jit.load("unity_on_device.ptl") +text, units, waveform = s2st_model(audio_input, tgt_lang=TGT_LANG) # S2ST model also returns waveform +print(f"{lang}:{text}") +torchaudio.save(f"{OUTPUT_FOLDER}/{lang}.wav", waveform.unsqueeze(0), sample_rate=16000) # Save output waveform to local file +``` + + +Also running the exported model doesn't need python runtime. 
For example, you could load this model in C++ following [this tutorial](https://pytorch.org/tutorials/advanced/cpp_export.html), or build your own on-device applications similar to [this example](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition) + + +## Metrics +### S2TT BLEU / S2ST ASR-BLEU on FLEURS +For ASR-BLEU, we follow the same protocol as Large/Medium models: Use Whisper-large-v2 for eng-X and Whisper-medium for X-eng when evaluating ASR BLEU. +| Direction | 1st-pass BLEU (S2TT) | 2nd-pass ASR-BLEU (S2ST) | +|---------|----------------------|----------------------| +| eng-hin|10.43|15.06| +| eng-por|21.54|17.35| +| eng-rus|7.88|5.11| +| eng-spa|12.78|11.75| +| hin-eng|12.92|10.50| +| por-eng|22.99|24.81| +| rus-eng|18.24|18.24| +| spa-eng|14.37|14.85| + +### ASR WER on FLEURS +| LANG | WER | +|---------|----------------------| +| eng|27.3| +| hin|41.5| +| por|25.2| +| rus|33.0| +| spa|18.0| diff --git a/docs/m4t/seamless_align_README.md b/docs/m4t/seamless_align_README.md new file mode 100644 index 00000000..d5b9ff12 --- /dev/null +++ b/docs/m4t/seamless_align_README.md @@ -0,0 +1,84 @@ +# Seamless - Speech to Speech and Speech to Text Metadata + +This document contains metadata information for reconstructing the dataset we used for training our models. + +## Format + +The metadata format is similar to [NLLB bitext format](https://github.com/facebookresearch/LASER/tree/main/data/nllb200) with some small differences. + +The metadata files are tab separated, gzip files. Each file corresponds to one alignment direction. + +File naming convention: + +- for text, we use 3 letters: e.g. `fra`, `eng`, `tur` +- for audio, we use 2 letters and an 'A': e.g. `frA`, `enA`, `trA` + +For example, the direction `eng-trA` corresponds to information for reconstructing English text with Turkish speech alignments. Similarly, `enA-jpn` corresponds to "English speech with Japanese text", and `enA-frA` corresponds to "English speech with French speech".
+ +Each line has 11 columns. + +For Audio, the columns correspond to: + + - `cc_warc`: The warc file reference containing the public audio url + - `cc_sha`: not used + - `audio_speeh_segment_url`: space separated audio reference. See below. + - `cc_lineno`: not used + - `paragraph_digest`: not used + - `sentence_digest`: not used + - `text_lid_score`: not used + - `laser_score`: score of the alignment + - `direction`: direction, e.g. `enA-jpn` + - `side`: side, e.g. `enA` or `jpn` + - `line_no`: alignment number + +`audio_speeh_segment_url` is a space separated audio reference. It has the following format: +` `, where `start_frame` and `end_frame` correspond to the segment that needs to be extracted from the audio file that is referenced at ``, resampled at 16000 Hz. + +For text, the columns are similar to NLLB format (except being tab separated here): + +- If the metadata comes from Common Crawl: + + - `cc_warc`: the reference to the Common Crawl WET file + - `cc_sha`: the document sha1 in the WET file + - `cc_document_url`: the url of the document referenced in the WET file + - `cc_lineno`: the line number in the document referenced in the WET file + - `paragraph_digest`: xxhash.xxh3_64_intdigest of the paragraph + - `sentence_digest`: xxhash.xxh3_64_intdigest of the sentence + - `text_lid_score`: language identification score, when available + - `laser_score`: score of the alignment + - `direction`: direction, e.g. `enA-jpn` + - `side`: side, e.g. `enA` or `jpn` + - `line_no`: alignment number + +- If the metadata comes from other corpus: + - `corpus`: corpus name + - `cc_sha`: not used + - `cc_document_url`: not used + - `lineno`: line number in the document + - `paragraph_digest`: xxhash.xxh3_64_intdigest of the paragraph + - `sentence_digest`: xxhash.xxh3_64_intdigest of the sentence + - `text_lid_score`: language identification score, when available + - `laser_score`: score of the alignment + - `direction`: direction, e.g. `enA-jpn` + - `side`: side, e.g. 
`enA` or `jpn` + - `line_no`: alignment number + +## Data + +[arb-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.arb-enA.tsv.gz) [ben-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.ben-enA.tsv.gz) [cat-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.cat-enA.tsv.gz) [dan-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.dan-enA.tsv.gz) [enA-est](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-est.tsv.gz) [enA-fin](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-fin.tsv.gz) [enA-jpn](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-jpn.tsv.gz) [enA-mlt](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-mlt.tsv.gz) [enA-nld](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-nld.tsv.gz) [enA-pol](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-pol.tsv.gz) [enA-por](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-por.tsv.gz) [enA-ron](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-ron.tsv.gz) [enA-slk](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-slk.tsv.gz) [enA-swe](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-swe.tsv.gz) [enA-swh](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-swh.tsv.gz) [enA-tur](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-tur.tsv.gz) [enA-ukr](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-ukr.tsv.gz) [enA-urd](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-urd.tsv.gz) 
[enA-vie](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-vie.tsv.gz) [arA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.arA-enA.tsv.gz) [arA-eng](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.arA-eng.tsv.gz) [beA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.beA-enA.tsv.gz) [caA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.caA-enA.tsv.gz) [caA-eng](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.caA-eng.tsv.gz) [csA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.csA-enA.tsv.gz) [csA-eng](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.csA-eng.tsv.gz) [cyA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.cyA-enA.tsv.gz) [cyA-eng](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.cyA-eng.tsv.gz) [daA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.daA-enA.tsv.gz) [daA-eng](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.daA-eng.tsv.gz) [deA-enA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.deA-enA.tsv.gz) [deA-eng](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.deA-eng.tsv.gz) [enA-esA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-esA.tsv.gz) [enA-fiA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-fiA.tsv.gz) [enA-frA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-frA.tsv.gz) [enA-hiA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-hiA.tsv.gz) [enA-idA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-idA.tsv.gz) 
[enA-itA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-itA.tsv.gz) [enA-knA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-knA.tsv.gz) [enA-koA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-koA.tsv.gz) [enA-mtA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-mtA.tsv.gz) [enA-nlA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-nlA.tsv.gz) [enA-plA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-plA.tsv.gz) [enA-ptA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-ptA.tsv.gz) [enA-rnA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-rnA.tsv.gz) [enA-ruA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-ruA.tsv.gz) [enA-skA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-skA.tsv.gz) [enA-svA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-svA.tsv.gz) [enA-swA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-swA.tsv.gz) [enA-taA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-taA.tsv.gz) [enA-teA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-teA.tsv.gz) [enA-tgA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-tgA.tsv.gz) [enA-thA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-thA.tsv.gz) [enA-trA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-trA.tsv.gz) [enA-ukA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-ukA.tsv.gz) [enA-urA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-urA.tsv.gz) 
[enA-uzA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-uzA.tsv.gz) [enA-viA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-viA.tsv.gz) [enA-zhA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.enA-zhA.tsv.gz) [eng-esA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-esA.tsv.gz) [eng-fiA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-fiA.tsv.gz) [eng-frA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-frA.tsv.gz) [eng-hiA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-hiA.tsv.gz) [eng-idA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-idA.tsv.gz) [eng-itA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-itA.tsv.gz) [eng-knA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-knA.tsv.gz) [eng-koA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-koA.tsv.gz) [eng-mtA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-mtA.tsv.gz) [eng-nlA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-nlA.tsv.gz) [eng-plA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-plA.tsv.gz) [eng-ptA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-ptA.tsv.gz) [eng-rnA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-rnA.tsv.gz) [eng-ruA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-ruA.tsv.gz) [eng-skA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-skA.tsv.gz) [eng-swA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-swA.tsv.gz) 
[eng-taA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-taA.tsv.gz) [eng-teA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-teA.tsv.gz) [eng-tgA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-tgA.tsv.gz) [eng-thA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-thA.tsv.gz) [eng-trA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-trA.tsv.gz) [eng-ukA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-ukA.tsv.gz) [eng-urA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-urA.tsv.gz) [eng-uzA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-uzA.tsv.gz) [eng-viA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-viA.tsv.gz) [eng-zhA](https://dl.fbaipublicfiles.com/seamless/data/seamless.dataset.metadata.public.eng-zhA.tsv.gz) + +## Download script + +You can use the `wet_lines` script to download and gather aligned text information from the metadata. This script can be found [here](https://github.com/kpu/preprocess/blob/wet/preprocess/wet_lines_main.cc). + +### Example usage: + +`zcat seamless.dataset.metadata.public.enA-swA.tsv.gz | egrep ^crawl-data | tr '\t' ' ' | wet_lines` + +Based on metadata information it receives from stdin, wet_lines will download the corpora, find the paragraph and print the input with an additional column which corresponds to the text of the paragraph. + +In order to retrieve the sentences from these paragraphs, one can use the sentence splitter available [here](https://github.com/facebookresearch/LASER/tree/main/utils). It will print the input (metadata + paragraph) with an additional column which corresponds to the text of the sentence. 
+ +### Reconstructing sentences from metadata: + +`xzcat metadatafile.xz | egrep ^crawl-data | wet_lines | python -c "from sentence_cleaner_splitter.cleaner_splitter import *; split_clean()"` diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..3abf7e4b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +pre-commit +datasets +torchaudio +soundfile +librosa diff --git a/scripts/m4t/finetune/README.md b/scripts/m4t/finetune/README.md new file mode 100644 index 00000000..13a6caf9 --- /dev/null +++ b/scripts/m4t/finetune/README.md @@ -0,0 +1,132 @@ +## Finetuning scripts for M4T + +This section demonstrates an example of M4T finetuning on a single translation direction: English-to-Korean. + +The trainer and dataloader were designed mainly for demonstration purposes. Their simplicity should facilitate the code transparency and portability. + +## Data preparation + +M4T training dataset is a multimodal parallel corpus. Each training sample has four parts: audio and text representation of the sample in the source language, and its corresponding audio and text representation in the target language. + +That kind of dataset can be prepared using `dataset.py` script that downloads FLEURS dataset from [HuggingFace datasets hub](https://huggingface.co/datasets/google/fleurs), (optionally) extracts units from the target audio samples, and prepares a manifest consumable by `finetune.py`. Manifest is a text file where each line represents information about a single dataset sample, serialized in JSON format. + +List of input arguments for `dataset.py`: + +```bash + --source_lang SOURCE_LANG + M4T langcode of the dataset SOURCE language + --target_lang TARGET_LANG + M4T langcode of the dataset TARGET language + --split SPLIT Dataset split/shard to download (`train`, `test`) + --save_dir SAVE_DIR Directory where the datasets will be stored with HuggingFace datasets cache files +``` + +Language codes should follow the notation adopted by M4T models.
+ +Below is an example bash script that prepares a training and evaluation dataset for the translation direction English-to-Korean: + +```bash +export DATASET_DIR=~/m4t_dataset +mkdir -p $DATASET_DIR + +python scripts/m4t/finetune/dataset.py \ + --source_lang eng \ + --target_lang kor \ + --split train \ + --save_dir $DATASET_DIR + python scripts/m4t/finetune/dataset.py \ + --source_lang eng \ + --target_lang kor \ + --split validation \ + --save_dir $DATASET_DIR +``` + + +Output manifests will be stored in `${DATASET_DIR}/train_manifest.json` and `${DATASET_DIR}/validation_manifest.json`. + + +## Finetuning + +`finetune.py` is an example finetuning script that initializes dataloaders, and launches training loop with periodic scoring against the validation dataset. +It is recommended to launch it with [`torchrun`](https://pytorch.org/docs/stable/elastic/run.html). Multi-gpu and multi-node training are supported out of the box. + +List of input arguments for `finetune.py`: + +```bash + --train_dataset TRAIN_DATASET + Path to manifest with train samples + --eval_dataset EVAL_DATASET + Path to manifest with eval samples + --model_name MODEL_NAME + Base model name (e.g, `seamlessM4T_medium`, `seamlessM4T_large`) + --save_model_to SAVE_MODEL_TO + Path to save best finetuned model + --seed SEED Randomizer seed value + --batch_size BATCH_SIZE + Batch size for training and evaluation + --patience PATIENCE Set early termination after `patience` number of evaluations without eval loss improvements + --max_epochs MAX_EPOCHS + Max number of training epochs + --learning_rate LEARNING_RATE + Finetuning learning rate + --warmup_steps WARMUP_STEPS + Number of steps with linearly increasing learning rate + --eval_steps EVAL_STEPS + Get eval loss after each `eval_steps` training steps + --log_steps LOG_STEPS + Log inner loss after each `log_steps` training steps + --mode {FinetuneMode.SPEECH_TO_SPEECH,FinetuneMode.SPEECH_TO_TEXT,FinetuneMode.TEXT_TO_SPEECH} + * `SPEECH_TO_SPEECH` -- 
finetune S2T and T2U parts of the model; + * `TEXT_TO_SPEECH` -- finetune only T2U; + * `SPEECH_TO_TEXT` -- finetune only S2T +``` + +The script supports three modes of finetuning: +- `SPEECH_TO_SPEECH`: in this case all model weights except the text encoder will be engaged; +- `TEXT_TO_SPEECH`: only text-to-unit part of the model will be engaged in the finetuning, other weights will be frozen; +- `SPEECH_TO_TEXT`: only speech-to-text part of the model will be engaged in the finetuning. + +The referenced finetuning script does not support finetuning of the text encoder. + + +Below is an example bash script that launches finetuning of M4T-large on the dataset prepared earlier, using a single node with eight GPUs: + +``` +torchrun \ + --rdzv-backend=c10d \ + --rdzv-endpoint=localhost:0 \ + --nnodes=1 \ + --nproc-per-node=8 \ + scripts/m4t/finetune/finetune.py \ + --mode SPEECH_TO_TEXT \ + --train_dataset $DATASET_DIR/train_manifest.json \ + --eval_dataset $DATASET_DIR/validation_manifest.json \ + --learning_rate 1e-6 \ + --warmup_steps 100 \ + --max_epochs 10 \ + --patience 3 \ + --model_name seamlessM4T_large \ + --save_model_to $DATASET_DIR/checkpoint.pt +``` + +Excerpt from an example finetuning log: + +``` +...
+2023-08-21 14:46:16,936 INFO -- trainer.1100368: Eval after 300 updates: loss=8.7755 best_loss=8.7755 patience_steps_left=3 +2023-08-21 14:46:16,936 INFO -- trainer.1100368: Saving model +2023-08-21 14:46:35,863 INFO -- trainer.1100368: Epoch 006 / update 00310: train loss=16.3768 last lr=5.68E-08 +2023-08-21 14:46:42,610 INFO -- trainer.1100368: Epoch 006 / update 00320: train loss=16.3730 last lr=5.59E-08 +2023-08-21 14:46:48,285 INFO -- trainer.1100368: Epoch 006 / update 00330: train loss=16.4598 last lr=5.50E-08 +2023-08-21 14:46:54,390 INFO -- trainer.1100368: Epoch 006 / update 00340: train loss=16.4218 last lr=5.42E-08 +2023-08-21 14:47:08,461 INFO -- trainer.1100368: Epoch 006 / update 00350: train loss=16.3906 last lr=5.35E-08 +2023-08-21 14:47:09,067 INFO -- trainer.1100368: Run evaluation +2023-08-21 14:47:19,205 INFO -- trainer.1100368: Eval after 350 updates: loss=8.7462 best_loss=8.7462 patience_steps_left=3 +2023-08-21 14:47:19,205 INFO -- trainer.1100368: Saving model +2023-08-21 14:47:44,981 INFO -- trainer.1100368: Epoch 007 / update 00360: train loss=16.4267 last lr=5.27E-08 +2023-08-21 14:47:51,383 INFO -- trainer.1100368: Epoch 007 / update 00370: train loss=16.3630 last lr=5.20E-08 +2023-08-21 14:47:58,305 INFO -- trainer.1100368: Epoch 007 / update 00380: train loss=16.3666 last lr=5.13E-08 +2023-08-21 14:48:04,396 INFO -- trainer.1100368: Epoch 007 / update 00390: train loss=16.3605 last lr=5.06E-08 +2023-08-21 14:48:10,630 INFO -- trainer.1100368: Epoch 007 / update 00400: train loss=16.3518 last lr=5.00E-08 +... +``` diff --git a/scripts/m4t/finetune/__init__.py b/scripts/m4t/finetune/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/m4t/finetune/dataloader.py b/scripts/m4t/finetune/dataloader.py new file mode 100644 index 00000000..4f01e884 --- /dev/null +++ b/scripts/m4t/finetune/dataloader.py @@ -0,0 +1,229 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +# All rights reserved. 
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.


import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional

import numpy as np
import torch
import torchaudio
import torchaudio.compliance.kaldi as ta_kaldi
from datasets import Dataset
from datasets.distributed import split_dataset_by_node
from fairseq2.models.nllb.tokenizer import NllbTokenizer, TextTokenEncoder
from torch import Tensor
from torch.nn.functional import pad as pad_tensor
from torch.utils.data import DataLoader

from seamless_communication.datasets.datatypes import LangPairSample
from seamless_communication.models.unity.unit_tokenizer import (
    UnitTokenEncoder,
    UnitTokenizer,
)

logger = logging.getLogger(__name__)


@dataclass
class SeqsBatch:
    """Tensors for one direction of a training batch (S2T or T2U)."""

    src_tokens: Optional[Tensor]
    src_lengths: Optional[Tensor]
    target_tokens: Optional[Tensor]
    prev_output_tokens: Optional[Tensor]
    target_lengths: Optional[Tensor]

    def __del__(self) -> None:
        """Explicitly delete tensors to force GPU memory cleanup."""
        for tensor in [
            self.src_tokens,
            self.src_lengths,
            self.target_tokens,
            self.prev_output_tokens,
            self.target_lengths,
        ]:
            if tensor is not None:
                del tensor


@dataclass
class MultimodalSeqsBatch:
    """Paired speech-to-text and text-to-unit batches for one sample list."""

    speech_to_text: SeqsBatch
    text_to_units: SeqsBatch

    def __del__(self) -> None:
        del self.speech_to_text
        del self.text_to_units


@dataclass
class BatchingConfig:
    fbank_feats_pad_idx: int = 0
    """The pad index to use in fbanks batching."""

    batch_size: int = 5
    """Number of samples per batch."""

    rank: int = 0
    """The rank of this worker in the process group."""

    world_size: int = 1
    """The world size of the process group."""

    num_workers: int = 2
    """Parallelism in dataset preparation."""

    float_dtype: torch.dtype = torch.float16
    """Select between fp16/fp32 for float tensors """


def worker_init_fn(worker_id: int) -> None:
    """Give every DataLoader worker a distinct, deterministic numpy seed."""
    np.random.seed(np.random.get_state()[1][0] + worker_id)


class UnitYDataLoader:
    """Streams collated UnitY finetuning batches (speech fbanks, target text
    tokens and optional target unit tokens) from a JSON-lines manifest."""

    def __init__(
        self,
        text_tokenizer: NllbTokenizer,
        unit_tokenizer: UnitTokenizer,
        dataset_manifest_path: str,
        batching_config: BatchingConfig,
    ):
        self.text_tokenizer = text_tokenizer
        # Per-language encoders are created lazily on first use (see below).
        self.text_encoders_per_lang: Dict[str, TextTokenEncoder] = {}
        self.unit_tokenizer = unit_tokenizer
        self.unit_encoders_per_lang: Dict[str, UnitTokenEncoder] = {}
        self.batching_config = batching_config
        self.dataset = self._load_manifest(dataset_manifest_path)

    def get_dataloader(self) -> DataLoader:
        """Build a torch DataLoader over this worker's shard of the dataset."""
        subset = split_dataset_by_node(
            self.dataset,
            rank=self.batching_config.rank,
            world_size=self.batching_config.world_size,
        )
        data_loader = DataLoader(
            dataset=subset,
            batch_size=self.batching_config.batch_size,
            shuffle=True,
            num_workers=self.batching_config.num_workers,
            collate_fn=self._prepare_batch,
            worker_init_fn=worker_init_fn,
        )
        return data_loader

    def __iter__(self) -> Iterable[MultimodalSeqsBatch]:
        return self.get_dataloader().__iter__()

    def _get_source_fbank(self, sample: LangPairSample) -> Tensor:
        """Load source audio and extract 80-dim log-mel filterbank features."""
        audio_input = torchaudio.load(sample.source.audio_local_path)[0]
        return ta_kaldi.fbank(audio_input, num_mel_bins=80)

    def _get_tokenized_target_text(self, sample: LangPairSample) -> Tensor:
        """Encode target text and append EOS.

        Expected sequence is [<eos>, <lang_tok>, ..text tokens.., <eos>]
        (placeholder names reconstructed from context -- confirm against the
        tokenizer's target-mode output).
        """
        target_lang = sample.target.lang
        if target_lang not in self.text_encoders_per_lang:
            self.text_encoders_per_lang[
                target_lang
            ] = self.text_tokenizer.create_encoder(lang=target_lang, mode="target")
        tokens = self.text_encoders_per_lang[target_lang](sample.target.text)
        eos_idx = self.text_tokenizer.vocab_info.eos_idx
        tokens = torch.concat([tokens, torch.LongTensor([eos_idx])])
        return tokens

    def _get_tokenized_units(self, sample: LangPairSample) -> Optional[Tensor]:
        """Encode target units and append EOS; None when no units available.

        Expected sequence is [<eos>, <lang_tok>, ..unit tokens.., <eos>]
        (placeholder names reconstructed from context -- confirm against the
        unit tokenizer's output).
        """
        if sample.target.units is None:
            return None
        target_lang = sample.target.lang
        if target_lang not in self.unit_encoders_per_lang:
            self.unit_encoders_per_lang[
                target_lang
            ] = self.unit_tokenizer.create_encoder(lang=target_lang)
        tokens = self.unit_encoders_per_lang[target_lang](
            torch.LongTensor(sample.target.units).unsqueeze(0)
        )
        eos_idx = self.unit_tokenizer.vocab_info.eos_idx
        tokens = torch.concat([tokens.squeeze(0), torch.LongTensor([eos_idx])])
        return tokens

    def _batch_tensors(self, tensors: List[Tensor], pad_value: Any) -> Tensor:
        """Right-pad all tensors along dim 0 to a common length and stack."""
        padding_size = max(tensor.shape[0] for tensor in tensors)
        dims = len(tensors[0].shape)
        padded_tensors = []
        for tensor in tensors:
            # F.pad expects (last-dim-left, last-dim-right, ..., dim0-left,
            # dim0-right): the final slot pads the end of dim 0.
            padding = [0] * 2 * dims
            padding[-1] = padding_size - tensor.shape[0]
            padded_tensors.append(pad_tensor(tensor, padding, "constant", pad_value))
        return torch.stack(padded_tensors, dim=0)

    def _prepare_batch(self, raw_samples: List[Dict[str, Any]]) -> MultimodalSeqsBatch:
        """Collate raw manifest rows into a MultimodalSeqsBatch."""
        samples = [LangPairSample.from_json(sample) for sample in raw_samples]
        # input speech
        src_tokens_list = [self._get_source_fbank(sample) for sample in samples]
        src_tokens = self._batch_tensors(
            src_tokens_list, pad_value=self.batching_config.fbank_feats_pad_idx
        ).to(self.batching_config.float_dtype)
        # Lengths come from the unpadded fbanks (frames per sample).  The loop
        # variable is named `fbank` so it no longer shadows `src_tokens` above.
        src_lengths = torch.LongTensor([fbank.shape[0] for fbank in src_tokens_list])
        # output text: shift by one token for teacher forcing.
        text_tokens_list = [
            self._get_tokenized_target_text(sample) for sample in samples
        ]
        text_pad_idx = self.text_tokenizer.vocab_info.pad_idx
        prev_outputs_tokens = self._batch_tensors(
            [tokens[:-1] for tokens in text_tokens_list], pad_value=text_pad_idx
        )
        target_tokens = self._batch_tensors(
            [tokens[1:] for tokens in text_tokens_list], pad_value=text_pad_idx
        )
        tokens_lengths = torch.LongTensor(
            [tokens.shape[0] - 1 for tokens in text_tokens_list]
        )
        # output units: if any sample lacks units, drop the whole T2U side.
        units_list_raw = [self._get_tokenized_units(sample) for sample in samples]
        if None in units_list_raw:
            prev_outputs_units = None
            target_units = None
            units_lengths = None
        else:
            units_list: List[Tensor] = [
                value for value in units_list_raw if value is not None
            ]
            units_pad_idx = self.unit_tokenizer.vocab_info.pad_idx
            prev_outputs_units = self._batch_tensors(
                [tokens[:-1] for tokens in units_list], pad_value=units_pad_idx
            )
            target_units = self._batch_tensors(
                [tokens[1:] for tokens in units_list], pad_value=units_pad_idx
            )
            units_lengths = torch.LongTensor(
                [tokens.shape[0] - 1 for tokens in units_list]
            )
        return MultimodalSeqsBatch(
            speech_to_text=SeqsBatch(
                src_tokens=src_tokens,
                src_lengths=src_lengths,
                target_tokens=target_tokens,
                prev_output_tokens=prev_outputs_tokens,
                target_lengths=tokens_lengths,
            ),
            text_to_units=SeqsBatch(
                src_tokens=None,
                src_lengths=None,
                target_tokens=target_units,
                prev_output_tokens=prev_outputs_units,
                target_lengths=units_lengths,
            ),
        )

    def _load_manifest(self, dataset_manifest_path: str) -> Dataset:
        """Read a JSON-lines manifest into an in-memory HF Dataset."""
        with open(dataset_manifest_path) as fp_in:
            dataset = [json.loads(line) for line in fp_in]
        return Dataset.from_list(dataset)


# ---- next file in patch: scripts/m4t/finetune/dataset.py ----
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import dataclasses
import json
import logging
import os
from argparse import Namespace
from pathlib import Path

from seamless_communication.datasets.huggingface import (
    Speech2SpeechFleursDatasetBuilder,
)

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s -- %(name)s: %(message)s",
)

logger = logging.getLogger("dataset")


# Full list of FLEURS langcodes is available at https://huggingface.co/datasets/google/fleurs
# Full list of M4T langcodes is available
# in paper "SeamlessM4T-Massively Multilingual & Multimodal Machine Translation" (Table 5)
# NOTE: the original literal listed "ita" twice; the duplicate entry (with the
# same value "it_it") has been removed.
UNITY_TO_FLEURS_LANG_MAPPING = {
    "eng": "en_us",
    "ita": "it_it",
    "afr": "af_za",
    "asm": "as_in",
    "bel": "be_by",
    "bul": "bg_bg",
    "ben": "bn_in",
    "cat": "ca_es",
    "ces": "cs_cz",
    "dan": "da_dk",
    "deu": "de_de",
    "ell": "el_gr",
    "fin": "fi_fi",
    "fra": "fr_fr",
    "glg": "gl_es",
    "heb": "he_il",
    "hin": "hi_in",
    "hrv": "hr_hr",
    "hun": "hu_hu",
    "ind": "id_id",
    "ibo": "ig_ng",
    "isl": "is_is",
    "jpn": "ja_jp",
    "jav": "jv_id",
    "kaz": "kk_kz",
    "kan": "kn_in",
    "kir": "ky_kg",
    "kor": "ko_kr",
    "lit": "lt_lt",
    "mkd": "mk_mk",
    "mlt": "mt_mt",
    "mya": "my_mm",
    "nld": "nl_nl",
    "pan": "pa_in",
    "pol": "pl_pl",
    "ron": "ro_ro",
    "rus": "ru_ru",
    "snd": "sd_in",
    "slk": "sk_sk",
    "srp": "sr_rs",
    "swh": "sw_ke",
    "tam": "ta_in",
    "tel": "te_in",
    "tha": "th_th",
    "tur": "tr_tr",
    "ukr": "uk_ua",
    "urd": "ur_pk",
    "uzn": "uz_uz",
    "vie": "vi_vn",
    "yor": "yo_ng",
    "zul": "zu_za",
}


def _check_lang_code_mapping(lang: str) -> None:
    """Raise ValueError when no M4T->FLEURS code mapping exists for `lang`."""
    if lang not in UNITY_TO_FLEURS_LANG_MAPPING:
        raise ValueError(
            f"No language code mapping for {lang}(M4T)->??(FLEURs). "
            "Please expand `UNITY_TO_FLEURS_LANG_MAPPING`"
        )


def download_fleurs_dataset(
    source_lang: str,
    target_lang: str,
    split: str,
    save_directory: str,
) -> str:
    """Download one FLEURS split for a language pair, extract target units
    and write the samples to a JSON-lines manifest.

    Returns the manifest path.
    """
    _check_lang_code_mapping(source_lang)
    _check_lang_code_mapping(target_lang)
    # No speech tokenizer is passed here; presumably the builder falls back to
    # its own default for unit extraction -- confirm against its signature.
    tokenizer = None
    dataset_iterator = Speech2SpeechFleursDatasetBuilder(
        source_lang=UNITY_TO_FLEURS_LANG_MAPPING[source_lang],
        target_lang=UNITY_TO_FLEURS_LANG_MAPPING[target_lang],
        dataset_cache_dir=save_directory,
        speech_tokenizer=tokenizer,
        skip_source_audio=True,  # don't extract units from source audio
        skip_target_audio=False,
        split=split,
    )
    manifest_path: str = os.path.join(save_directory, f"{split}_manifest.json")
    with open(manifest_path, "w") as fp_out:
        for idx, sample in enumerate(dataset_iterator, start=1):
            # correction as FleursDatasetBuilder return fleurs lang codes
            sample.source.lang = source_lang
            sample.target.lang = target_lang
            sample.target.waveform = None  # already extracted units
            fp_out.write(json.dumps(dataclasses.asdict(sample)) + "\n")
    logger.info(f"Saved {idx} samples for split={split} to {manifest_path}")
    return manifest_path


def init_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the dataset preparation helper."""
    parser = argparse.ArgumentParser(
        description=(
            "Helper script to download training/evaluation dataset (FLEURS),"
            "extract units from target audio and save the dataset as a manifest "
            "consumable by `finetune.py`."
        )
    )
    parser.add_argument(
        "--source_lang",
        type=str,
        required=True,
        help="M4T langcode of the dataset SOURCE language",
    )
    parser.add_argument(
        "--target_lang",
        type=str,
        required=True,
        help="M4T langcode of the dataset TARGET language",
    )
    parser.add_argument(
        "--split",
        type=str,
        required=True,
        help="Dataset split/shard to download (`train`, `validation`, `test`)",
    )
    parser.add_argument(
        "--save_dir",
        type=Path,
        required=True,
        # Typo fixed: "datastets" -> "datasets".
        help="Directory where the datasets will be stored with HuggingFace datasets cache files",
    )
    return parser


def main(args: Namespace) -> None:
    """Download the requested split and report where the manifest was saved."""
    manifest_path = download_fleurs_dataset(
        source_lang=args.source_lang,
        target_lang=args.target_lang,
        split=args.split,
        save_directory=args.save_dir,
    )
    logger.info(f"Manifest saved to: {manifest_path}")


if __name__ == "__main__":
    args = init_parser().parse_args()
    main(args)


# ---- next file in patch: scripts/m4t/finetune/dist_utils.py ----
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from datetime import timedelta
from typing import List

import torch
import torch.distributed as dist
import torch.multiprocessing

logger = logging.getLogger(__name__)


def is_dist_initialized() -> bool:
    """Return True iff torch.distributed is available and a process group
    has been initialized."""
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_rank() -> int:
    """Global rank of this process; 0 when not running distributed."""
    if not is_dist_initialized():
        return 0
    return dist.get_rank()


def get_local_rank() -> int:
    """Node-local rank from the LOCAL_RANK env var; 0 when not distributed."""
    if not is_dist_initialized():
        return 0
    return int(os.environ["LOCAL_RANK"])


def get_world_size() -> int:
    """Total number of processes in the group; 1 when not distributed."""
    if not is_dist_initialized():
        return 1
    return dist.get_world_size()


def is_main_process() -> bool:
    """True for the rank-0 process (or any process when not distributed)."""
    return get_rank() == 0


def init_distributed(loggers: List[logging.Logger]) -> None:
    """Initializes the distributed backend (NCCL) from torchrun env vars.

    Non-main processes get the given `loggers` muted to ERROR so that logs
    are emitted once. No-op (with an error log) when RANK is not set.
    """
    torch.multiprocessing.set_start_method("spawn")
    if "RANK" not in os.environ:
        # Typos fixed: "disributed" -> "distributed", "varaibles" -> "variables".
        logger.error(
            "Cannot init distributed context, as environment variables are not set."
        )
        return
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    local_rank = int(os.environ["LOCAL_RANK"])
    logger.info(
        f"Rank={rank} local rank={local_rank}, world_size={world_size}, is_master={rank == 0}"
    )
    dist.init_process_group(
        backend="nccl",
        init_method="env://",
        world_size=world_size,
        rank=rank,
        timeout=timedelta(seconds=180),
    )
    logger.info(f"Setting cuda:{local_rank} as main device")
    if not is_main_process():
        for to_mute in loggers:
            to_mute.setLevel(logging.ERROR)
    torch.cuda.set_device(local_rank)
    dist.barrier()


# ---- next file in patch: scripts/m4t/finetune/finetune.py ----
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import logging
import os
from argparse import Namespace
from pathlib import Path

import dataloader
import dist_utils
import torch
import trainer
from fairseq2.models.nllb.tokenizer import NllbTokenizer

from seamless_communication.models.unity import (
    UnitTokenizer,
    UnitYModel,
    load_unity_model,
    load_unity_text_tokenizer,
    load_unity_unit_tokenizer,
)

logging.basicConfig(
    level=logging.INFO,
    format=f"%(asctime)s %(levelname)s -- %(name)s.{os.getpid()}: %(message)s",
)

logger = logging.getLogger("finetune")


def init_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for the M4T finetuning script."""
    parser = argparse.ArgumentParser(
        description="Example finetuning script for M4T models"
    )
    parser.add_argument(
        "--train_dataset",
        type=Path,
        required=True,
        help="Path to manifest with train samples",
    )
    parser.add_argument(
        "--eval_dataset",
        type=Path,
        required=True,
        help="Path to manifest with eval samples",
    )
    parser.add_argument(
        "--model_name",
        type=str,
        default="seamlessM4T_medium",
        help="Base model name (`seamlessM4T_medium`, `seamlessM4T_large`)",
    )
    parser.add_argument(
        "--save_model_to",
        type=Path,
        required=True,
        help="Path to save best finetuned model",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=2343,
        help="Randomizer seed value",
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=5,
        help="Batch size for training and evaluation",
    )
    parser.add_argument(
        "--patience",
        type=int,
        default=3,
        help=(
            "Set early termination after `patience` number of evaluations "
            "without eval loss improvements"
        ),
    )
    parser.add_argument(
        "--max_epochs",
        type=int,
        default=10,
        help=("Max number of training epochs"),
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-7,
        help=("Finetuning learning rate"),
    )
    parser.add_argument(
        "--warmup_steps",
        type=int,
        default=100,
        help=("Number of steps with linearly increasing learning rate"),
    )
    parser.add_argument(
        "--eval_steps",
        type=int,
        default=50,
        help=("Get eval loss after each `eval_steps` training steps "),
    )
    parser.add_argument(
        "--log_steps",
        type=int,
        default=10,
        help=("Log inner loss after each `log_steps` training steps"),
    )
    parser.add_argument(
        "--mode",
        type=trainer.FinetuneMode,
        choices=list(trainer.FinetuneMode),
        default=trainer.FinetuneMode.TEXT_TO_SPEECH,
        help=(
            "* `SPEECH_TO_SPEECH` -- finetune S2T and T2U parts of the model; "
            "* `TEXT_TO_SPEECH` -- finetune only T2U; "
            "* `SPEECH_TO_TEXT` -- finetune only S2T"
        ),
    )
    return parser


def run_finetune(args: Namespace) -> None:
    """Initialize distributed training, load model/tokenizers/data loaders
    and run the finetuning loop."""
    dist_utils.init_distributed([logger, trainer.logger])
    # Apply the user-provided seed for reproducibility.  The --seed flag was
    # previously parsed but never used.
    torch.manual_seed(args.seed)
    device = torch.device("cuda")
    text_tokenizer: NllbTokenizer = load_unity_text_tokenizer(args.model_name)
    unit_tokenizer: UnitTokenizer = load_unity_unit_tokenizer(args.model_name)
    finetune_params = trainer.FinetuneParams(
        finetune_mode=args.mode,
        save_model_path=args.save_model_to,
        device=device,
        train_batch_size=args.batch_size,
        eval_batch_size=args.batch_size,
        patience=args.patience,
        max_epochs=args.max_epochs,
        learning_rate=args.learning_rate,
        warmup_steps=args.warmup_steps,
        eval_steps=args.eval_steps,
        log_steps=args.log_steps,
    )
    logger.info(f"Finetune params: {finetune_params}")
    model: UnitYModel = load_unity_model(
        args.model_name, device=finetune_params.device, dtype=torch.float16
    )
    logger.info(f"Model {model}")
    # Tokenizer/model pad indices must agree for loss masking to be correct.
    assert model.pad_idx == text_tokenizer.vocab_info.pad_idx
    assert model.t2u_model is not None
    assert model.t2u_model.pad_idx == unit_tokenizer.vocab_info.pad_idx

    train_dataloader = dataloader.UnitYDataLoader(
        text_tokenizer=text_tokenizer,
        unit_tokenizer=unit_tokenizer,
        batching_config=dataloader.BatchingConfig(
            batch_size=finetune_params.train_batch_size,
            rank=dist_utils.get_rank(),
            world_size=dist_utils.get_world_size(),
        ),
        dataset_manifest_path=args.train_dataset,
    )
    eval_dataloader = dataloader.UnitYDataLoader(
        text_tokenizer=text_tokenizer,
        unit_tokenizer=unit_tokenizer,
        batching_config=dataloader.BatchingConfig(
            batch_size=finetune_params.eval_batch_size,
            rank=dist_utils.get_rank(),
            world_size=dist_utils.get_world_size(),
        ),
        dataset_manifest_path=args.eval_dataset,
    )
    finetune = trainer.UnitYFinetune(
        model=model,
        params=finetune_params,
        train_data_loader=train_dataloader,
        eval_data_loader=eval_dataloader,
    )
    finetune.run()


if __name__ == "__main__":
    parser = init_parser()
    run_finetune(parser.parse_args())


# ---- next file in patch: scripts/m4t/finetune/trainer.py ----
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from contextlib import nullcontext
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Optional, Tuple

import dataloader
import dist_utils
import torch
import torch.distributed as dist
import torch.nn as nn
from fairseq2.models.sequence import SequenceModelOutput
from fairseq2.models.unity import UnitYModel
from fairseq2.optim.lr_scheduler import MyleLR
from fairseq2.typing import Device
from torch.optim import Adam

logger = logging.getLogger(__name__)


class FinetuneMode(Enum):
    """Which parts of the UnitY model receive gradient updates."""

    SPEECH_TO_SPEECH = "SPEECH_TO_SPEECH"
    SPEECH_TO_TEXT = "SPEECH_TO_TEXT"
    TEXT_TO_SPEECH = "TEXT_TO_SPEECH"


@dataclass
class FinetuneParams:
    save_model_path: Path
    """Path where the finetuned model will be saved."""

    finetune_mode: FinetuneMode = FinetuneMode.TEXT_TO_SPEECH
    """Allows to freeze S2T or T2U part of the model"""

    max_epochs: int = 10
    """ Maximum number of training epochs"""

    label_smoothing: float = 0.2
    """ Label smoothing coefficient for nll_loss """

    warmup_steps: int = 100
    """ Number of steps with linearly increasing LR"""

    log_steps: int = 10
    """ Log inner loss after each `log_steps` training steps"""

    eval_steps: int = 50
    """ Get eval loss after each `eval_steps` training steps """

    patience: int = 3
    """ Terminate if eval loss did not improve
    over the last `patience * eval_steps` training steps"""

    learning_rate: float = 1e-5
    """ Optimizer learning rate """

    train_batch_size: int = 5
    """The batch size during train steps"""

    eval_batch_size: int = 5
    """The batch size during evaluation."""

    device: Device = torch.device("cuda")
    """ Where to run computation"""


class UnitYFinetuneWrapper(nn.Module):
    """Convenience wrapper that does a forward pass
    and returns S2T and T2U logits"""

    def __init__(self, model: UnitYModel, mode: FinetuneMode, device: Device):
        super().__init__()
        assert model.t2u_model is not None
        self.model: UnitYModel = model
        # The frozen part still runs forward, but under torch.no_grad().
        self.freeze_s2t: bool = mode == FinetuneMode.TEXT_TO_SPEECH
        self.freeze_t2u: bool = mode == FinetuneMode.SPEECH_TO_TEXT
        self.device = device

    def forward(
        self, batch: dataloader.MultimodalSeqsBatch
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        assert self.model.t2u_model is not None
        # nullcontext() replaces the previous contextmanager(lambda: ...) hack.
        with torch.no_grad() if self.freeze_s2t else nullcontext():  # type:ignore
            assert batch.speech_to_text.src_tokens is not None
            speech_encoder_out, speech_encoder_padding_mask = self.model.encode_speech(
                seqs=batch.speech_to_text.src_tokens.to(self.device),
                seq_lens=batch.speech_to_text.src_lengths.to(self.device),
            )
            assert batch.speech_to_text.prev_output_tokens is not None
            text_decoder_out, text_decoder_padding_mask = self.model.decode(
                seqs=batch.speech_to_text.prev_output_tokens.to(self.device),
                seq_lens=batch.speech_to_text.target_lengths.to(self.device),
                encoder_output=speech_encoder_out,
                encoder_padding_mask=speech_encoder_padding_mask,
            )
            text_logits = self.model.final_proj(text_decoder_out)
        if batch.text_to_units.prev_output_tokens is None:
            # Batch carries no unit targets -> S2T logits only.
            return (text_logits, None)
        with torch.no_grad() if self.freeze_t2u else nullcontext():  # type:ignore
            (
                unit_encoder_out,
                unit_encoder_padding_mask,
            ) = self.model.t2u_model.encode(
                text_decoder_output=text_decoder_out,
                text_decoder_padding_mask=text_decoder_padding_mask,
            )
            unit_decoder_out, _ = self.model.t2u_model.decode(
                seqs=batch.text_to_units.prev_output_tokens.to(self.device),
                seq_lens=batch.text_to_units.target_lengths.to(self.device),
                encoder_output=unit_encoder_out,
                encoder_padding_mask=unit_encoder_padding_mask,
            )
            unit_logits = self.model.t2u_model.final_proj(unit_decoder_out)

        return (text_logits, unit_logits)


class CalcLoss:
    """Calculates negative log likelihood loss for S2T and T2U"""

    def __init__(
        self,
        label_smoothing: float,
        s2t_pad_idx: Optional[int],
        t2u_pad_idx: Optional[int],
    ):
        self.label_smoothing = label_smoothing
        self.s2t_pad_idx = s2t_pad_idx
        self.t2u_pad_idx = t2u_pad_idx

    def __call__(
        self,
        batch: dataloader.MultimodalSeqsBatch,
        text_logits: torch.Tensor,
        unit_logits: Optional[torch.Tensor],
    ) -> torch.Tensor:
        assert batch.speech_to_text.target_lengths is not None
        s2t_numel = torch.sum(batch.speech_to_text.target_lengths).to(
            text_logits.device
        )
        # ignore_prefix_size=1 excludes the first target token from the loss
        # (presumably the language token -- confirm against the tokenizer).
        s2t_loss = SequenceModelOutput(
            logits=text_logits, pad_idx=self.s2t_pad_idx
        ).compute_loss(
            targets=batch.speech_to_text.target_tokens.to(text_logits.device),
            ignore_prefix_size=1,
            label_smoothing=self.label_smoothing,
        )
        if unit_logits is None:
            return s2t_loss / s2t_numel
        assert batch.text_to_units.target_lengths is not None
        s2u_numel = torch.sum(batch.text_to_units.target_lengths).to(unit_logits.device)
        s2u_loss = SequenceModelOutput(
            logits=unit_logits, pad_idx=self.t2u_pad_idx
        ).compute_loss(
            targets=batch.text_to_units.target_tokens.to(unit_logits.device),
            ignore_prefix_size=1,
            label_smoothing=self.label_smoothing,
        )
        # Per-token-normalized losses of the two tasks are simply summed.
        return s2t_loss / s2t_numel + s2u_loss / s2u_numel


class LossCollector:
    """Aggregates loss history across nodes"""

    def __init__(self, device: Optional[Device] = None, reduce_op: str = "avg"):
        self.n_samples: float = 0
        self.val_sum: float = 0.0
        self.reduce_op = reduce_op
        self.device = device
        self.is_distributed = dist_utils.is_dist_initialized()

    def reset(self) -> None:
        self.n_samples = 0
        self.val_sum = 0.0

    def update(self, n_samples: int, batch_loss: float) -> None:
        self.n_samples += n_samples
        self.val_sum += batch_loss

    def reduce(self) -> float:
        n_samples, val_sum = self._collect()
        if self.reduce_op == "avg":
            # NOTE(review): denominator is n_samples + 1, which avoids division
            # by zero but slightly biases the average low -- confirm this is
            # intentional before changing it.
            return val_sum / (n_samples + 1)
        if self.reduce_op == "sum":
            return val_sum
        raise ValueError()

    def _collect(self) -> Tuple[float, float]:
        """Gather (n_samples, val_sum) from all ranks and sum them."""
        if not self.is_distributed:
            return self.n_samples, self.val_sum
        local_val = torch.tensor([[self.n_samples, self.val_sum]], device=self.device)
        all_vals = [
            torch.zeros((1, 2), device=self.device)
            for _ in range(dist_utils.get_world_size())
        ]
        dist.all_gather(all_vals, local_val)
        losses = torch.concat(all_vals, dim=0)
        reduced = torch.sum(losses, dim=0).reshape(2).cpu()
        return reduced[0].item(), reduced[1].item()


class UnitYFinetune:
    """Finetuning driver: train loop, periodic eval, early stopping, saving."""

    def __init__(
        self,
        model: UnitYModel,
        params: FinetuneParams,
        train_data_loader: dataloader.UnitYDataLoader,
        eval_data_loader: Optional[dataloader.UnitYDataLoader] = None,
    ):
        self.params = params

        assert model.t2u_model is not None
        self.calc_loss = CalcLoss(
            label_smoothing=self.params.label_smoothing,
            s2t_pad_idx=model.pad_idx,
            t2u_pad_idx=model.t2u_model.pad_idx,
        )
        self.model = self._wrap_model_for_training(model=model)
        self.train_data_loader = train_data_loader
        self.eval_data_loader = eval_data_loader
        self.optimizer = Adam(
            params=self.model.parameters(),
            lr=self.params.learning_rate,
            betas=(0.9, 0.98),
            eps=1e-08,
            maximize=False,
            weight_decay=0.0,
            fused=True,
        )
        self.grad_scaler = torch.cuda.amp.GradScaler()
        self.lr_scheduler = MyleLR(
            optimizer=self.optimizer,
            num_warmup_steps=self.params.warmup_steps,
            start_lr=1e-9,
        )

        self.train_loss_hist = LossCollector(device=params.device)
        self.epoch_idx: int = 0
        self.update_idx: int = 0
        self.patience_left: int = self.params.patience
        self.best_eval_loss: Optional[float] = None
        self.is_best_state: bool = False

    def _reset_stats(self) -> None:
        self.train_loss_hist.reset()
        self.epoch_idx = 0
        self.update_idx = 0
        self.patience_left = self.params.patience
        self.best_eval_loss = None
        self.is_best_state = False

    def _wrap_model_for_training(self, model: UnitYModel) -> nn.Module:
        """Wrap the model (and under distributed training, DDP-wrap it).

        Renamed from the original `_wrap_model_for_trainining` (typo).
        """
        wrapped_model = UnitYFinetuneWrapper(
            model=model, mode=self.params.finetune_mode, device=self.params.device
        )
        if not dist_utils.is_dist_initialized():
            return wrapped_model
        return nn.parallel.DistributedDataParallel(
            wrapped_model,
            device_ids=[dist_utils.get_local_rank()],
            find_unused_parameters=True,
        )

    def _update_eval_stats(self, eval_loss: float) -> None:
        self.is_best_state = (
            self.best_eval_loss is None or eval_loss < self.best_eval_loss
        )
        self.best_eval_loss = eval_loss if self.is_best_state else self.best_eval_loss
        self.patience_left = (
            self.params.patience if self.is_best_state else self.patience_left - 1
        )
        logger.info(
            f"Eval after {self.update_idx} updates: "
            f"loss={eval_loss:.4f} "
            f"best_loss={self.best_eval_loss:.4f} "
            f"patience_steps_left={self.patience_left}"
        )

    def _eval_model(self) -> None:
        """Calc avg loss on eval dataset and update evaluation stats"""
        if self.eval_data_loader is None:
            return
        logger.info("Run evaluation")
        loss_hist = LossCollector(device=self.params.device)
        self.model.eval()
        with torch.no_grad():
            for batch in self.eval_data_loader.get_dataloader():
                assert batch.speech_to_text.src_tokens is not None
                loss = self.calc_loss(batch, *self.model(batch))
                if loss.isnan():
                    logger.warning("Eval loss value is NaN, setting to inf")
                    loss_val = float("Inf")
                else:
                    loss_val = loss.item()
                del batch  # force memory release
                loss_hist.update(1, loss_val)
        eval_loss = loss_hist.reduce()
        self._update_eval_stats(eval_loss)

    def _train_step_log(self):
        """Log train stats"""
        if (self.update_idx + 1) % self.params.log_steps == 0:
            avg_loss = self.train_loss_hist.reduce()
            self.train_loss_hist.reset()
            logger.info(
                f"Epoch {str(self.epoch_idx + 1).zfill(3)} / "
                f"update {str(self.update_idx + 1).zfill(5)}: "
                f"train loss={avg_loss:.4f} "
                f"last lr={self.lr_scheduler.get_last_lr()[0]:.2E}"
            )

    def _train_step(self, batch: dataloader.MultimodalSeqsBatch) -> None:
        """Run one train step"""
        self.model.train()
        self.optimizer.zero_grad()
        tokens, units = self.model(batch)
        loss = self.calc_loss(batch, tokens, units)
        self.grad_scaler.scale(loss).backward()
        self.grad_scaler.step(self.optimizer)
        self.grad_scaler.update()
        self.lr_scheduler.step()
        assert batch.speech_to_text.src_tokens is not None
        self.train_loss_hist.update(1, loss.item())
        self._train_step_log()

    def _save_model(self):
        """Save the (unwrapped) model state dict on the main process."""
        logger.info("Saving model")
        if dist_utils.is_main_process():
            # Strip the wrapper prefix: "module.model." under DDP, plain
            # "model." when running single-process (the original only handled
            # the DDP case, leaving a "model." prefix otherwise).
            def _strip(key: str) -> str:
                for prefix in ("module.model.", "model."):
                    if key.startswith(prefix):
                        return key[len(prefix):]
                return key

            state_dict = {
                _strip(key): value for key, value in self.model.state_dict().items()
            }
            torch.save(state_dict, self.params.save_model_path)
        if dist_utils.is_dist_initialized():
            dist.barrier()

    def run(self):
        """Main loop: epochs of train steps with periodic eval/save and
        patience-based early termination."""
        logger.info("Start finetuning")
        self._reset_stats()
        self._eval_model()
        batch_itr = self.train_data_loader.get_dataloader()
        while self.epoch_idx < self.params.max_epochs and self.patience_left:
            for train_batch in batch_itr:
                self._train_step(batch=train_batch)
                if self.update_idx and self.update_idx % self.params.eval_steps == 0:
                    self._eval_model()
                    if self.is_best_state:
                        self._save_model()
                    elif not self.patience_left:
                        no_improve_steps = self.params.eval_steps * self.params.patience
                        logger.info(
                            "Early termination, as eval loss did not improve "
                            f"over last {no_improve_steps} updates"
                        )
                        break
                self.update_idx += 1
            self.epoch_idx += 1


# ---- next file in patch: scripts/m4t/predict/README.md (start, preserved) ----
# # Inference with SeamlessM4T models
#
# SeamlessM4T models currently support five tasks:
# - Speech-to-speech translation (S2ST)
# - Speech-to-text translation (S2TT)
# - Text-to-speech translation (T2ST)
# - Text-to-text translation (T2TT)
# - Automatic speech recognition (ASR)
#
# ## Quick start:
# Inference is run with the CLI, from the root directory of the repository.
+ +The model can be specified with `--model_name` `seamlessM4T_large` or `seamlessM4T_medium`: + +**S2ST**: +```bash +python scripts/m4t/predict/predict.py s2st --output_path --model_name seamlessM4T_large +``` + +**S2TT**: +```bash +python scripts/m4t/predict/predict.py s2tt +``` + +**T2TT**: +```bash +python scripts/m4t/predict/predict.py t2tt --src_lang +``` + +**T2ST**: +```bash +python scripts/m4t/predict/predict.py t2st --src_lang --output_path +``` + +**ASR**: +```bash +python scripts/m4t/predict/predict.py asr +``` + +## Inference breakdown + +Inference calls for the `Translator` object instantiated with a multitask UnitY model with the options: +- [`seamlessM4T_large`](https://huggingface.co/facebook/seamless-m4t-large) +- [`seamlessM4T_medium`](https://huggingface.co/facebook/seamless-m4t-medium) + +and a vocoder `vocoder_36langs` + +```python +import torch +import torchaudio +from seamless_communication.models.inference import Translator + + +# Initialize a Translator object with a multitask model, vocoder on the GPU. +translator = Translator("seamlessM4T_large", "vocoder_36langs", torch.device("cuda:0")) +``` + +Now `predict()` can be used to run inference as many times on any of the supported tasks. + +Given an input audio with `` or an input text `` in ``, +we can translate into `` as follows: + +## S2ST and T2ST: + +```python +# S2ST +translated_text, wav, sr = translator.predict(, "s2st", ) + +# T2ST +translated_text, wav, sr = translator.predict(, "t2st", , src_lang=) + +``` +Note that `` must be specified for T2ST. + +The generated units are synthesized and the output audio file is saved with: + +```python +wav, sr = translator.synthesize_speech(, ) + +# Save the translated audio generation. +torchaudio.save( + , + wav[0].cpu(), + sample_rate=sr, +) +``` +## S2TT, T2TT and ASR: + +```python +# S2TT +translated_text, _, _ = translator.predict(, "s2tt", ) + +# ASR +# This is equivalent to S2TT with `=`. 
+transcribed_text, _, _ = translator.predict(, "asr", ) + +# T2TT +translated_text, _, _ = translator.predict(, "t2tt", , src_lang=) + +``` +Note that `` must be specified for T2TT + +## Supported languages +Listed below, are the languages supported by SeamlessM4T models. +The `source` column specifies whether a language is supported as source speech (`Sp`) and/or source text (`Tx`). +The `target` column specifies whether a language is supported as target speech (`Sp`) and/or target text (`Tx`). + +| code | language | script | Source | Target | +| ---- | ---------------------- | ---------- | ------ | ------ | +| afr | Afrikaans | Latn | Sp, Tx | Tx | +| amh | Amharic | Ethi | Sp, Tx | Tx | +| arb | Modern Standard Arabic | Arab | Sp, Tx | Sp, Tx | +| ary | Moroccan Arabic | Arab | Sp, Tx | Tx | +| arz | Egyptian Arabic | Arab | Sp, Tx | Tx | +| asm | Assamese | Beng | Sp, Tx | Tx | +| ast | Asturian | Latn | Sp | \-- | +| azj | North Azerbaijani | Latn | Sp, Tx | Tx | +| bel | Belarusian | Cyrl | Sp, Tx | Tx | +| ben | Bengali | Beng | Sp, Tx | Sp, Tx | +| bos | Bosnian | Latn | Sp, Tx | Tx | +| bul | Bulgarian | Cyrl | Sp, Tx | Tx | +| cat | Catalan | Latn | Sp, Tx | Sp, Tx | +| ceb | Cebuano | Latn | Sp, Tx | Tx | +| ces | Czech | Latn | Sp, Tx | Sp, Tx | +| ckb | Central Kurdish | Arab | Sp, Tx | Tx | +| cmn | Mandarin Chinese | Hans, Hant | Sp, Tx | Sp, Tx | +| cym | Welsh | Latn | Sp, Tx | Sp, Tx | +| dan | Danish | Latn | Sp, Tx | Sp, Tx | +| deu | German | Latn | Sp, Tx | Sp, Tx | +| ell | Greek | Grek | Sp, Tx | Tx | +| eng | English | Latn | Sp, Tx | Sp, Tx | +| est | Estonian | Latn | Sp, Tx | Sp, Tx | +| eus | Basque | Latn | Sp, Tx | Tx | +| fin | Finnish | Latn | Sp, Tx | Sp, Tx | +| fra | French | Latn | Sp, Tx | Sp, Tx | +| gaz | West Central Oromo | Latn | Sp, Tx | Tx | +| gle | Irish | Latn | Sp, Tx | Tx | +| glg | Galician | Latn | Sp, Tx | Tx | +| guj | Gujarati | Gujr | Sp, Tx | Tx | +| heb | Hebrew | Hebr | Sp, Tx | Tx | +| hin | Hindi | Deva | 
Sp, Tx | Sp, Tx | +| hrv | Croatian | Latn | Sp, Tx | Tx | +| hun | Hungarian | Latn | Sp, Tx | Tx | +| hye | Armenian | Armn | Sp, Tx | Tx | +| ibo | Igbo | Latn | Sp, Tx | Tx | +| ind | Indonesian | Latn | Sp, Tx | Sp, Tx | +| isl | Icelandic | Latn | Sp, Tx | Tx | +| ita | Italian | Latn | Sp, Tx | Sp, Tx | +| jav | Javanese | Latn | Sp, Tx | Tx | +| jpn | Japanese | Jpan | Sp, Tx | Sp, Tx | +| kam | Kamba | Latn | Sp | \-- | +| kan | Kannada | Knda | Sp, Tx | Tx | +| kat | Georgian | Geor | Sp, Tx | Tx | +| kaz | Kazakh | Cyrl | Sp, Tx | Tx | +| kea | Kabuverdianu | Latn | Sp | \-- | +| khk | Halh Mongolian | Cyrl | Sp, Tx | Tx | +| khm | Khmer | Khmr | Sp, Tx | Tx | +| kir | Kyrgyz | Cyrl | Sp, Tx | Tx | +| kor | Korean | Kore | Sp, Tx | Sp, Tx | +| lao | Lao | Laoo | Sp, Tx | Tx | +| lit | Lithuanian | Latn | Sp, Tx | Tx | +| ltz | Luxembourgish | Latn | Sp | \-- | +| lug | Ganda | Latn | Sp, Tx | Tx | +| luo | Luo | Latn | Sp, Tx | Tx | +| lvs | Standard Latvian | Latn | Sp, Tx | Tx | +| mai | Maithili | Deva | Sp, Tx | Tx | +| mal | Malayalam | Mlym | Sp, Tx | Tx | +| mar | Marathi | Deva | Sp, Tx | Tx | +| mkd | Macedonian | Cyrl | Sp, Tx | Tx | +| mlt | Maltese | Latn | Sp, Tx | Sp, Tx | +| mni | Meitei | Beng | Sp, Tx | Tx | +| mya | Burmese | Mymr | Sp, Tx | Tx | +| nld | Dutch | Latn | Sp, Tx | Sp, Tx | +| nno | Norwegian Nynorsk | Latn | Sp, Tx | Tx | +| nob | Norwegian Bokmål | Latn | Sp, Tx | Tx | +| npi | Nepali | Deva | Sp, Tx | Tx | +| nya | Nyanja | Latn | Sp, Tx | Tx | +| oci | Occitan | Latn | Sp | \-- | +| ory | Odia | Orya | Sp, Tx | Tx | +| pan | Punjabi | Guru | Sp, Tx | Tx | +| pbt | Southern Pashto | Arab | Sp, Tx | Tx | +| pes | Western Persian | Arab | Sp, Tx | Sp, Tx | +| pol | Polish | Latn | Sp, Tx | Sp, Tx | +| por | Portuguese | Latn | Sp, Tx | Sp, Tx | +| ron | Romanian | Latn | Sp, Tx | Sp, Tx | +| rus | Russian | Cyrl | Sp, Tx | Sp, Tx | +| slk | Slovak | Latn | Sp, Tx | Sp, Tx | +| slv | Slovenian | Latn | Sp, Tx | Tx | +| sna 
| Shona | Latn | Sp, Tx | Tx | +| snd | Sindhi | Arab | Sp, Tx | Tx | +| som | Somali | Latn | Sp, Tx | Tx | +| spa | Spanish | Latn | Sp, Tx | Sp, Tx | +| srp | Serbian | Cyrl | Sp, Tx | Tx | +| swe | Swedish | Latn | Sp, Tx | Sp, Tx | +| swh | Swahili | Latn | Sp, Tx | Sp, Tx | +| tam | Tamil | Taml | Sp, Tx | Tx | +| tel | Telugu | Telu | Sp, Tx | Sp, Tx | +| tgk | Tajik | Cyrl | Sp, Tx | Tx | +| tgl | Tagalog | Latn | Sp, Tx | Sp, Tx | +| tha | Thai | Thai | Sp, Tx | Sp, Tx | +| tur | Turkish | Latn | Sp, Tx | Sp, Tx | +| ukr | Ukrainian | Cyrl | Sp, Tx | Sp, Tx | +| urd | Urdu | Arab | Sp, Tx | Sp, Tx | +| uzn | Northern Uzbek | Latn | Sp, Tx | Sp, Tx | +| vie | Vietnamese | Latn | Sp, Tx | Sp, Tx | +| xho | Xhosa | Latn | Sp | \-- | +| yor | Yoruba | Latn | Sp, Tx | Tx | +| yue | Cantonese | Hant | Sp, Tx | Tx | +| zlm | Colloquial Malay | Latn | Sp | \-- | +| zsm | Standard Malay | Latn | Tx | Tx | +| zul | Zulu | Latn | Sp, Tx | Tx | diff --git a/scripts/m4t/predict/predict.py b/scripts/m4t/predict/predict.py new file mode 100644 index 00000000..354ca0d4 --- /dev/null +++ b/scripts/m4t/predict/predict.py @@ -0,0 +1,76 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +import logging +import torch +import torchaudio +from seamless_communication.models.inference import Translator + + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def main(): + parser = argparse.ArgumentParser( + description="M4T inference on supported tasks using Translator." + ) + parser.add_argument("input", type=str, help="Audio WAV file path or text input.") + parser.add_argument("task", type=str, help="Task type") + parser.add_argument( + "tgt_lang", type=str, help="Target language to translate/transcribe into." 
+ ) + parser.add_argument( + "--src_lang", + type=str, + help="Source language, only required if input is text.", + default=None, + ) + parser.add_argument( + "--output_path", + type=str, + help="Path to save the generated audio.", + default=None, + ) + parser.add_argument( + "--model_name", + type=str, + help="Base model name (`seamlessM4T_medium`, `seamlessM4T_large`)", + default="seamlessM4T_large", + ) + parser.add_argument( + "--vocoder_name", type=str, help="Vocoder name", default="vocoder_36langs" + ) + + args = parser.parse_args() + + if args.task.upper() in {"S2ST", "T2ST"} and args.output_path is None: + raise ValueError("output_path must be provided to save the generated audio") + + if torch.cuda.is_available(): + device = torch.device("cuda:0") + logger.info("Running inference on the GPU.") + else: + device = torch.device("cpu") + logger.info("Running inference on the CPU.") + + translator = Translator(args.model_name, args.vocoder_name, device) + translated_text, wav, sr = translator.predict( + args.input, args.task, args.tgt_lang, src_lang=args.src_lang + ) + + if wav is not None and sr is not None: + logger.info(f"Saving translated audio in {args.tgt_lang}") + torchaudio.save( + args.output_path, + wav[0].cpu(), + sample_rate=sr, + ) + logger.info(f"Translated text in {args.tgt_lang}: {translated_text}") + + +if __name__ == "__main__": + main() diff --git a/seamlessM4T.png b/seamlessM4T.png new file mode 100644 index 0000000000000000000000000000000000000000..cd33a9e134d1b6ec85694e3a1967423bc689b16b GIT binary patch literal 198534 zcmV)eK&HQmP)at5VQ9hz=bbGKoXf z(h7EQXe$&&FjNJrQ<{DWZG0ptQgIVkDfk~)!C7#yh*WTKa1cZX5#5|RDY$5O-j`I` zBHqX4{WzR+xm>^-P#G)s0x0R0kxay-wbZ)gdxM9bQ>tdNsG=+i{{6e_^U?L*Pl#Df zyLJ%SPh6MIE|+$m0#kqeUDcn-ni~Dz)Ip6I7T}SIm2Ha&-X$I}Xer{V;JnMng3~Ua zJD!zfocNYl(h6#ZxJfLhJM?@9mx^VrwS(B+pVe2F#T@EU%wZEI7>ZC)fdmENfBe&q zKaMSOS71;sj{+>pL`e}7vc&VypY?`La=`luFqi^{?gXywJzgFd<3_&8=TK+j__Bk_`$KM+7Y(;Kh3mEXg 
zm@3T>wDcow@MtVpyjnZyK{}w-qyF<(vIwj6pN%#&{>050wE;+yymOZA85RC2nYeM| z#(QyA(CEMmF*3FbrTm`v-(cBZz4e;EN$H)vVO7UcM@fgN0 z`9#mN)*i-VJt@tM*W-LC2NTP^)%&X3wtH&FGw~*ksuU$eg*r*c#yBK+=z%kWD(M>F z;UV|%Y+PGUw8*TShgJG-WpalfhNIms(tZi&T`;8knZXECi$6?vF zkvPcm{+YN{x|59eMsDWg{q91<1wdyvrW})(a23~41?zQ;JV<&yG|=luCNs&~Q7Lj< z*tuH{sp-$_)t~;G$N6DdDhd1`j7o)BQ=WL}Z0aWIt5a4@z=&*C_jHw<(EeXQ1dP!- z>kwstRjVYb${id2{h@gDJ_dG-gW54~wx%0_4xUi3m12@<*U=ZMu~0k8kHLQ@Q9fbo z6sOwn+rY++h6CH!eQwnk6&JjvvNxeF`e`8|MQa(l%wEaJ8ugWs3C%Y;T}^7O!HOn3 z<1OC0LZ#{GFzOYJWV}Yav6e0_Fe}%fs{hCsRpc?+Qa%4Rk9^hFY_rm&4Y1%AOMzYv zwYYHT@wjp0#*I${GPOUb*e>M#w|n$DvYu^x#*6<|r~)9^g6ysgd#0Ifr@?ioY_+=f zk!R&>vkD8i%(8%iy?-q4WL1%|{^Ob!D(5OrXPOO8w5;SVK}kA1vRXhpd}x?Q3g+|h za$AHb;O`h)3{F7R{JDIr1nNZfKbG`RQhscApSw%?=-5a3hcb)R3vq^MSc&G;qmXm2 zCq&9+wQI6xeRe-}I@Y;1c3rSH@NhE$p{~C*@T%z%s1?r0s(EohYZXW$EYbfAgQ(J| zg^i4{a#O6e6mAL##s_h zfNomtZrr$W<0Pg7wYtGZnH2mEC3bDkmf7N$tox9GTv4F-+Cu1?~a)0?Um@#n{DZd#o7K;R5y! zBgMhXg`cm70t)iXkvBgD6R8lfU4(jEbLx$nZ%o9z8XUsVGqQ2LG1OVD%#NM0kKfRy zo)sIcTZ?N1*=OS-{Wiu^kxHFS@-!zywJ&R5y@E{!S>Y)&($FWULw8;FMFM3>g|!}P zzJ^?5E}-zj zvWe2$dUjP8>yUZR*j8%T!~g4cbhrR`*tFST(j(2%UU8B_V$#!5srfkH48$ZOzY@iV zi|RoKd>7K@Rz@YBXP{zqP>!ejtDu3NGoZfTBQv7CK7HX+wN`(c3Vo0(m^f4Jrk&Yl z*S0#C-bj>FYmYoPwE{|q(0ztEwZo@N^|N??+m-oP3RrxYIxk+y7v<8?m}#P@5M?FZ zs?96O$e-47l2JLfQ9Va|ytz@5Z;nO#nkjl%OuY?m&X^^egQPK9C^AfX90zUa0jS}k zqlGnrDLOh%QNI~y=s(KRGCZxdd0FTrvNp$CvgKN>c_Mxh7CiQi8#lfbJVC?7HWbUs zLY!suqWLr4AC0+f-w{K90}yWHiC+Lhh1LihHbfYhmn+Tjg1FigJzFY2Dyxfmqvsu;aulo? 
z$}B?*5AyJXz{w1&h0H)KUMHZL)7d_PCf`LlN&U~XWAX@>FCP_$^#U4bL&r<-(oLWv z+7XQM@Ctg~Jlg%qe&~WT1rt;#M5@JrcWZdl(S{EpTg9>ZtqKyhC@&gGXVEmkmmM0p zu9l#~)Mx4c=~$uv-jQ2=2(5uYTQk}Q{g^ievP0J6>%Oe5*yiEF_~kbL#*G_a11~;` zylrGgH7^uCRASGJ_m{5`pBqPr0$|QHtli9{J6h_s$h}>Q5$0Q>iF?cCH*Qmh#)lhn zX{$vSh?%!IPe8?3)jR!}CXeqjRM+{SCnu?$o*|E_mR486smA$do8_=OTofe~y#J_4 zjnm|bCq#eeZ?6HwD`tL{pHKSxr)NmEJUW!)o@!Uj>Q%!TEMmcU2%5%vMaZ7uJCbEe zT<^%#w~*Rc(YYMo#8{7Z2;q|ncn|5(XC%hb3~L;eG` zQLqq=9UhQW<)2Rg&bXQEa#lh=+LHBqQDgQ&ypP8S9{TV8>9Ucvl+lvXDBZ}|>=}5; zJ8%D?$^dHDlcOSy)@{A%RcFjY1j$Lf0-hxe=$^rkoCl4w=CkhW&nZ{Jg_V2^tjsW} z|4>D@d*Y<7s7=ti?CgRVMj}e^JfFU zam;bjDN|+u8SNHE6Xp!6Pfu1OIAHR;KLk_DpRLze(8VV(4+}v>NB2!ZAt%~pkc_s3^dP+{985sQ_^r>Yt9hUd{fNib z)gScWR`qA=x7zi)9NuQ?(wy#ZplwVdqoViMpqlZG%Bi*GDVc0j!6C9csEBJlhQZdI z-je2K^m*HQ?V@BqOvsBe>p^X02$ z2cn86HUFU-n8Vt3>uA$Y<&;jtl@N*CNNQG$4$p09|P*)v8Jj zZ%tuYZhiH}jT@hjGYVg9TwBFEZHGL*XL2VxMC-PQPV< zl?6>~YrS~rFJ%hV;%WejI=6kM^$NX)A02qEsZ(>y7Fo{{_tOn1sUi)Gd?WW@V`Tj! zdjk`26s>Q>(tA=#&~~Cy zm=o#u-md9%yH4XW^C2&acw1vyKzqZ-&pwuZ1(?jnLcmo$EtqL%=n{O-mK>g#n$qmt z`T6-eF!Xg1M4r<}Dm%;!r^sso>~s3v6m0+xP|r{)c}Fs`Y|q82P=L4`&l)G|>B(Bt z`Q|-4gDaBKb3X46&nTI&oxz;?#MQ)yownzze)M(VR8M-|2jNJ$cRi$8J$nw+zHEY# zF?vzjzc{|{)aUsSuzLLgGLwhB@C+2RK}H(~i(!Kv&AsGsG9_LuSXs?Nl_JEiwc|s3 z8Wt7(EE<9%yrcOlc$r@1fhuykffOAZyt1emZ%#F(hO*I?44F-7xA=PN`FeT0apMc& znb99m^W8Gyhu(P1j+L*K6oZrWv!?pS3v&O;zXCjs>005U;~&YsX!$DA^Igq?4j1b{ zUWYiBpW@Hp8?grS8mjN6T1@h?Lf~o*DAAe?AWs9BoK9ZVGR`PNmrc6BLD-d5*Hd1< zk%$n=b>*wmH)2X>F(ehGh4zKK|3);%Cgd!@kp?j}jN9tboqg}TIR&Okt|0A^VdQ=`e;d{(i zwBz2Yc>te6CyUM4l)#_31#BvDLr=+oPx?>$87f8)lD_3&al9bN@tl}l@d11&MX zN7Ib?P~T*zeM3M;HvdhT?ctM2&dG_f$u;oASWTZA79=tRTk|}^u^WrQ<6W$A1b6whJrDac1 zxwsVY=Fs~jH-fP;jy~T|XIX(R?yTNF4(?9}75a4M84T3{r+hsFGrU)<%c@;Kpv~FZ zr2|F+h-zJuo*ljxZ3>aFWCX@bB@kHgipG=rk$tVj1;Qu|tHV0Vk4GfLmi)D5D?g(1 z5$oHaFwC&sP^p%K*8*{w$*x81QmRX7hL>&cIa+{!qr968hR^u843l#oqrMMd@yq-&7q8V)?JVN?!A zyn#FBP-rz~*$(MY{k#sV6L&92yd3Pbq3F|je~ti~w>{TI(3a0ix;4yisg8t0M&o2G 
z1RaE?;pS%{>BV{ds4UUcA}m@>XFWKg^>}(?z5b}r-u61DH^b6@ixxdNt3D)y898kZ zy@$+U1aWKT(KBbP)y^B2^)K2aSVab{IeP4k8#n5?oaJS*-op#AT8rmI`cDoY^qP5Z z2fY!JwNW>Gc*RzErnmq|?VOXyc)k$i-lo?vf<=46h1Ohebqu*b?>L?&Wdn?9?(8S>v18M6EX`Z9Ey* z)ko6sw1pjd@zGa2NI20OHYcBpa0H5 zRlF%`$h_^9a<4hehzg-4z>f1UJ7s!Y@|(5wN2ksi9G2jAJw4q zuRvM{RNFA$XUH&?^xpXqSj=b~m1}esfnHIiM~<_0QPVWV)0)TPmERQqZCIZo;(3yGPbadNIh}sWxejO#|N$?^*Bgj1B`Kx#Xc>k~bfGeEu5WP-9q>055bgZbGk<<5Rdv4kx>CRRSJVE7#GyVp6FFPp2OfL|XMOy16NE%~n7rlpS z#ux&9M-7yJJ($z;u7#s9jGNXVUSF0=)k#y9CKG@aux{^I{68`>#_D%&v-H(%9@nvinYpZ>U2ViF~6Ef4s2i`iWZTeQacK=6%3$S+i3@d zgvTrJS(*}Qrwp&7_0VBlZ|9LkK=;u+zx?G6aFNSfk=aUgFCSE1%O5lj$uI*NZ@ro9 zI%+R~jcYlDe0+<}Qh$%GHCx4He^YBf8pg&tX7Ek6|Hk)1>(v=~mBm-CTWl+^+4v0a#tZC7@K(nbTk5Y%=lXP6%+pBJ+6~bz?GoyP+I$y%Hq; z{|;12NvnBwV^Cnt9{11M<;SWBHMZ_K*isCXp__|nqL=2 zv@Joq08ma0Uo0OXlFmx99;vW3Ckpp0^MS@^Ovg9H<2Sw=+VtqY=Coz5H@@7k>~rEZ ztN$C#8T$Vx{LY6%%7}Jf1ZZU6cAxqLAbTsQKu7nNVbPrRn5<4DPgFEjapqPJucu0k zEMvg&*FU4PbaT0yr-u>FEmrWiJc@;J}KqqpEB2r-F51`l3B8-oQKw%BV$ zTWC)zI&6(U07YcO!Ew>Q4dH zIdt7_gA5_ixC3tW?jJq;MMO|GFAWTj$A;?R!E`v&Nav)eu1=I8cc z#=d?WGPPOsE{HB@XX?lL`pxisomX&+P<_P6B%tk&c(Xxk6*AZuAGO`M5g{X6mMpT- z=fx`Aj;4kiHd9hREbmf{Q&;PMh1ZR7L?N1vT09y24|2C}6CDwKV&Fn4;;xl!?1g$l2;ilkK_XL$5xFLT8gc#+|KLG%qfl;sA`DUb4QioX{m zd<;;6{;btn`=m~@>uZ4KFTg5IZGDynVUIo%Y$-t0{EXbno~#I!+6e&_Gyu+hW^pJz zYwfsslvo3|{DZU8?(fj5 zyvx9?n|C}!nvqAdkfux;QGTA0^-S-h_J-n>(L{$v*wJ#C$>IQ4dzRc1mQ6ciOMsiQ zq5aN2v2vLYp<~g)-SAa=KKk4dOtanIXRksy6+LMdtU$(M@hS`oyAx zm64!{t&GHqMz?-6=B+@bh1UF(OYgMi7fGZyR?dZZ98TDrK^J4bUO)7fScWvJ3TflP zpkIrEXIxjv@M1j}v58b{ufU3OGUEueNgF0Zv5If~*Zjp_1_ynsK6`i z)aR@7$2iVEWOcHooYnjKDFM!0jQoB!fF**3Lf5=UTAd1WPR-?>8DHDXI= z%)AEdbsU}tJ*dQHx3#@r^F5t%79AL_;w|}{?iN>pgJzC{_pf-)lOYoYd5?m~T{Z|c ziZ;m{wOe#L3yB1N`2j{1Al-&aq!F*J&#M%XYS$a%@wI3(V~-=WTznC^`jiHcJ4bq2 zHo4}Ji8K9a&G9wXR6q6)w>FjMkBfh=ho$zO$G&3D>aX;k8Kg};uZ~is7bj6aLLQs( zDtI6~eM!tX9)AY!$%av+;t1s2%qIYOKaw{r9ugP(jw}a^XaXoFT`@~tiY~z5gUKQl zN)A8RJ-iJlAk-4vX@Hl3psbqP(XJso*cvXhu(Q}{P@;_!V`QIKHbaKqN)2W4uQj~6 
zk|#N|5eTjTQG9rqg^|tR<(5W9RvTl~7G>ty_F3A6sQMkPz8P8+LeRi^qITE)i?-+) zkcn%}Kcd;=`#K$L{+cPE&serhUli|n7TH2}ZJZd7EI%d2qINRIazO8)s{c2xg>~Lm z7^d|hYrn<{fWy+09P~+6F{4dr%P_y14n~_mFW0B_I-RKR}@JszS-xGMQ22~ z*5ZGHcApisgwS`mgf^{}8s2qrhj!&zlMQ4TbHVaVU4j`RD*K zZSo8JN@w#X-~51BB(suFYOu`mF9)iXL_+S}0o&f9f!iQ2!$m^o)xt>eofUc)*)&Pg zMq26Zs@w=<@R8JJ9-Jrp^CRDB0!{&vKiQ^_Pq#(X%Tmuz{x%ocY+e5^c74Oz+b_&TS zqCxZOcI#ZdIW=F@@6lP;89Aq9zctT|&j6CYD1<_xnTPRD09H_Se#T}GY# ziXeg7__wG^>0z6sxSTwr1oMh$WY$+ifQH%4p7XfV=# zG{!3eAs*5kwq~1A#Lv2K&9*Y!>|&9A<%UHyS}Pi-`z4+0gPN}{WL$fQLRjyaTf=V8 z;)(WZySMrp;N|I;ZXcCXYYjK9!^oZ^eY397*bI(e0&)iSM885Wv8bXpDEXyfKm*g` z)8p7~9(+dH{tGNfwE)HyM#vfio~z;Wq>e5-zvloe@EqD0&QkKkbSQf0I`fzi_0k-E z!*5&n7~%GMVKXO2&OJ4G9n}2jCE}65%(qs|zx$%nR0u`DN4T@=z9BN)@9HQ6rR|_Iq zB+$J#KdWmmv|>QtTHg8~n$~c!FS2qiF@MHZL}Ko#lLt!lTYH828|X*l8o2i?uO+9J zyjy0d1?y?f8o%7s{5fQOtfd=gou~OLz@~!k9fc(&Nk@!A3F#$^L@+gm+}jj@=2CXP zctALHX76j~Gf?pDEcLj^{~FZwDpTR;8T1!Ly!T!(g^)eRD|C%L4`_6-PZ+bh63<1j zxOTnu4(al@*X)UmJ$Y(b&NMpUdhfjRMld?IRTG9mnZ^mJ7xb1aX zFIT@|Bl0Wy{TDap(*{6iwz5r5g08KscPTRIBGN!_LE6A*8?O5#v=m!5hQf7{Nm~Od zuKZ~|lR~evZ%wCV)C$D3TwCA*&6fggO;kjQd^$P;3*M2oU~Q51M+F`o(MdWGfpq;I zUqt5>x%72*&-A!fSc65Y*FlN-Gb+H%c1a%Z){bk+%8KOU6%kvDWf|RDXP>!`)^p=C zpnWWYK2PL!=DdE7`mX@b6t`B>UWD``XPmsn)e0~w;AhdLct%v{%`R{FF?8@X$nz7# zh$d;ydWMw-#6=WIxr84I5LJ+j8l#9763I+D9<>4oUYw(!G*43q%`1$ks2$!+*y(V1 zMalE1z;f(|B^8kgV7~#n7_n={&+&;upr*f(hjgt!ae}tj8C;FP=JQ2fI1jzWnWNf30k5r1zIwc(OQ5$TTP%tm zJu)Ft5e(IFE835=SJO!zIJ!7q%ds9T{xf_t`<~&e*O0NGl8<~QWL4!48SAqOVb)qA zc|m;^4WiGO1bv2vl1Se z1X=_Nhh)jKk2tgyOZRWR;AJ`5M)~Nnqm&^NL{(AUnJ8|BV^|f&B(pbR$<)z-m6&*8 z$Z^EX`puxmikuuiWm#w^9DBC9z8UZRSAU;I-4M*PzL*8gwnC@RvJsbqz~%GE81&si zFNT$Lx&?oJ$|D>5s)Z~%1eD34g|M^8CBmFZTQgM|Nvp?I(=rA;y^U;b%<8s)XAD-a zUkNRTUcaP4+Jx zoAy6wm;Fd3ys?~|Tk_1Dfo_v-#wr?S)xV;48F{wu^}J|qWL{eHJ;jaB0SE0m1oFIp z#paCQzcTj+-H01l434@Yvtr>jX8ZF|qbMsZ^<&GEDIS$!c0=DhVk9$!-EZoFU;oXX z=8G=35oa1yPPAmlzsvAf&wT@F@2{f*HN&Slx71pm_JO4r4+qlDpARAL33l=c_Bs;E zReD5)6;EX@QEs8mCZw-otJ=Wg|E=X5N#x*uHUDpd{GyKyKLsiI_y6{G^?Kt4FTvfe 
zz5OGPX_O^JnF`7bQRdd#B4Hj)(>>4YrQhCIy@~SP;>oRUajP4Lqm)Qaz6Kc)Am_UC zB`xJEce)J(+H_w_(noWhj9KYI_fjJ)ZL7!su_+nRez~Si^5=O6heHrAl`)#4@BhQ>te)pJRHP4%OBGqCic!sEtu(Do-f|EP$4HeLn5 znc`CjUO;g@c>m9X+Bk+i=Yz8Ab>LX)+Os6834)aMFvcA7kG87#M;$E+)%{VUt!JVB zt+PYx+iEV`f5=NlgB^H=^-??w6gA=GR()@W&wg6Ukwn4N+34(q&XM)Cq}^WWNyL)l z_IAN9Q)=Y{4S~#RHi?M%&hq#a$Xr`u|6723Oq4z`X80s{$&tL?zZ>UqV=&Vj?$xbi zzivZxQbF|n5XiW;XgER7245>pB`{V>FBx;RW^bHDug6wEE@v+R{WGw9h>l%!c8B;h zL9bCUTKVwc>m7jgcvh5DiNRNEf7S0;>zFZjqiYzYtHgfNk4Ng_jpv4KPh^jf)osn6 zzoKduO_j$FtfJwK?}L@%T9Jrnf3A#e>z@EDODg3DE!BiMMneYx9yL|=VfQu%a}ec+ z9IR!14}Xnw@C26hKl=QDG{Tw6<2jy~=4cW;epOY-tnD>>euqZ)vJ)Y#7vRCLmEc56 zaXqUM<#ce!NXVzJ@+j9^d4k^wfb(S{`^Vs|g4A9W4%%-H^Kdvp9o`nt?4=Bp{{k4? z?|XMg$PbKI4YaR^21kfYdKBeu_YCeOeUw#B33etOLt%Go$3=RJV!)??d&H*!UZ_au zX?fsO6YxfPJNH*?P8!-}OZoNWSR+HDpo*mtmo5f zmOQiJ5&~LVbJiyJ$)iQq&YFjUdv84vUDc+2}O|h0Zg!dzQrddb>xHP=d}{Ft>q2uclpAI3CU61uof?+G>j)d`9!@u?RZCKLxD}KadYz zt!Ol7T(pLN7?F*Sw%04(q48VWh&FkJsBB>=>(er#Xz`U{6D7t*{m4I9Rx2AKXXp@L zZ(l2OAWM$qBD+_aaksjEHY##1+fbjDtqjfCIQ$CmM7?XCe&WfI?}h{gUc-wR!PC-E zNMuaCQfg={uq+p5EN`TE{%c0kODSggayLUvKQc#`7a@;JXievybGaDrF%lkr+{Afd z5~4pzCn?|k4m~``2a2qj`PDAtmO3x*pHXm>x-dO|Fips z0S@jZU{d02a#w_J0b{SIw=E2*pN^831BKnAAcr3Tx@>uNyO*2gdCJo_&7A29|NS-q z8??70P&;l45(A6J8G+xIfs_4_8PZ#CZTqbDM~M7?_PAx#V)dhYZEmBY1F}Mhk2P;g z_K~9T)_}%l?2b2w?6-jS8-<_*`t|et<7)ktK;E}C-`i5dw#f;rOkQVW;1hrabD4Y^ zn{383F9H=ho=rT)@Ha7r-!kWNU3GDH#ucA2B6kP=o&vT@n=Few-2_`rq z=;)SBd*q*;;8~zxRS>LiEeAcBFsI5nN9+Zg6Ku=qYJ1jOva+5h&*bN3dqabwP**ZL z4<8u_et@g}LHFUy&|ciALYpf~$Eh`3eAT+vnq9<`kuV#JXbvxGb~L>d<_;LNdIu2= z`e`kf8j>;R!MK4jx@fotRsz)7=Jp2xE9C7cr_E^7<2=1-LHov|(*DPQ3N0Qo+k*Ib z+AnEF=J%`(tNZ5AT4F>FUk-R78fn3*mIbX1@zBqm-PcG}I6*_seWaos^tXWS#^=EE znRh09~njOdPXC zoCj^v<5u>O$|HPESy)D470*=|E9(|9kg(aL?A+y0F>Md$Pc8lcEXszaEI3{z;fhuo zVK5a$I`48y8eRH~a%t_MQ8e4VlIUi2&BnkA`Y~rF$;&kPM3a!7_IZ9=@rlI|e2Of7 zD^TK3)Bu_5|pSM%THlL?F#y6Rd z^I<88_Cfl3yO*03I%G8#VWepIhj6BxWhK9Ij9S|>$5f5@ka%_JoHIW%vsrkesON9S zeQU?!NoB4IvPE61PxeRF_s`nHKMqSDkZfkpIHUV3^{;~3mw;4fta5oQUltxL 
zqU};fcj@P@JX0lX9AF!)dG^l=EH_P2J}ncjmqP$6`h8{o`D?NCB>%lW4q79FS^H4M zViWpSY@epxYP3?n9&KOtpx1jM8&V}Qi4X^`=^|chcI513Tb?SOViv2>j<=JVR=?^L zL*A1!UKbbjhH0cqx=x!v9<2;L*`GqDA|QQ{q|w(NMr8ZXZT4rP62oh4XNM=MEqk<< zS&hcd0Id_zAj{;5G*X$0bg*HibYFj zldmb_M}Z0&Q(`PZ@i5-6a)WvDoDNI15^6cX>wg2nC0G&m;aRDRoPTX53G(Lv7Rmi^ z1+=$@){<7h631@>hfM)|t;WZk%X5%OPfI&ro3fjse0M``#CyPSE49p>L0$F{RZeX7#CKBwwG4 z8Cj3rdJIvxyQ(XOBfqrXy|i^f^{Ei=MM|H}emh3!$% zE;6S2Rcb1_wl;^2xyYFltp_1vHS(6_pVQ6WdGy!F*mLv6tAR0R6p+9v?dp#1?Q8&i&s=GQZV zH#%V|wEp0{0#kwZ5DYzUhJyGF@gc;lsX$w&;{^SBQL1g~68Q+;a*LnwI)yr%Ng}t} zK2V1Ga?oeOs#m+3vHduVLWzmRnqR89#H6nPIvb5c4lW9N&r5zjSRVxVXW~PK#lO6w z<*k4fe0)04PH%o;KQ4at-FRW8Zs}Tu4tV!tU`N)n3qmz4QhiZ|X6Zj0<3q4wF70ka z+YH)n=bHUch+lK&&{d|0PV>D+lh7*uBo2Xt(XR`HKZuO>w;J{v{{$k@-;lKh+OlQL zI63P<7B{=*8-D{jmf5bgIo2{4$t)XuzY(>+0hoPEI$ZeNDixG!fOpEt0ME?h28KBr z7wluPKH!oL2h#aLwEc&y{mDsR_OaxrgI3L$pL;F%n9i-klP&4C<086CZqWcZGz2o} zWdu6FN=+RE9#tqa7C`R0)?&+n>Jzgd`B`hxyw^d?&|k4n(~!rPasTr5#*ImM@3myE zt+D970%@I)nYZTmqT}1<+>k)>fV?O>_YB6KDQ8!h{$c^}ge6s2R(D+1J(A@3##&5I zTbQZ{$J7l(Smtxe=)S)wU3zPM?3Wi=4vb0Mgc3q%bv+% zWJ@>cPMTe*7v2zXEhDm zIKK3)9qJvc^#8Oyz7X9}8CeIT`x_-#vLZ`QmJ1fYIsZB@KAuY`*pc3%#RB{LIWd9^ zF1?52pwDug@Cc;kkI7eXL9%Y%Dzc(&}F=b2ylPfoTBe7IQ5*8<_&Y{ zC*g@-g{B+lLfc|?Xk^)mL|y9ecbqAhsH^-oY>%|^=sC21St=`4y|*45-LrZ{d@O6B zi+WL-tlLmK1!KoI79H2E(gM|B@ZCZ2jjusP*D5-h70KOC7~}L<&$L_PZd-E00*L0# zK99#r85=R*XUll*6M)0381-1QrMR*A^1Rl0;)k)T7B+Pkw{=UR(b7Y10qo2vJKt^G zSISkEUv@>ZNyeyEgLN22zv;DTUirDb(elX7u!SEBEvp(@ zA3Q{x$HD!Le+D#pzqK)-LE`H9jZEm4#Vyh|G%T6W-q*TDJXXt@<*DC^;;#TFklZyc z(mxPIrSAd_3QH;yB6oN-yX3Ir4dzu+fV%@iM^N`XZz3Jp-~{lEJ%!4TP>hKtg_>8`t@`8%yDtG$>cfxLE3zuAd=e6;;ep z;ZHAw+kjh%7EQ(iQ1wPXoOF)V@XjN{R?%2w;3Y3>j7MXhusMd498fLz&tf#kv3#1R zBV^qFGn;*5L#CFzL5dW53qy|~Fi!K7_hoZ@<3<2!&Iq(gNO?sb7|H$_(ELZu@v3ze z3VaaY}7#gwHrZb=}@ zef%M6KPaTHKqeLu()_7N@6Jd;|6HW*j6{wTZ5(%5Wzo77 z>`NeK9-NP4uSb2uo zHAgr{TDJ#BnE zG1GJ2_;c{WJ@lHj`I^^_8!Lh0TP>g3oFP+Z^=qKe;V;YjqX2kLtmJd${l5fecKo6w zVrYpI&oweM93zt8vxGcK682anQLyFiZXL4RV$nLMJeSRbUj2(RB6^*X{A2LZ6xC-O 
zIn|*jrQGDB>(N&>6F^3*I`1ky`+jUsFY*Lo~D zJawBje+ikCAu|>&uv*zckj8G#JLTr`?tj}xb=ay`{j)Yfi(YQ~k0K_2bh|h2jXw#i z(5~+z`~T1D=Z&R6w*LoKXu$30C#+Bb^?U2cnA&&X3|)&q0hoo1MfL_v*4lz3y(QNj zSH0TXuzYHpp}P(0%(C+Ed*u0t;Qc=da_(%5jN%0plG3L>r+`z_)qDaolswA%ijEl4 zdbh^BayvSOh#ubvJdZHT)0*d{Rb))^>ZdUBx92ety)_1EUlcO6ry0dIZ}%ROmGtXD zda(RN-*)|^lV1cvvg#=jK1?5qT;)aNQ62W{QxPmseo=GxE9f^)1q!y&YlhE84{b2A zKcogVqmPWq(8SA&8mlEy8#Cm!SoLq?5N+n9G8>pd#z0b^sboNx8?y znIbgGn-eW^mhDq%X{9R*BFHyekx0~HXu9GQQA(nHYspb<2$5I8f zCZ*y6WSMIl-@R+m>}n>dAEYgrhpzgT5oySCMKkDK`YKyD3og)jl^bq2KErieUq5d{ zeB;KAMTp`&ZQiuUv}lppqINWeH(vN}>Bex81kzJ5$Qhbsw+s`TM}zA+2=kKXqmKWn z(DAE^TeI_@u}*pxVRE!Niq-K`)|jzcM~*hXhANKxfGRkz7HYM9tL}4(7Sx5-<8`96 z%glJvMTd*ld&BL%^=$*S|K%`KST9@L26lNZiD%`<_5KaLv=!<|R5_nNUZepheT_#! z%N4kKz8P#XM%#Z+xs;#!lYa4_(}p@!$}*pXPhRJ!4c0#-JJBTi8VyGJZ;hAtUV)?p zs~X}lM2pj}sN`i;OU<1%=9K)c`waA&NBPvC?V5i28wRZ7iX0TmoGr{8z6 zjzm^cEmlC5*G|*Jpl?p(1?{7)ss_@cu-<- zRQy{I>Vslx%(0@(@eX|II;AyAuTd0t_n+;}g(mpu!UQ^Kugxk7qQz$BD#J@_ zEDK;+Xi)`+^N{f)`DBZbEucRm3YnSXIKpzv-=goAAR`u1U!=$)9f|79StH@`yK&>j z*8-`(_EZ*)^m5vCSt*Nl_vro;@rmp53xJDP?7kbE8t66+5Dj{xStrIV@hiZ|OSf(`!BoZZA&lFtZ6G(T-N zPuc$pf;t2$(se~=hd#)t3%BzE9C?UUB$9-eLI$%Vv7pLoFtziY!^vj`(d}sm4 zSP`IW%{O>aKT4&xG1kL-PGS|tHeMgAb+k7UK-E(ntZ1>+ee%`QYj55gH*Wk1Se{7s zTV@A&KPwd19u#@hcc3zVmiDS=TK5S+E|65COZJ+mG;SPZ@-9u2j;^nIuGXz5%X;$o zUR<^!L^}uC(D-z@Rjg9ZDxa*eJ3YS{7}^gfvq$%4Q7INuhUj=X0Sfy@&n#7FLczW8 zqFX)ep^g&|yjnfh>i-0lx+8Ox3c+2MLe1tH*Q>lj4Emu zuT|K4e?ikD8ua=ujGDC-bhG#`UO^(*IjojqlD8Z0Eg2L@XVGmckQ{w9n7V6MUZj5-S0Uv@IU{1$e*9B5cv8PVDZj}+^^IdGz8y`loGxI zIuK$t=SKiNG>L6|7MZs{&$xX*%RYn@D{KEf+ww7+@i&mQPH6u-S*2cvwT$$g&ZMZe zWsO6Q!J;ceY9MN_KtG5s;>kiE8Y>KW^HL?t8BjZ>Tdmc-um@zg=af}H8Ctcq0Bb;$ zzhnO_D#wKGB_F-O9_H;6ZrqrHSKO0rNS(FkvQ%Gtz1F_VR)4Es zu?v8eC#pVur3`wd0jEM|dR^3yq0DxT6kP{7ZP^x$6!qeDc1Nf!!bGt_uf#%KG1n9m zX>od9QJFK*8FciBDtC#~3F>%{q#fCD>QBQ(%NYhccbIbf5{bgob5k7D(sgc#+MoB^ z*wIh_U1p+FhumHd@>A>s=gH50I#iunAWTPT_5zw!u=27q=X4PjVMsLz8APQ9lNCF2 zBl^oW;NL(tKZnO8!=6nY8#tn;E#io=u2WeI5sl_$30@i1iiPaguiBeg>6C8MbCNSp 
zYyO(8G1fTkOe$ETtWlJ8nQ!`5SfXKx-*((cWWBMi_2Lp$5l}yJ9Oxd+>H75>H*Q!M zE$(&aNhZ6pPPS#BX1Y-RWcM7pAMrBoJ@-BdgN=CK7&zJBj*lNa-)e8 zM!E6cX?bIX?o2PjR9qe<0eeNWe<*%_E3hn1AI8Bd#j)-2tHglvtXxB$wU-~A4l5B; z{j?Q#SiH}q5;OUQ&5Z{|@UjQLk%cb$0k7PzTAdy> zbuEp!wX@#b+^{2{7O(kY=a2+D*SA?nASO>CggAuWlvzGZcGYJb59%3SAa?e!0UOEnJ(0Xz2kcJL|Dj=EZp)$9J6z;?IW` zgjXWQZEjs0Ekp;y)Q-gGywI{sT23|~dxh+ju6ZGTGP}qWDN>+hn+L0E zNurHZKPPx;K4?5rD9ceq7#VX7P$+_)`o<)rj-kFEAW`=89~BE*gMOvs+l>BZti!^n zU}X8v<4{FZl$+X=V^Cgc zWEV@OLAXbPqw}0nl-e^d!1AKy6YlfC#DPQF$LICBU;U+ly*MW4SJ(tEXSxKQXj+-| zW{RF;S&viqgpLC#)VqxEKZt3069+2u`bQV4UCD89P_r`!q{~)446H8)PPS!1&OEGW zytGXFRarqnV_DHq%lhWyy3WL$vMUpGNJ`IN!9NOC*OA&pB3>ylX6Ll%togEt1nU;4 z8(#)wH<8Ol-lR1YHDguu%s{lh8ycPgeC>AdHvrc-`L8%2caa1y&%qUe)e}=ifx(n? z;_m7yhmkrW(v{bx%Z%CulwzI-?RWO#a%T{cfIoqr(NTrc&P7xZ;ahk$WvR#(1^gSEx5r3<2 z=#6X67?k>vTr;cRXm1R_6GhY>iHa6?Z>)UdU_}2BPt-SpTcB>d8Jz4# zs^g8dJcFhF(YPBKScXa)NbK>PzX5ns@y|Tv+qahrg4-@-b8icV-Gp0u_Q$yhws&#(PHzWn9+vkyRdwaZ*)6!u>XT`kKWm6)R1 z!4Kq~i~S_Azj~Wl@4x0Li9JEP!g$=M!>GRKdU4l*czDhf_3L|5w3?1VKN`OrY)l>n zM%$v%LnD6C+;WaewZofV(s&1WEztWE=*Vv@fT91828r|bYKadwTmL0S^k0rcyw7&} zH*TDWjIBfEqD^Ho?pWK{$c=mVe;3k-S^X{1#|FP>=TQNW4;KYcqQydWzB!jYlf2GM zJ^ZN~D?S`q45{@LSa6bt+`;uVAJxQ4!H?r|{Nnj@t6uy4zbkod7~%IQpcEIwhdWtB zd7=7@%k~2Wd|PvI5-zlAkodnHu6>r_3<<#(DVMexZhvarQ_sL zG66e&2cr7cDnvokg<^>(_70w(`Ya4#SMcMG^q2lgTX1iFq8GLQqj-zlaL|Qp5mGrB z@1mF%)vKuaw$HMYXna(dNMTkW<)ZawZ2hRMfqVU0*$hPA5`Ay8Tp^H+WvBl}5}ANm z)19gRP0=+luciOD5Zw4O7$I61w{vhUc~~y;R+k@m21s6`z{ENQFDQ}y9?c8?x#JgL zNDyd(7cU`ca4`%o{W0Scj}D1J&;NcBuPq5n49SWHTa9OB&0wlb?9msfTAN*1c8{`W zisBSZr~f7yg?yKSGtCTJuSg3*c_8EUWH4;WpN^|%S{QXZ1$>h8e}tp={48(!u~nq# z)#r`vnAojLY6HgDOrUittYl0dJln(g*|2@f?^Ylr^>7UMG^3CCn7hMyV{O^!!1eWo#kg05B=hX$kvke^k zi-~AY5wObhQag8lJRpZP!;Wi@(u>Bx0;s=OvyD_}J>P+e!FLg$IkxtzB|KAV7L zSA*iM%$T#lVMAg}2pa6mlZ4*+uLQDNRSD$5KV2jOchWQ=j#+?ik*{df3Y_hhUC4+} zKCn!P#0ET$L-;5Xi)YD)o+J$TqxrM#e?_kUFsz~qN5o{b8T#+ZsjL5JLF}vnGMEk0 zl8zSk$p?>k=!ZlV7zWpfXEm7qENGYc17XT;R52nJ8d?2spl!AO--2-CgJAj1NM$Tg 
zJBDMrX6~uc9eQPeb{GX38!AC@eI5T{z9SPXN?aZAG{MF6dZ-mYWuUdt*vuSzD$)N% z#b)PDA}g{ebe`oWU-KNPM4cffxu~)Z+v}TEg&}%pFo%N^{F9i{DK_TS#TqYrS>Qw7 z9uGh4sjBRWV$=R6$DG8d>GcR8_tUx+vIt zgBvbZ2Y6@czvk&(zboju1@49d5=k8bF`(Jn*%)K-Y=Ec5{|16*tTuST$$A_=0k{xs z-XsRnv1vI*DhZS!GFpod0rzi}rWN=snet(AemPaX2xa8lSILSrk`d@Iat`@D&(BqP zo^3!!pIj81Ej@!xovNmxy1T=6=rpWZ1X~SyQ`sIrvyv#5_Qov%iwo;{xpf69*%mXG z83rBF>oY_N)Yh&KuJrzSMdW9H=ikJhZ#CMDo@H%9XVaWmPjl9CN1cnK9m;9YdwdiMRX1Du|P?>&xK|8wO_cQP_@gW`cFoCr2ihD zjQ(G7=(lhCC*ip%EyB9SEyFBryHJ_Q@&K)25r4aD{8*r!(K}S}qm-Z-!+-5WkiN;T z8_>?%zi07DHihc}RY)G&{`E<3sq08MydVl$N5e+q0hi%4sL-0>gGW}j{ath#4^1J!e$_1dv>5Tp zu9jabhj}Yd>|(?z`>y~u?EeWB{yeiek_@hN4L2@gG*Yc=8 z)AVW;3yBon$-4j9JV?Ek7qomSc@j~=uR<*JA zWNa-)KEsKk_?DnRKcCU|*iGsi2t`QYWJdwGCY2pBm1`!IK>LLj#>ZD#&_Sf;S&T-X zca-||AaNnZW4Fyo{qLEnu{tZ#LaJ+QtsRR5r`c9Lu{9B@P-G39aC&CSsFmDX13NS$ z@cFzaO^WB^faY>r{>`xb!WsJCT222o{S0p3_UBR2e-5s-+lqKuG+x-{S^66pSSR-b zQm@vy>i|9hSgZ~iV>oOuK+ZHCQId6jn?YexB$%l&V(8TQYWXo{&K6wHDjf0Qq8+39 zwnEesl^F+FW1L-QA4WSeuo#qRk@KG&NBCk&hr8-}DrYjp5WOoL3?G9R;Vwe(45Ks6 z=D*7e(bC6ill?6G$qxe7CgYHzfhDcL#|2#!v_D@Ddyd9(f**nxrF!i*z5+Uac)TcG zM{WD7C1*-T57)9y7y9%(h}SZ?vmBDV)|C%V@X$2qAv6y?S2Nb53yKI8>*&+6`2Q1u zYDu?QD#9z|S#6{Gujh*NKU(jN_u&K2A^WzBH7*OJ2A0^P^{kFxKLyu67xL3hwSA7D zj{gn7>N5;glGcLFn*_E5r%hm_wh9{)1=lk343ZC^Ezd-z%!O9Jr-tk@*|{4;&E(U0LG5%7pajikkPB@SPdwo{L#NwmZ(VYG&RumN9gKZ~~h@qXoVStlu7R{&X z|9$Z5@u6lx+5s(!QVdD8Y5RfFX=^Str)PKmsyJT%T-qpn_RgwL0G^~lXYT%$L?o=5+*Ckjn-t*{?!Qf4`4Pw)_=nDz*NeYWmb)N#H@Ge zaY#-*`YF&r-zyS{$FZXKHlP_a9<9}HIi-pLm;)8tXLxIIL@xL(w*K2dL7UcpB%)X9 ze`~Ry`WFxW%;#m}`eIniKm|D|ei^snSxFsYG3aY;twqi9T5rX##z^M*9m;*`mw@a@ zW#dnT`$nmzBMFTkN$NaT6{Wy9j%Usl2f(LcC>SyW5*PoQcunzdCFT~yO^Sxit7-sO zZ_|w2KgA|Xp6boO(KdZ5kokO<*S30&uacKr#LX#Q0RIWj!^{|61 zp7l`>q*rH3=-9Z%EoZp;5&c*)X%;GSWu!#n9J(ZGW1*u`R z%lN3M4`(bbPU*c?4BG#n<26U8UBNu$r+NlCv=#f?29{yY?0M&G5U0pvkobQbhZ9CQ zo3W_wZ?a9!KRR}aJ0$zQbG^@YX3&|Mpm+T8PVd7iH?NQJ((u8GKv4t>B2Fk@EcRcH z+uEZDSsw)&lga4Ma3nWQgPZT8gucRz)T^`)UM8kxur-`*QM*32N^gQ=Jk4SpN^2u% 
z!+|!|GR$OLe>adw$oEmUdu~`Xx7UB(lx6il(!tgGPl^}vZIYiJFX#D}V8s{P^+x0Q ze`HD)`hU+K$)%$B(6f4(coK;*2Wz$FyfFeD@Omc^-G3fl1;EN$N3!u{wDq81+OATD z2bptM!H6}CfP0dVFN!YG7zoC=A6NMVAg^3UkymwV*vts7HUXK8CMbXvuu!d+CIku>uD5@aj| zQam)CTVL_eO6R4+nYneFXIcLaq9Ou=`wh1#6Ugen71OQK|CXS!RLt{PsYI^*9b{~F zRCsL-SL7^yNoYMpOOkZxh^@ZJ>RF;lfWOAsaq7PVt(ZChcw4l-4B``jr)0ln)*tEN zJWCJ^+cEQ8ovg7*65_-ocISKUA5EXF&0uW3kw?$rN|8O?%t#M2^UOyu#L&4(Z zK#7zU*=|iwxgt??-OnIOZmhsvwkto<^~*KM@i)E~AOSoaJ4)s{W&VzM}u= z8o?5AD}cxx?A7Y>Ndjg0|Lt|*-e^&hOF-7rvY47Dvc#=nk!?ob*X#Hsu2TVR7+O0T ziSIp)R{?NnN0Wml_Gh-RRiBz*({%A_bPo;=i4NU4P-(!_;~MeZyZ;7YKmFW+_U2&z z!Ll53t7dSOUkFouAR$J34;@<1#K<#0a7uxk2K$2fe_+D5tCYv;k zC1;x~Sp_^TLn!OA+7Ti-EyFYB+3LeKE?T%Q7E0^KfG-_RcG)TeHaH*q*CEGqPTs4|#n;td07h0eYsMi#WZM%g zveYZ&;IK@Y3Fv1!E}`Qyf|+MH&9#V!VO?1I->f7>FomN{kS5Fv*kHlqox5V0d)z^OxdPA)C)%q`KEtSyvpHY)lsLW;UVRPEgUcaaQ z+W(_xR$o^qb$%p32AFb zVJgY0cw|SSkTm)*K>%5@#U~l=1KUHSzCteS#b8*%ieCOQxO>@u?R5M0-|h=2QaHXvAbYG7c@(T( zCGWRFN4ws`ip1#g$v+V<$xvbcw4Y9jsOOPRmfpTeH{OYd7_rI+<&A_VyGYipK51(s zy{I_!vYkdLD1mEDi+r;8@YMVVMeC5Sne}GqZ~VX$6L~_F-5VCcPk<#ho(kykBl@pZ z1K}+F=YhUxOcWo-?31n*C4R80*kszh+BDpBzud7(E9XtLL+LpJ4^RQqOmc z2){?i1sXE$zZ+xU0$c&y<9PGZg6a^`f{syz85IvkjJQDGoYhFHR_4ih`YyTq?WK>? 
z7kt*W^QOnYS+knuK*gSFahTL(k@-Zd1&ySeiD{< zE8a?kI@WRyB7fbP3+TNIH_@Bw-uKKE2yfD zNw4g^EKn@rZ_o~@J{5Q+^=e}~vZlA0{0s;r{oC25a!hs!qi|x@wEMHa5zaz7U&12T z7GqdM)7)D3#VpN=maWN1t*ffH-yf&lus{u$w50x{q#PyfTDBO_ql8&zcX}}y!(qzj ztl;VgwNl^K@Mcv?MQHOpZ*!%k`z`}8yEE2FS?$Ad$|t}RM^FEo=o{64J=ci-BQ^2H zAiE{kVa?;sPX(HRm^q7xEek?2gnNGjSkm)xn}kYO9#W(oX>9wrinyrY5B0nB=)${yZmvQ3EAucaTZXhygwSJ5h%+M~NTRFI&}D zQSIqzs)ff)>&UZgN47c4IrNdjD8FTSKJdWZ-ykd4{5g$JL{Qwl*lbWlBA-e0jqd-f z8$8p6^8TM{kp^~r+ZWAC;zvBaN^YZO@n zU3QU)8*h9i*s^V&4b}3cYgV#PUNdNCgSRvoXc4+la0)oM*FeeBsPb6Vlo4LD<32w| zmzWg^WFBQ$!0HE;Y&pR{YP$qnY~T8%o{s9jw^5P)Yg$Byf)F%E#2fKL?MVF*PC9Ju z+Tr61d%xR$K}giozHa(?&~*dkD=lIctMvbGfQ7>!$ZZ92he(r$Y%R;hLQb0-v%a7wJ9V9xes7$SUU!O z*;u{;D^dGsFuXj3Ra;=b?-{8k=!dZS$j{J!G~Nmr zP%we!M74PY z9byEt&#+880Hf~sP(Y9V2H@fE5LgjId;?V3O~T zZf~3fGilVFNG9d0rnO|%oE#vUFElinN`RNyv$KGWhFZKhoendg_bjKJ!&>X=zJCi* z`fSnY0*o}%(KxSv2(ye(d?2q^=s&7}TAObb|Hv7)+UJq6Hq`Ifrq|zG?jHk&WT0(( z&DK$+WkO>}5NR217QNF(YS5@CzRMzgrzNxmaqIJx^hO2$mG%y1TfZQaKFikw4UEaN z^1w5)6=sd&)s>+F&tiSa6hqEZ&1{TKhn6|4^1oG-)wF`=Lv7!niu%wT)p@FWx3u`@ zjXB{^Pj^WuT2jI6!~8HNAD!kSK;ltJympZr9peb{l44&7?hvC7zWLdpgMAsa%lo}i zCr>B=(@aJddgGsiR{sqw62jbI(3g=ZPj0=W{$Qut064gpKpN{QBzk4gt~V;PaoKfN z`fkiZ$9I)ew0r;uu@uYrxeW~|fOFzHJpI5e!+WX1pLb^lx8B6mQg z8pLYrBlX&v@BVcP8<|bdA|9+Aw{Cv<^Lbxj`I%U^O&LyikhcQod<(EPb~}uNrQ`5Z z@NqJesIG888-S%{TZo~pbb{9#r0{cVuxPkW)oxsjGmeP$4T&~Z{tYGP4ZNaafQHOuTk~sb$f&MbW{7!K+@9B6+U4o0tb0>W&zg^%ivgde_%w&6 zwR{k6(XipG6^*C<%&+HUdm4DXu|fUF?KgeT#!{)@8Yk_1u@Ui1DTKVSW6eHU!Jwru zx2>PSvI1atJjxJO2B(E+# za*WUQ&SGxHhe~AQ?b57NpgoS%In~r!l-$9;|2I9)YsD+{=O-XkD z5za7QJDz7~eG}Y9V~#hOli8!$u0n?2r-V2RZgf#l2jRzo&GrK*k)n3=ho7<`H->Sx zpM1z6179LD%njPMBEyIJ$mo}C+9t`dRxzNdG0S09N1D?v$7GT*3izQ)gY3JN{L{gS zeywg&V!tQJ6CgfWcQuB>9&o$hwS29b)o_9mL!!*`9fm~@XR16OhgOvaM< z*n=pRiAw>W5aO_LzqgZ0M!4^Dp^Gc*(0Mfkw7?r5jkEPh7aQD384N^qDJiEG9k)39 zMTN&^&@2bHu_7I+NU+nNwV-GXXhX6F;KtR+m|R)^(vne;7^GsY)$$0dky|tMdiygu zWazY(EW06akr1k3c%@?dvTZa7U( zNk)TY=?uym4lMd*mMpKN6?M)mziJ*TK?4We|iGWAG>T582b*c&~XW{}x 
zJ`4i83@F))*EA*_Y~CcsO$g+SEH8J_D#+bKgl9V)B173x0qf~h&p#4+5N<+A7X`x~ zh#? zH`BIP^qyvvljuF%e^~Uh5i34*riTsu2 ztR=ZE&e_tyg2|5!Y1BeTztm?=R>qxJ^*lSD z`KDZHxy2TOy#?2gYZXktCTHily$vCFMO2#it0d(5Kml^Q(?aooB$%eptt=;&YNYqzc@*20G+{j$@sl(3Oq z!O^jC84J92P06a z9(?Y3*X&AFj=DjNgV+&vqc}Srdguho5xcm7oI;=z$v&Zz47`ee2Pg?UY^$&*C;@^@ zy*@ojd?+_=`~qZ>tg>P$PWu?0ab{!wl*pIq+U_KXT9a*dU0A= zGn=N4QzNUr?i0#RfoC6urK7D?2o!wOG zfRP2-pM#Z+ch(#(4B?JN65vF?)KK}_+P%r=AzuJA&VbX6ARFgk$V!+K5WXuch=)6! zZbSBm_9-xWKF_N13B^q=cJ~Mqfd~0Lb zm|QO6d?SD3Lr}IyhLG+e#GqTtl#H2BwoOi(lu>PNyfIzYZqN)P2m;~aA{t4TwGQ%u ztTjz}LN)d_09Ru4#4PDkHM#D`lW3%NUTpGEJDHCv{pSW2VzkM>4@k(Sv{#Xhf&S|+ z8j&>Gq#!BR&&lF~Bp;x<`z2$(k5|y=xn#(r*~IcwhwRX~j3^O@WK4s!#j*`gdE)`? zJf&mPG#f0Rz|EJg8#6fP6Mz{YBf5NoYXE1O-LF;4*9L5oFE5~oL8o$G$!C;b*7sy7 zO7rVLM@JR8q051MZA2e}CMHOc_Y{2St4*4XOAdp!$>rI$I)>&y8!Fl-)NeH$zS<6D z*}WcJ5pH~2IOCa!$Bt4UL5+M3XtFJ1YNrtoHdRA#I?~W#r{DR)((c(ZY~y zXEN&{c*?=SIJwYBVP!i~T3x2e@UwU|!28BkI4iF9q-O&;wB*oR{Tq)%jqX|4+vm|S z$yOj!qAb5`ulte=8QVN*=(%_j{Rdmcv6k)^Al#A9f(Gm7mdj=RTxeeI$GaviQh&>2 zShjb&mJiVYj|hcvt7ZT5U}+;d95fEK%b5JeS+I8Zyl(;SFzSTCxIj*3#3&O^{37{0 zG%+3+K5hd$sl6QIX-3%D6g0TlGeCt_8>^F$`K=jPk#dG?w66obZJ#y6PPdD)$rHUe z9b?`QF!J8sFJ|QnnG&x8LEzdd7LqgHluVBf)`_2fP;Ch^4`yZb-osb% zcMZT?LK0@_^F}XA4TxS#Ac<6_wWGv$7yFt9Zzo zb{is#h7CV=w@36^=l*y9B(WOXxZkIPTV`XP*5Ka_)cXY&tO=Vjh#eAbYUCJ;H-r0y zrs7QfId2Ya~^Qh&{r@lH^?t#$4((Pspn$N0gAW_ zNQ>ZPn>YRp9hBenE5F4J2afb2BrCFv&jW=&D{!WbS_*=fJ9z&Os})&;P&SqUNGwXY zA`-vx9Z*Ryv|FPFASzsYb4Yz)Fjmr|%E?lo2`He~uTj>Gyj^rn&iYzneICBXI(Im8 z)mh#dX&EUANR^OtSW?mejlTC>$xf>lX1(Fa+D{sVudS%owFCW^AYzBO#uB9lQ4F50x;Xq;ZJj!PEW)hjH@+Bu}g zZTCn^^+shjjnogC@5UAQ+4W=rkW2=bB0V9ww_!&pi^sxP9wh!(fF6xucLa_aPSH1D z`2+hviI#ELyhcn88$2rk(mGIiev`!sPH$s-23)==8CwO<4wxkq6PXyuT!uKyEjau$ z&;cIx?Oog6(b}>2rwuF}+EKznVD!~^%W*mn?A7-6dv*q9E8UriZrn(Mf}G>ftn(nq zG-#KJ<#sP=bVA#L>!!w7nAQf^u!&i@HB~QP< z-`Mj0J#F&Hx?}Ytz<2!#yX){|tY|;DKoJ-JymsG>hD+EAWIsPNz(cV5SUdw|`3JUR zJNw*}a3_A`w$~>e#;ihC|0Srs|7`Sj>oUOOT9|&9k$jZ{tXhJkbI<+84IiC307~U1 
z(Fjr|+32_UNnOynQFHjoIis~rgPnG*q?t^cjr)vcl$)G+xEZT?8<<&Gl7ZSdab2Ag zwM(@jBUWV4VAhtXy?)Vvd;VKNuj9-8eOFlI*P3*pOvbP*^dFU5NG=qC`n7F?IkG*i zr`J4TySisj{k`I&F6ildG|sZi-0Ylq#$5^LfAe zTR)c>x}!H6+E!M5nPQ005oIeM_-d)c;qvVlDY+cnBubj*p0C*DGC{An!H!yq<0 z*fMe!f|AY1&xi!R({)jHDn$J8ITCKVHpM5xs2b) z>UE?V(U|QXsV6FTkP%mH@z#Tm#p_lC6cdoOC>dvNshTM6v6!dZJGE$5?HxVRf6pdZ zg*DxefHqK%jc6jfsm+TeaN49$JI;>;0hy%%ObmT9A_0loEWttJ%t-OZLRe|icK^Ol z0A87$LW~x;C^u%-3aqjsmZ2=N$kA~>2qk&q_K4A9G2%c|d?c;)R}DwDHXA}`Ii%(X zj+^z_aJ;TFRCdJIhI%I!%c!4%@ok0r<)ewL+lB16+l>g0 zXi4<@a$nmukt2L%_~XX(6iB%hfoMjORtNf8rM_(gZ%pAG$GzK{ACS05$+5h)Di8V< z{dAK%^Qg6jw9`(_IssidknMV`rT7^GAp1uQ04<`qkp-$Rc{EvGaa462gYMlqxPRRy z?EbWH>-+X=L;uOqe|GeLm-ZR`Z!NY79zV+$cMDW)ve<^qK?D>l02;E!N*>g>{N?0gzKoHJotv%UG`P|Ga=PySFaLycu6Wjt%vfj?o_p z-#i>mnazD{OU4Dm@aYrWwvA1$NhL5{Qqx|2(eUh41R1U>qvQ(>TpDPyDXu~bd_3t2 zlVi@din)Uers$!?|DgPppryx8A&{R}QQSH53F)si@h~y7C#{=-tZLtc{g3 zz%dnL=LP9rqN5kX!8|u^OycKL^X}id^pr#=(q3}inq4R)H6Hl}57FAf!DlFx-Z-7A zXj;@48GvuHx;NeqB`&rcSUYalBh1P~t4z1Huv&?)? 
zUYt=&Zn4^W>*gZQY@*i0S+vC3PDdr+T5QA5a0fS*!V~%OCjhxX8_|_yhcjJc8aE=t zqjH~0)D1^7go07)2fdavj77Gl&W1vMnK{R2SU2W1@)#Y2`{E_ng~e3%{wQwb()tvG z*n$kBJwoMqc>zUgSys~6TJAj>hvw6LIPd@IMntTuLY6V5=l{~yp`z}(-SOhmZ-Di_ z0jh137?RT`p@aO)WZ%B&H$DeXhYRyu&jx#PDs4J9yYQ{YNP5&4&2)5OMFV$dF|Mj)z8_}Y5=rqH7<`H-W#s>PVu^SCH`wemw`dM!% zX8WV|w;&RhjLk)S5*ga5{^Z6ev|3wgT>cw?`SUg?Pg5K!G>=N{h0@OOGB->c!U+tY zw`G{K;{}@xvVLln_?6G3jfK3Jaw}dq&_M?^?BH8lwkg$EVd?ad%qJ@o^K0>{uDG>S z#AVUW$0K%mac+uF0E}Zj89r1)y(}5ewUq;Vvz3j+eX6Q%d>X76TfWNcluV&bk;iA* zFr6q#Ut{48RBudR`y>DGTh=YZYdJWNRx-BeJY|du5fDrC-d3vm(B{J6EoVnSd%eGZe@&DZLzEZPe z1$5nv`OwZC1}+V3Z|rJ5@g*)K$!qQu^qzhtp*q>&L>HrlON8pSWxy)J%f zx`J53U$I@CD9!rLsECFNeR8b!{}ohgV-^qV%trW?U~L+@&+v%oZhLqY&}n)nj82Ea zcpX;!;6Ha2WGAf$WEEzl7(^{mSgRKfUt<@*U1LFcIMtNnqYFMNc+IkiKdk=mHnIBi$!>Htg=(DzO1ZVNRp@Brz5?@f>Iq)E3 z%+Jq%?kZNFxmeYdF66rE&%oZ8gvB7Ab!z?$Gcb(xxQje6G&!C7V4>|44QGPBgIg`P}hNxr^EE$d zLQ&H-Ee7qfU*BhVO1|h`yC_u1aIqrl$}U#k%iX5tzazlObws$0#|XY**Ghhh6KwCk2f(UcgXdo2tn;WmA5fu**V z7esv?AKB-B0x)h$c4k4P=AdhA#EOh~5vsPwWkiR|5l%ILVp3&`#>^bKOUe4p`V3c~ zIJ32Zlo(S)HI|aOPKLfZ$Bxwjfna-tUa3F1uZzZn&rM5x^KXEU^Y6xj7*(4n%ZODI zH~G!KTgx%LdY_hm60#|^{M%@p6)eq^0AsWe&oL-HsVUiwe+g&VKa(%ATe85XWmhw^ zln~Xz&^#GjrYriPGxV+b20(9O#49u3jlT;1#3SZJdp2;MowI@!{+}JX?Ufw*swwf> zIk>mR&gL7OnPilAtAWCX8xx4c-4bsr1=8(0r$PMwGgmhCHvYMVq+=JA#+XPhdqqJi zJEj(@(6LcTQ`L7+mBI)84BHFpg$$vU1 zXxjr0_QybQaBnd~wF7c!&KA}ie;QR?rRRSK^Q;Z0^wE}4)y_}fqgO;Df3%x=D>4fp zXN*jH*x;`>r*fj-VyWjq#rA&*XY9cg&rkGcXCwetu}|B=te@B8TEq6ZDZcVYj$wnR zx8?|L-DNalcZPUtJS0+gATk1 zje)f6@)&`3_MZ-2H<0Z}G}DM4-Ya|{0q&z6tYFj5^~S%1s%?CTARpTfOr>l_d6ws^ zkulM{<&W%N8YfpEtDmj;B`9QltXF-ld3)As#wvfeQQ<`UH~6VDaK_2r`}zb0_ozKX zWA#U-L7OEG_+{u?n>b`--%x*&l^5coy|{y=X~+;Z5+^psZTAFdJumvoZnjLP=k}ku zk`pmr@=3x}N}c?<-3u2|0KZG+NG@SHI5F>8Sou8FWFW(jLyD*+p5uT~Cu;DntSNrx z9%Tv1I%R>8B=6XoX-mWyC@9)9%jXyp>lhI5S)I7nMl=vHi%lY(-AX#Q;o%T<@fuN` ziBAr(bnp_@4az%pIM;wF73fh=%k2u6bK@U{)R)~pQ`ns|zKIF8{>b&9 zTA8N<-g-1|7nw;=)~)HRs%5v!4!sv~AcMlK9@+eTqw>jq>YMRr_cfaUNqR59$ZQll 
z*A=@~snqDPUfa&!E9l9^>XJ{@E~)icNe2b@H-@1-j3tWtH5+rchP`Lc@0Z*5Z|nK5 zC9upujR@68SVsm|up4%j9-C_PFa9XPJkqWN>y>6CwF4P^Ni{xZw9W}XV=iORaQe|*q6#_TTPr7Aax{yR!&6ChcvP1cL_{SeZeEy^? zM~~wOHaKK6^9sf11BsybT;*rZ1l^ptmb@6;pCz<6ZUA(tnz)ha+>UDUxJ{6)b6NE@ zk3I_??gVsEQY`QF180<*41ONnvdw7u{rEe$&Fy-8&7M}pU77);qSc}x0h&wzgY4?o zz{-$I{-~%Y+q#d12k&0;{$D%VxQWD#6K(&VnR*+Lhe2ZNjU&;@PsKU%)j{#^YpMWP zgJYgX4MKIk5dn5%0@_Kci0sXQdSH^yRt$NE#V)0bej~!M)ztPi2ZpnF{qyrxp2PBs zE=c`&lWw(?mP52(hyoeBr6?x(DKSk=YTZ+APGt3s&6@SstK@j8J_wm4WClqIvHs@Y zFr$8f-9%F#1ga*(RnmhG38q~`o>p-tcLA93{=NL;DB!9 zW%+hg)1qd(Y;HMceY##zvP~`+B|Ca!ke%24aXb^ROOE~bj8Ha4_^W<0!pL*2jJrN+ zUv(g%>-8w`m1?c;Wn0Fsa>Kz)XFCb6+d35at~Pm#kc$y>t~zadtH)bX*Hy7Fl<8lfj$#e>oP&l@fQnu zw5%aWpA#7ueiH3_rlJ}5*DJQG&0)5FEKD9F5Nyfp)A!jY92a}^E_9$@8JB6>_ z_;>K_esUD=ey;b<&qv!P?Vwa&WVf_Q0POcyOV#x?$X}0q8N5n^&PRGe$(W9u^kNXb zh=CE%y3w8mdBu&ju#O}v&V6{m!cEKH!=%P4$MOHhIVu2-__l7D8M=9JnSkQ)uyIZs z(vQvc*>+m_F2m$L=jT_HRPXYVVkP=; zAgOMmZ5okbqhuT6r4xw1eN;C-0fG5|iGZ$0$@|uNf6&^|r+Yr$S{u@bs_Bz!8wMNK zdQ$z6R;J4aF9Lm*QLvmZwRn>ea6EA1#`j~?0Ept*{?m@Cr)`$O=5TMRKRa|?=*}w8 z=2xsqA`abnqnNWR3{r!CriP_GLl0uW+Y4!KEOT|oL(70fiUZmoXc5=PCtF{|JQj4n z*~@F**I~c@_qiVnfaKnO2arAqER}n<7g3w1zSCS_lU>X3-XMd0sez$F>*~>~F|cuZ z-?d-S4p5H~Lq7!-A4P|Y7$eVWo0gc{eiW9kC-4fq$;=t8INvk1g4`AzIOz(HaCV^B zw{83@edKr^x2-j+MCJtn-v}K>oH~hwJ|aUW4m9!_`KB8;&OlN@w!dgV$jL>rO-B1R zHj{m{X6Kxqva>UHL_35illKs9icZQvJPZ ziss3Hm#3cH-=Pu3N?7Q7{>QDKkW@eVvN~$*XomuL}a*sriB7hrbC4e9#E^z;wkszo`xyYWD@WKv|4tlX2{SW}N z(p@io&Wz!dx--frOLODKN$9@2NXAMskIb4JZMI0!Hq~x{67&^?Kg9@UABuwhtxx~R zc`_T~&6QchjT=c^E=IEkK!fdRHsjZxM)A?c*v_?v3tP?T_W~6eCw4r2vXPjv{XAZ*BG`uJYI(r+Z{u}D+@)}+1jEL)&6!O1zx|>w?JcbxzxKl~ z{B3Z51vp%2B%FXe+nNKVn&cdmWU~S@Pgb5t+MeRMXjW5eP+Cwa^JSOK13mQ-6!f0~ z88E(fV?;dnKTom$Cp#P5?73(C`<}%v{K?*&6cZvvw%V}YzF9vHXU->Ol}Hu)wzCCHlnzH*=h%TAW~~FFWUr8z&B+FBTTXmV)13IZE6(I%tuZ{5fn-;B2f6&J`Y47;fB zZ@M4!>0%)Y#^5WxijJ~<3)HzC4f4)c#-^ejC1EV)%WSF4sS)I}c_!$NcZQIY`eUs? 
zlt2nI^*%<6t#0x2QOVcFe}};pK`GEig_gnq|B=|AgG&r4CUKD79bOd7((Teoj?VV2@~k+;W54!- z6+M9Z+1??WXU3m0E_&Q=!IS(Oe|w-N-&qxllJC;Cj+r82X9ZCr*ji=x-)BSSfr6Nd zolYSu%$1EqM|R`J8&Hiyc5_GAuXXQXctHy?bp&WQSdb1y-l9*^?t0cIN`AcY`ps9= z=gkwXOa6Gc4c3hSmx+irCM$SZU5@UuH(YdB_D+)O5Kjj=^6%+ej7T$8d^Il}l5Pb- zFSOKezXW@t%6g(Jgex@ zP0lH*EVmSb8tZ;PnzKc7uR6y_IY(qRv$h;x-v1wZUT%l)M(AFte~m)ehKDJp*W(bb zCzn!PF@-D07yk~8a#OXV&+I$apQy0i7yM)cy_QP(lW1&4+F-!O}?@*&Py=4 ztqsUh@QQPpg;*K2MXM_W(Iz4~w7KYB@`~tg+&BYBzID4dW^`Jv@s;m{Yy~w2WA;!_ z7B-$bO-;ORG~Cnjv%UNEUsc%C_PFCEXylqadyMS6rqg32?Ym;0f5=Qpub{VI!uA>r z>Ci9!`rA5p*#AxODjfLAIDeFY${j6cn5u?FOlm_I4ELTr8y4lmr^I4IW>rByM-j5m zo#nNHMn22PQ>Ek_EF`-R{hq~Tg6jTO=ts|PovD*kFsFmWcU`l9JPRL>sWJ(>35?eMEx*l}kB`N~4I57s&r6!EPq)eSX6(4;FT>M! z#yxNK1Lxp(Tlsm-M*r&?^|5MYFyiGMvK4}?qxNz91mF;6Drk#{eUM1MRXlQrz1 z0=+j!c~EZeI5~bQ!~jO?jAES+K7ED=bFuImFAn{{she@7hdJW##ahW{T~}n>f_iN3 z!(rNy>*s05^kXBfU^Mk;4W~WlVEc2rT@$3+vtu3Pwe3~%Iodi~OuR_V<|$V7XF$fQ zmc?Zgc|uOkINgTw^S^X3H~t=;`R4}h7PeQ{23s{J-RG>~z{{KKV|B_ZUT{E<#*xu4 z4U3Grv==}J!O3-1@D~l-dt=Lwg;nIy`oDA%)Sqz56wzn!EWR~RcST{;0YFs~2i zov@bKS3#E?N)hCdrojo;Z)x#&!0SCY@2Pa+r|5Qa#W4|`;g?A25z(->scK%4;2G7R!XP&l$UCy>i7lz0 z18+f>ISy^hZ!Cdj-H%7r$Nk46>+=KLyIo(OPd^KQv@$C$fKTQhuuBvQIO+tI#c(Ti zhBf=F29K3n_`78GI=fpRbQn4=@>T}vBN}m?3ND!OIn?w8+Ip$vVBX}6WD*!94!Ve- z+h_TQW8`MqpzS{^a6`F+Znv9G$Jx5D@8bW0GqwV~tvaZKwANJR5UrLPH3Ifam$JxM zofl=%lj6H0FfDd^C-JThOhW2f>x3Oi;RRSnhOTei_+&VN(5@z%&lav{|Fcw}u*Z_Y zfmyTl&{_{RfyX&M5?Q07$7C#__9H6Da(QlJEdCLyV>r0?BEqP;F$$jSUpdzoz|!Z| z?COODLQJ+cAR7!BG(V2o+O53@MK_a9Brc}^7yM+*81kp?<#b z>cA9_GOvNjBP%uI`=>AGS^Gjg&4W>nU0wW=5| zvTm)2%t*qbP(6dUsb&K9zJef6|Ek7r5Owr>EMFb}?uN*q!@{2)OQ7w$k;)NToZ5!q z#3X1^M4QPKP)>8>#uq}ye>y4I8Z_>uTj$EIT%tKXBEj2yRsuWVu`s8;RrS9vY=dl-0( z-H)%_k70GqU_2Z1X5u0Am}iPG!qxLFh85vAqy6_tw-TOg+i3WZb@Zc;);l`BcUX@9 z+65@f4!gIQe(CPf5)v08ab5EF|3Dt=7r62d@Jr1Zq5wyWmstgeJl}7fvPWnaJZOlY zrspX3l*C2;c$oBhbI&lpmU!IEoEXT%2a1?1O04qtHEqYZLc{t`|EcQq9)PK+YYXo9 zSlHvvl_Pf@MTI)B#5|%n?pq;uy0znOWO+cQ)~2BeEt_UdsB7`76zRr|Pr~z!rOo_3 
z#N1L=YR7z8nNrEu?N@8)+<(>CTw*x6NMpSdk)f6Ca>KpNi2}1#9)^>1Q*v%@HRC*s z(Wv-JyxGBc@xMDrI!vb72qdQ3+sqtsVqM% z6+6iqvmIS4FRH!|ulZ6?`E6z@@;a6L`Jz}a)UY+XGLRf_iCf+cvY*v}do~?s^t`f_ z4m~MQf>t@FirhViI23kSvx4kCb`wxANW}zeij?#LdX3*cb z@u48A;i(5ww{~?PrK{5a$;_uoA_vH9di4M@cvz?g3CodLFgs%`|Ifx;Z17RO?a=P= zMMaRz!Rji9=41Ilxi;h&n7+>?&w0EQLHJi#nx}18i;_x%6Kuyu^CjtR(NAwU*^2PF3COD?XsV2 zj*C}x>G%lyqw7K*sRZJ%Pgc_Qa@yNJJzorN{pz^Ez!Qx4Mxc~+)MvUde1@~F{kiLa zJFDQmLFq@lA-TxfOa}4~AIV)Sybgjj@YT={x&$8a+Tyuh# zRT0?eNL_0`?u|ic=tM~^%9+f_ltu}P*dwuTj zl|lUW9xEr@&4cE{BXY=E@hapR{wuG_WTkpSy{8mgj>b7g(Ejq}815Kkm!2J|xgL8? zrC%8ib|)7yvFr8Y7+hakDE6XkB^?sWxP(^_*x9WJKi}wP7WruPL7L zRnFt3!IoFyh7CKKV-+&%x*7~#J>|2;MRdANkEdW91+=TGR6ypvIBzGV*1sEK4}q!? z(MpifhK>%I3X9K5yP6U2KN=TVX>tGY0PfnGqJ53uehrwRRkmJx9yM^XZ&b_vvm*ls zbhv16VIy{x;pG6F?`5=X*RwI47b*7uD%OK0QcIb+J*6%;3DAhE=+@wq7#*K9#=|+5BqSBB4CXmy9x~#D=A)<@&i%5^WsO-j#HzAup4)K;| zQ`I(Dm_*(tWwi!v5|Q3ij07j!F2fmw7ZBF=%Hsj=?xH4cIq?#6#qECuPBUr!R9hzPO59_ZjyuR`ZkZpvkhAQnfPFZ1W)UrH<)@`wHN5R zYW9ov_mz$Yj(QiY?IiCG-l{60WW;hR^jc#3PI$55Y0o61BXk6MaxWN{{16}6#;ul9 z-MDcwJh4A(4%+_P^5f?{%%#cr(TuA#6{{_SOZ9ffbLwgUYOnIqKt@!je>27oRX^n1 zk_P!oD;-C)X#INMkZ__kH!zXyLskb`iDvJ$wRZ0e&)mo7 z4@!8qI8nxtdRwF@dP^oSZ3Zabp@s{8tAceo_m?AAoA?C^FEK9$$MAdCkJI@Bcf2GI zDMFbwU*Z2N=)MIwRWRF$$fp|x6+1_Yf9}$+&>l+sy}Vx>#E)$Pv)CZ!weRrUsm}1c z>wD_EKdGRrl!$hIS&nBY?}+rJ*TLBJ1G0m0>w8Zg9$O?Tc z)|xrz%i$Gc4*9|}i1T8}T~J?xH!aqm_+yw{=*2NujFui0!F>;`ZS(@D=?QgSgAL1z z9=%%I?nivzctkh;Fs=|s7$Wk~U6NP7v95jh?>`_r_a3iH9oecAI_sp_xz6|{?S!L> z2f9XWIv$ez5gqcix@cF;P>|rtc1rT{1m7By0VU=}Ymj>?`%#q%$$?(K6;v_!vHA(n z>DOk=vk;8VmRsrXYdwc_ug$n`YC2C$^-(Yw$B8v$;H}{-Q*;t4%hl5Q+HT(afA4<( zs-Fe{UfWgA<2bLio~)Jm1pK$HD7CPJ9#20Ym!_MZW6gM~>sgYRtcHwFTeAc_Q2Xj| zK(;m0&-tm-JZHGB!HBJf^w#p9PA8h9OA-h7RC-TzdJekYgJDp(DC3~EZ?es5m;uY+hDDjXN7H9&?H@>{{)V<%Pt z?aV2Yc=pJ<_QrX9J$~;uAUAHLFl=3dq;)f``)d>Jf^NU|_-XfNnN_sDD2DuQ^ZEvO zo9-3f5D#+3MnyhuUVPM#LB<)qGPp;@OdWwnLW$x53V1d#?+gM(JyehM{%fyYiKJUF zf8>NC2fWdo7FmF~)TJc`7EK108W~IJ#-XtUuF2qM@%2Eh_Ka5?l{2-DzAJPu3xaf> zpfqwf{3GTAOKBN2+An=|%gjv& 
zs!V*C!Mu!AEo2S3^(~&?Iy}zLgpC~vJgzWPYNod)^q$8jn+nBL&ib$0)$7rP`kVdQ zi+TizTHhxJ?iuXMy%UI}ez)HX`vyeHD=xm$Q_W=$zD{@=gssb5`IjlA4 z@DkB(@a!fh+p@6s?gB{nqG7f5+t&6WOz+mla1pLwdrA++tVJz!7(mZP3 zDACnsHH*0*X>RRuwH-6M%e}HqHwKvw0d0{^pKNf$)3&lFovrw|Fip6#{^EgO4qvepAVT<*9 z8Y%m%Mzm)C1;Cm${6^G10k|Xa_*Cp*_jWC2g`4Fi>85x%=LUo%$i9Ch*tyG$7^lf% zRUvU4b*}peD^Ak)$thxTo8tZZ*keT1by#=ghBml&MU)9DfX>P=b_A9Kt&Cp|$F?(E zFdd@-`IW(scDZ@n{ZHbYNE8H*k(WMc6!Cds$@fCLAS;zAfy6F(Q4|~3h~6qXSo{zV zP4m&d^2Uut$jCUVr95x9_hJrzje(ozBwy_m(8zi9*c)#^bg;-vAm6A9ao!>8BAMph z5_2+GAM*39HMgjSf;Q_J8Sq9`oy^?LpqISReWZk3ylzhWs4{;bBKtD(7~HV9XM%Zs z9LRQNj_HH9Kax=S*lher`hGl%`(I8Zxa#=-wY|T8&_^GUoa;*@dKixaVErNSaq|9O z8&{&je8aSin#RV}a)}cn8&do35w1e{Z2RKKJo{DOmRnm_1r;@5wU#XZ^ayG>N7Rnt zSkC}4#-Y$^FltClT3wUxPdT_x^jI;bxOK+|m^p_loZVKt+g|s^AIM^#yw`bwq!9$@ z*%`WXJz;bDetGkO*LTB`GcQ#O!D9U3Obvz^<}wOIfw7mXw&t0x8#n%KXgzuA)5GdL z@5$32WJ(cl%Vbvc@cYf~7s2xcYohogtu*wAhKze%a6>*H`TC~%$)G<3A1znX;N}+* zH8|0R_Mx)ZBaTn*%L+w)6C&|NHD1}Lx8N9p+O&vI1M`tH4$bX}=?4P24No&Htv%{C z9eUBMuiph?_elSh@bFOptUc^WTXRKyYF05QZ1AyKx~jHc44Z;jfA!0jj;e}jGQD%L z<9Wrup8Hx_i7weDhv`#Jvd`(e`fak~sy*LkA)Eg{Xk{T26#|TLS;X}5YS|*!y2_X? 
z8!AQ^4fD=Mw|Lz2Dr|ulsK7$YB&kX#8w_E%q-22cE1`fwmjuQv#mlRlSp8Oj zs7nys{0dJOtTY_ZS^LufzbsbqnHf7X-<#oysfHFwtKPizNzuyKGbx_Jmj zaGrdHELZmxjXxF7=jTIQO|ARee^zcl7E=mr3OWcYxNogr$kDhlxcU~zt+mYd&{KK& z6DrnIu()UrG)_XthE*eVc* zKGcuquz-EM?P#<^k+ixhWbvs$2lk(e4|S}W?XvE66C9n%xsme*wdVQgcQj zbji|z5%WI<=&SF@Ck}HvaIidSBb?o{!^y$@uxr!N;3~$-ea!5km5*k44i%BCO;e1=^-3P@8;j6yx8Bly z9T(#|7%L`~{gcSMLVNrbG2X)tph$q89i*tcUk#>bKkZ%J@ZHw*MY~`%;u=+$Uw=@T zo_D&A^=;5L$CvC0=K3BL<49aLb?rrp)_j>Yee1#9_^0tU7bpvS#v(TJ-Q;T%_m-Yl zbp6I3fmJQ$fQ}s6jb3LOR&@r^Hbsl#1B>noyv+iu>BW7jJDCr`NLrSG%V(p5eb0O!;sz$!EtETU?xDBEzoy zy}VLhlX!&bos10NCz+HOKmQNwRFzW)qnBQURmF`EJC@fBgZoy=E) z7SPBb$r3?Q{ZU+kldc||SWgC&<3Ca2=%!+)-zo%JTgHcxR^{_L`~?>je*!S^lsOYf zQ*%u7UsdU({x%1i^bQ|SrVSj_gB zKTP#akWZ@PSfo}%k=n)@ThbX9_i&M@~ z$=~kI=}7Z{6%@())}SG!%tAtFqx#z2?Qj2$JG%D+#iv>{m+OxXUR(3FxgX;N+ZME* zkwbIi#*G-+xb;c*P*GND#;wUe*}AKyY}b14I@KHJ;B4Rau7jjSG#hS#{7O*J7WMx? z=Uo(xqvq#o*8En?)SPw;Y+M&0{~8ZF(^%o1HuF2*ju!p-o-Ounk2m9^-|V^FMjpk# z0f@n&FpfBtg^q~qj+B5Ll&AWI4Zjgj_SgKl(vez7ezj~hDg#Cfk0*;t>g}iezh4vw z>`v-nyn>mr0%6f?(Jo6}&*7O!u8_)06BmE7N-=Y8*~e5qJFi#Js;p_gfELjX2nVvZ zs=q3$@pCeqK8xY~(GJ!sCbara&Z?!ZZ?Jau$iB(qUwc1l_G|!AwTZ#2mhX<16pKF0 zkYCjC>W{(%9liBJ$4@?dSZr$-8|Go?*zt1c^WA`of3jU44dS8au0{Wlo06IjyzlY{t)lX^+&vup8%wImXW8hggyd=4Sg3}U;Wd6qX%{Pa?1tS^I(qY z#?~!H5ojtr(5TVLjm98{TV+o&w3g}eK+c^vfRp5sCL+)6Nng#nJm$9QH~q3MhxGe! 
z|4^X7$MGBKnMG9(XKf$v{{<7HqAp{42Jb$+hW!3}v9g#&Rms|x1uOhg@6n{UN2g-h$Xox)FAm@Mu-!N2VHW@sJG{%hD1PEc;iw*8xb<^`(}Z{O8u(smSMlT_(kS8npRfXqC^)g zSHIVyr|g!^e3-^szSpLg(|^IovG~Ua+8bxV{{ᔦ$J8Bw_dbWvQms_=J)$doXP zEy+(#k#6m}9sb#*)k>f(;!MDmd2DWNsftGhjp}@7n4ghY^6 z{sv%tAT+>Wao7Qafr2P;eTW1llYGoRQ3!R{wO}t9H)V>NRpT;4m7SBGg@cMcpQV|h z-|+YpX367Gg+!a)J-t5{?Zs$g*5$~h6fwo*EHpEQpz$$byQv47<_;kr#Lxv z3bg;H8`Zt8$yxSIyz7MGsn!kij3mk&du|MQe)s19^)@%-UeXT3s=K1VmfP{TyknF4 z)4*6OQoV8GMhaJ{&qGxisI#O0gc4^-157_-gMQ=2jei|vTyMzQ3LU@dYokqswXcR~ z9aQe#-&onMQTX(F14eLQevNNdY}}Xe^l#tjFi&&1&GGd}{0+bp(;Xp*X;g?UC8`uM zOdc_|LRogMG+mN#T%oAH)W#~cAGClXPql%=elFYcl_=+>^pc~4#b&^mYU}2(m-TW^ zH`Jzl1yzfRo^f$Z84RvLF-tn+t1)5P(qOCcF*)`38=Qnr{{51^G>v|-zv?XbutXG4 z^`X#r!P4JlPov+Yu!?}P@F(>{)?q6FFoU)?>D7%JH}dFI{#o2JeRM|u4H~le+_-V$ z?*YXSAO_#ttv4t={>|X8<9}qUK8vHWA&jOh23qbc@i(7K%o1GD{JY_r|VA8kw6;A#D+M9MUIBLCa$C}csev1TMJ zji+hMx#ObEvo^K;NccrSMAuj$mG6d07mYw z+(|iz&fH7bt-$-njT=*#wGE@=_iqLUJoY|*CAiXebo5WZ)0vAWZTB|>Eqh`4=i9#A zxbYoWoG%}EH-op1Y0x;&j;L73NcXyHY0|cK{ec}FYP%pZ46*%ixS;r|2T;P8a&RBf z-^k%--~Nk9j+l-RUWXU!`ZZq}AMMzw$iQ0`d(0iVtI~D-hBJD}IuoT!m za_&_Hoz2m>dpQkR{!+>P=ZhblE5oRj67$TqU!6Wz&oGm3u7xnPR`e^|j3QDue7Z>7 zzn`>1rQG#+zTwPw^3ovZ&Hy`XWFk^r#mp9*t$V1X<~B&1rk#tiSOzU)-inP@7KKG> z+%58NK)R77PsPqeUOTd9*0E_jz@R-8z#BJid=g~r66M4DZ#l@KpCFL&&lWSYj$y5s z7A5t_A1!&u&>oDKPJksEvDy~b3!+DKLjz$qC7sE zWSqGMfYl~RBeg~(TGYyKG9b~I{{zFnML#yUe-7%>z~EKJqf1Ok)Q4;sqVpD%UF;_v z*Nq!DIw04Ga_cQ|W_~5VTW6O~lkf?GPJUWuD-3qAEtEI&^4hg(s`W{g`G zu=bIyBIkbCcG0cBd>H--a(5N&cpKXD?zvI%liC}c3z?2Ngsh{I;xX_XD`f7>?bBk~ zUpPoU7=t-TSpe$~3}+U>&GImp3ta+rY4FPVW%~^yQRiM7P7W5Luh$vd8#iuz2vAtV z4)OMbNl1_xF;WtcKUFvwudqX0N+m_w9F$}4fT^qRm2x!Bk+uL#d zyB*|Lo*?a9*S4_-ws5=P1Ch5-2jq|V#3=di!gC~i_|d+${0YD~L5MvY<}p!VL#zlj zCJ{y<83t|8$vcC%rOsyzWHOU~Q=m)S$QTeHM^)Rr6>_=p$$$#Q0-z&1{3`b zxJNSt+SmiuYa`ghmqhqWIu<^&^R#hD(ckV}Vg>m(J1AuEeSkVBvoPJh4I+bqak#JK zPRi%+zxpSANnNujCQ?BEy+BVGJ{ck&bvT@uL$*!(yTi7Pk>wZ8*01B zP_}_p+ksCrObe+o+`2a3I(m0Wl;#Cc(zvMjwzDPM8yKy( 
zcO7f$Yj$0(rxG)^HVdI+*mZO-x;bKd1^z}9J_@gRD}P>i$rg1!&-n!4Nn32mTp9$f z@EplzmvG3pAAZ*$oF;6A07cGdU(3Z;YW<%Ha1-?0ss4ZV{snrM;<^t+tNy~a%u{|y zG8mGe1St{dt~KIb1M*xWqeMc(BgwVkKrfxkhN9CeoTUIQgk~Z=`h=Bl=pF452R@zc#CVY!CZvYJXz{UuVLEO@&q6y)k@c!wC=Yz+6W@ zutzqly@`=TZ;3)k8aiMPJ-Pf>`bS}DOuK|_rbwRS&$I13qOZ2MO(r>0i7A$Te!6DL zdm(m$~aFeSGVdcB2&Q4@jFkOZjH=i`T5=3J;?a5N5wbq$rs^s;14pQ@cA1@!gM z*IGv=)~Ew^Rb5xi?(24KSdhiQ*Exs=97GA6FjZI{0sV@QX7~{&5j`rsL6TUESd&Zs zhE-N=BxMVXPEwJT{93XY?EZWwE|4kmSW4io1SiSb2o+U8dw;-dBK0B~Gob*RnOe=` zxDzH!*p=vRAT55|g>v_PztCTgwAY&W$Ud~`LG<}AURXP@0C%qUVtoueA32bQb6)~X z=%h%!!^!SV*m{)sO9{d5%H)U4d2Mud1OBxa0$5A>)Fx2$g$HW~7m$3MC^2T~iq$(R zQAjf{#U8ayc z|8#`y8g1w*+K&9HGe;0Hc8yA&`cX^ArrqA&PXXc4FTUoxlv?Yu`cX4HM~PGGUE`Wc z5+bFQfEjKTOF3b}gx!RI4w^+WadQ9@dmTKnHU^7mkK*-Qt3_ZpZ@1-G9DAayu*~Tk zl}7T;bb#j`#Yx=MMP}_WT4)4)jm**Zs!uk)ndLM-pEFL0yU4TyFhR&=N5uCA zLZlSLqQTrji40@e28}5pudKDwm2(J*AuZCY7|{m$-BwVcLpJ6n0Wd)*xFdV9M+z&8 zh{0sC0OEz5RV@-6Ih?8@XfF&XM4dVeuu)*bZ^{E4f>o>LSlLmN3mcIN%LoW4DlLK{ zX(`$ehSNED2j7;lX(%g`q;iyL=_T};FvrBR6xXA4RM)}dO*8EiCQK*?9{&!UW539` zwmG=oia zfp<09H3$X8BA-b`$=f0F9S5aK6yTvkNUx^Gj0ddwLYsaJ;J&Grj~BDV291+p@oRFZ zt%h1kQA>(x!Nk?4-EK)0f4Wp@nUV12+S|QCYzLPFn8(N(xdS9nly{#WYbXX z0JLXm5t8Yil9g^K6cNZnW72$Xv~17E&3Fw#nP=bF$>d)zn&)HZ+AAx9h^vVwz$iSPac=Z-hB3@I3VIx%n!9q3>Yp?8*VO-oycz=)S-_5zgme`=Nk2(#wTj$>td zoO5ZN#-0qDx`s>vpq|2TVSsT-^rhm$vE;Uwx4~`Tq2bU5BMA9unva{;0uxr^3rv_W zVKfY}k~M~BU;;2{7eH9TlLeQMdTqRJ&TYf6t46qPNqPM8oHJ+65&2QF78kmw`2bxjySy`SQU!OBK4}Xk!tGvq45b$NW3&Y7O_L)*bpkjNNw^k01plZwVA+Q zDk)Vk#qhJ0-6PSqe$FAt*=`(4i&bJOx#&Z5k8_l2p<7_VJUP47Ta}?WfzLxaF9U3q zxsFps+Z^a>&6#by;LIXphuFu?YAYI)ua&)_wq(MD-GVB^9M1uip{Lvr&KGj7|B8)v z0q*<68q6cP4;9l0_Qr=a3nX{5yM-9Sa+@%g@3NOMMw@Ea zQaG+x9K*3e`Dq*xNR27S#7&NU-)^+y589ZH$kPrkqJqhXjVK4SduF&zH!p|>B@T;_ zkkm$Y%fuq%1`N*D!4pU<=ZCa`efK9!Jgzrwk^PIOK&nf2k}@y|?1>Ggq`352jRKjN zbczJ9(=#p z&*k|bl>$? 
z8r;}v(>o%)kPZ`wN<{MvIRzS5p&EvQUAMIG}NW(^=Mo`Aj6FJ&gNr za}iWm2iY!ZjJDS`N?N+w!1`1|Fie;*A&XK!d(ilYEP!|YF`gO!Jv$wepZ+-p@V`7E z6F%10+9SB0!9~g~$bgvS82SoW2Har;eF=}QcjQDsWQKFuKBAhO;wdmR(}RRs=2?c$X|$KoitVpUm=Dqo>~|ft-&~Q z1C*nUYeb(887Chm(=}pV2t4dUJ_#GFYr#HB(_tB-fmn?7yu5VQ>tAz@ow<^Q5emoR z%HYYo(kq(=&K+J}#lO|+!YOnRGQ^#gD-5?XCgyk*Hn$@uOxR9P;~ySt$3N#UV|xO_ za5z53<2^T70ADN-vmns|NRQtIh>OIT8p>=1p=cic!m@Q=35Ca=uMd1rAZ|WQbK)|w zWkjIbBq)xx*Bw(HEv(bXHXzrIIcW5k(-&&G#xmfs7p_zJO7S3|D~VlCjzCEsr+3S7 z(~d0I`n*e)TLSQMN5b_HUvMShfLj&snqWU1|#_tyM> z5B|01f#^?Z>Ewwxma;7$0ZzmSSe4cEF`7?wPor_@nZY3^ye22qpv0V{JJ6z#4qtU%P0RTsAz-aq~Fk zT~TC8l5TMlLO-Jb>eC+Mul{-`dv!4Hz7Ld^5uXRwH{XuQi!$j@2IneB<~umOc^}7@ zC__+&<=};|kjXm`VcI-!I2^us2*W(Eu2JQJvNd@_9SB4|$(UM#Ym+>O+-T)7-WgFpp;C%PLUySFBrfZZ(@~J}RJQu!ZNH%C4fGg@{$h;3#>r0f9 z8`D%*NM-vm6z0$XP&s0D?`_6Ua%`6 zFGmrLMfC~L(~g;I^T7+Z?gDX)1?00%9J%3GN`mcdokUj)sWJO8u>)QN$=k?fPK9e^ za5^LzG;IaFqFfUuOehC6MtiGo*JpO{*rb@DkckWP1J{Bh@&>{O6<)CR;d>({&8-{g{Yl!9Jz4KmNju+U|eH-hTb1uw+9Z8(7@qGUBDtaMM zhql#aau?7HKMvcr)R-PL&1z}-pbpGlGR0}*jblqf!Ic5Ii9!P#!Ey&Fxg%8>AsIb* z`t?j2i+l!Nj<9M&j@KEc{LzBKOUgas1lo5g>IfyB|3#pO7bq{VpuSokMF&s*!gby# z?N$vrmv;`_)LflZZK!Ds?A&TQhwD^sWdZ*rIaZ7~4kt{Q0H9r*IuM6q3|vM|#;546 z1)@yI0SFGsTEq{ad=mN9Cc(uDOq?LeSmfL8N^HYCEJ$6y4=vD{Ndb`Oyy7M8Fx&T0n6j!!1~}XVsm*k zG7+bc)jO^Md?qo)PLf8_ph1BN3q;wROC8@_hfKaw7T6VTAMIU!r@8%q#XtqyF1$};5;#v2gM`jVCu0fNP%0mAqe6X zOTCKgo?Go|y?03l9DLA0b&g-5PM9!Z`@pec*rv4R8ytI>9v^Gw9lX}YHyVQLGg`rQ zUj%JkxumEe74J~do#Xu!DLg4MiWI;+>%C*ENi_*&N-;Npn+e9j=Qux#FmmX65X!`v zeJVcJew=)+Y+7jJTm6@ik;By!$D`WUJGm{fZ14JdjLVrTFHGx+Yg8Uod>HN=T`sKa z0Tv)QcB6*0fg{&nNda+TMX71eq~0l$gbSkvq7%Y!8c=@Gu1*jDzva99$j7p8os$$} z^kxQqytB+L&6X@$&tjAoH8Yevdy*9=8MUkkxT@8r!2!Ns9$uo)%j6ShVQGgzohSro z@+h6?sCYSh8WbU8L7fwrnSwEKZqgrurI}iW4>kN89Ju&?OGU5@)o!;lV+5O3vZb z6NXpx(C8E6pIxgsr(5(=*Ek0N`))q(v*V%@qLS;mqItxq^upY-Y~4@Dn`?YcW6}eP zABgyG) zzHHi|+B#vZ9(nrQbJv+Xn%|%(i?bn}oZ%Y^bm<0x@^%J@8j?B;DTq(ae;by?NHguq zCU7aEbRIN|;~Ygh_(H%jzO7Eryq!E+)cEz*hjLDH@QPz8ELWBZ6DDjdTC!?+;`o)m 
z&j|u?sRgjs=u>v6VA2Sg@Q{g1#lv4E?sg?Q)O?RX!K9NE2dh%j7w9-J@+5WP`aKYGF`a;qDZu{8O`o|or23M}Gcf6(zl{4B z%g6e}=^YD;zo1317J^DlkF4GrjIue*y0X`xK2rpIJV6An>v)8=U;E`OSXn-10n9pV zCygbbs&eRj{Yi7ZBV=nxJWU)fE|$Zo1a`BjYgB&Jb$HD&ajCRi+rx4> zrVKbem#-8ThEwt3pA}MjVm-iq947iWnV3SM@^X%@OVAd+EJ7b#hmx{0tq&J>LCB92 z&;5F#D6Pyb^^ew7?7)O9cW=^H*9K-Swqr`1z~_?;uCKCt1C?S8szEp?*0JTelY%}_ z_fxByg#VRex$Fy)SAb#)`KbU3tp>!=8o;=s66#hg<* z+!H2D*jh;A-_hY7U&D)F&s@_0&ShplITfQtP^Re>1#$?XKetKwwSRay0P~?-8Qj!3 z9z}gf?m@^LQ~6PONmE5&;!^Wxj(2gWJowjT@(xTKi<((fxfUc@lus2!oXqFc`kgygbAAg%^1aV5pV=O#lYlP zAx1XKW*E(H@%n@*7Ij^Wtdv2SQ;wIANH8u>DSs-xXUYaS5xBnPxL&EPG}ol|z~!cX zIkoYM{A!`5Os)-_WjbZ-=+Qqa5b!^OdN^pnYai?ISmSGQdD_h1yt$LEa6>YzTAu@L?_dO^Ww5VtYB@I z+{BP(w^>9VIJ35?pr+dTsZH&%5L!h)7}#ZNz2GG*^2v5w1pfK4r@616_-^;j@BDuE ztydlGcK-U$+^b*yvOD4V&$#cH{c?5r<_V{|7ykBz?i=6t1MXYjc8ojf8_&B}|Ircm zn%}*zIkw>BlV0z>|HRjI=P$kVihKOYr=0b^&EGriRCmI0$8^^-N|%XB3RE;tXp{PEJo?nu1)=*-pKe)4pAKMfC=UkTmxVJB z1O!d{3wie9DrTCXkjKXzCR4Lkq~T3+Mvv?5FXqqdQa`>$HmSf#IF-hlM~ZJ9b+r51 z-~3Vc$_f9E?oK_nPsjerWA3}3{X|0wLPX;4PtJHRmz{gfe?B}T0uMVauD zdgZkCj~1fbS^00j@qO;n3*V6hU-FY5n3I4RTsN!Jb^GUi@XU-jJ?9_p*O(-^KuQGg ze>G2NMaV5^Mn$a*b1NOnk#(+L`5c1Vw(gxUVM1+iWSta0DAEtHE!y-0^BHT7kccy` z47lk_x?&aJ^0bEUHgKE-6~`)b^$-S@*F`HTxuif`1Lu4sN*>O01V3o+l7F%8?albU zeKSKt{5dD<1)i61ffncacK{ajXmQ1ZE7qVvif2rmM6W%Ns7@xMl+-u~XO6FnR|gI> z05u!4jx4QDmQAo=+ynK_h)Py-!0>U^3ZXzG|DHSTzjxm_{w;H}*%UnV?9cEp-i(hFIsI9xUR>1R$q?83eYiyqs5ZIFgo9*^;A2 z120|)1?hSNKNQGNqLV;rtViWB$SgAie0{9`85gQ8m)4OXuV=gk?}@i|DY|mV`Zk|x zwWj+|PX7NikNgH;0FQq0OYSv)`0MV~FaMv-j=<(`7XGan`S(YE`$`vH^_}kPZ@Sd| z=^HO?;z#1|#NWBmo${MM=}!9X>zm)>pL(#X(F7Nvc|WQdW4*||>-dt3==z^?<{9%xdka-;xZmOj$zJWqup#v!;Qu2bD8sCBN(?-9 zo;XKJYdC9nw9lMRm@r{|ko;?}3QE%wyDMBS4VPX6Zk_Cf~5khifP7+Q4_{(IqEscVWsbZ3Lmty^s!G^K^KioY}z0nV?2cu0R6#U){`_ z`3I7?Fde>*({gu3(6LMadh-R<M~sx(j@QJ?>tKInZQdgIIlat z@4<(Y$C9tO>>``z&14m_G2pL^;B@ymHqmF2?hkFU4kfTOEajh5zBlFYYV+B|j-D`K z!Z?r?$3ckhnfR0~qSzy7%RHBfW7idlJe%t#VpHY~sYzX8rL56m&ekSfKwI$4l?d8j)^T? 
ziq*!pJ2!suf9^I!Ik4pT!4JP^i}-WWmw&;1`=9+m5+{;>uYd9v+)>~Bla2)Z=5c># zM)J)_rEdlIy3hX)-G@GKbD!{^4>!bMRM>FT(e97_tN+ef0Ew%2zWpqB_L*mh@#A&4 z`;UBYAj8xgf9x^to#&qA-gD7;Ga2W(bKWZFl|xR)PdVvz?vhzP@$X%;x}Boj;n>RN z0wehccT3)J=pJ|I-ut`r_-OF)i;ts}s+I1Big&qDz^}1FHvT37+6Bo!*jQ*AR=T6< znYbF+o7%AxCQMj8EO)NkUp(EwMLD&AgYmFDC&c06S#NGgbu6`qmF?)JndMC=&u-*y zLhp>CTk+t611z^Pf@{Fw`UE0H1TOx3VA8hNg>3QocWyi-(sB>GQGo`)v*&$d9f(+z zOIjV0>Qiw!8&ByYK>jPhM&->f!75^CMqR^|kcPhB=&JH$3>W2CPY2jYqlZ+~`amx@E_c?+h<}kecy=S?<{EqMLn&7*h`}Kz8qw7N!`@7Qg@dFwc?@z)`f8(j{rt7Y39>>LK^SWmLW!*dcZlBfT zj)V91;wCqfyqn(?@acR0;_km^{^0o;0r-Hs^0JGYwupaMUUreYoscO(ua#O)sP959 z!#2-L;w*(HuIEh8+EK$X2*SB$T>!7;;!c<_VS)s1u|-{jVUz)PPB!+Q$R`N0oFGfe zTVqE#nHX04I@x0q8$GWN0!8{c$2$u-1qsY?AMm3M=z=aAs@Tra<7mPF)He#L+0=GW z=pBt_uCP8yYK`X`%Y5J#I{m*r2zIGP`&(_2_j!5R zQqg%2Clym!Wkb`NUovj8iTy&=RztSgFivh*>eo-c$oZXDH3}68zWB~Ris){`zU>>& zx?}$6e{oRr|LUmkcR9ozfbHYKF^%*tKxzUwaM!)vPQFOeak~KH&b@Z$TO{s&?ZJn+ zOu9O~`Lt8rum9=+R%VnJKPYlLAQp!p5|R(i@;isgFYX4s@1wW4Q%-z6FJv*4C@qP* z7UAx{yYIQbi67hVkuQF!3y)+VD};eQYL0d1xuZ??(YTv^_?$2Q2Gy*zD0oh!It&wg z9DL?{Ol+!>O_(sD9)b`KcXGgR>W`8|E&iZgr1HcvT_!%^J{Oxw%>*XJpKN9^);ZPb zz$rNn5lcUiv#f?EE)& zod4!uH0OVH^vUj(*T3Bz^VQFYl70NaPdB>`8xo$#Y?j5NCrdV(FXyg5WUO! 
zWuG=1)x}=qF&5FaEn_-@#_G3_4FK@-|8i?si^!6AXx++g`0w~@_ifLA)%ozPCj8}--tAufr(bff`inni%XRc0e!h7GI9~tD znGEVN;JK#I`uDLJc@@6|9Eq#A!|lD7U*tY^?*m-{-+S4`$?m_8Kk#r?mE*@kTJkSG zs=MzeKQJTtmZag`Gx>4%-?a;3@rqggF)WgG*M0cTE*>NRKYYjC4N=(E2jVuci?ES# z^PTQfpGn(25`Q26=q=(Gj~D9OJ@VUT6yG6uDR}?k-R*n-7V#eW;*o}MYCwXYQP*xX`o6GOWua4vP=Crp?yVI-K}i5-|@>fB|nQ$!&= z=E9ugojG`YUG{Ub+ahwjG8boafEhEaoVj?;Rj(oT8Q{HBftVrJf0OsshTg6Q{H%^>RzLCi%rK0&oO8AhNG0y(}9xwXRnzNfJeQ`xmUf)y()auz52x` z#Ue!F?@!;fZ!T|^=IB59-)48+e3Ry{cV6xO^l$wWu?CUsiysPUZRnqW@9Dib?0Z0F zVUaL9aM$uZd~r8m+@%-M5`rxbwD0TV3gqWR+a-W3i|-4hb`0Ke*S!sC77@w9OTgE4 z9a&Oj(-w&N!N-2%d;T!_7ZHibOJ+nQ{&-36ZdX7TW`Jdj=^u*nk-JlV`6{^oI~HD3mn@a-@D-ZKC1e4U}RbHrVM zElC&2xQ6tbMUOiJ+Z}U{&d9oM_n$yWz<2&d(k(vH%eCvTJ^1PFIQJD|t_JTq_pI(X zl5_EKTn@234*{YM%Y^3AgL=Z=@RnlN&;Pj!V!c=zD*A@~@(=!l z(jbvHrw02{VD~zgno4eIR}4S!dPUHW0GBEuJ2_ix=Fzv;i9N19dUPfz_Z_u9w*2j^$kD7@+$Ut8vX z^lORyxK>$)-Ei}r?%t2y(i}(PFA{!m_uuvHqq&&|e)Aho?T+J4!rO$8e6%|aBTj$Q z8(a=>qH2%fBfZUg`3eB5)V}kieLrK|%?I=NI3O)y;`si;_%V?7U4ZdpAnn(Hp}7mW z0t~`ZP~V@hKn3e(G9o#fmko=b1bxqDw9H3~kwhc*WKkA$xVr78JH)LzryF=-1KXDN6iKF7CHpef(nl1(E=40U!O1 zIN_-W8xk<$+h6)}^Y_i;-`Jf0^{cjAt?$~EPNy=*9`4{tz?*N2o>7d-#z#jci zM;g*E?!=3`0Pp>!Te@p;7hm!iFXzZR*S-6%J%;b`lK{y;4it$7^NTzDO3*q^$qSDc zzn7r$-*MyBY{78d)=Auy6~E?PNWkZA^1`He!b5P+q`oG%tBp|4zq_y+wN zo&1Yn4ADvrxTxjBEEhkdY0^n5g%%ca(qeLYTs1ejv=K0aA~dD7zf0oNb)`*R$9T2Os~9 zkUk=U5qAN8Zb1OL#qhwu{#Lmy4Bef-D2bcg1Bkx~GquD~!Cxu;R-mR)#|k6e+NHd` z+TCt-{x?JQCQO*H3Sj>c+P6{zo|C~%rj%_QfUTK-b}81={#>pSv&~(v1ad(lgmE&Y zF~ME4&i}gza??^|psoM86hrQ1c*SuMMpW-%@LB>;Zi1q^fdLVe`8ZUfG22umtco&U zE7}{m$n(D7#Y|($DKi)xA7$p5GxNA*9&sTfLF#DvkVg^OxP^soW9ylVUGNBZn8gn z9+pW6PTsjV7(R2o0M+gJRLstLAYyPf#;SKxT2~VYj;a_~u;>*9Y4r?C=HtX0T z!GXK(O&<6C#DfphV~N2NUw*nfj_(7EBv|{k-yt~p#MjP<$glQ>F3hj}A~2U<+|l>8 zx4vZ=5SBm5`%@Ny@N35L{etg`WG0@4(aanl`~CRhU5YWkcm2>=?!Hex+(jRAe(c`@ zS)Nh6Pz@VY1GrBqhX-HVDg@)X_F9evA?xWWzA2oEtY* z)0(?&*|qyGcc+67hd{u` zha<6uvT6{?K1f*J&g2*Ky8Xth*++N-s$-OK4)`*0^YITj3sY?|JCE=DgXNBo5c`Gv 
zvFwQh#R5f1elcuBd40fKDd)yBao1DY3a}VFjbVcNhmES1i1UKnRbXp7bHaoPBCsuz zP-+J#a!-nj?V=xEgb6Zgx2s-WjhyAlFo6G8j*@v=UZq3pK)LRdzW?Pj*t7k@V75) zcILHT{B7|)M?Thl_=CfDyzrzu{+S2e3xD(C83#Bg0bh8-KXxzv&388h;a?tmqVwOH z<+S5xai4bI{^IW;3DST7QHZ+q!Zdf=fZufbZbwlr2L5Dob3&E+`5-vOU45 zMkG?|4I~qYpyHM~jy-Kx0jh8N zL61oKwTSQN!(Zo(JO7S;?(?KbytY5?_aAV7^_{PEublXUF3d>4W;ftW-e1Lc|9$IQ zF7Ckm){KPxuE+nUE{|y*EbDIA0;6WNeH$wrXP7 zIOUy~x!mC$wQ+6|NB*q^cx8;Q@z{~$Dmsph;*H4^tr+Y|8hTme^U3B>tHWWxg*12= zjKnSLbo7_Y!^yaE8paR2z6x|7`)v>KB9dqr>-yK4CNIqul|_tCOr&vVAN666h`19l zUVja9oe~@IUBCM=cl`f#ze_$P>*s@N=bJCP?|%H3-K+ojaYmYQ+FLm2%|~4vAa=pE z9NOr2+;y+p|5G1H9?zu^9~XZ2k6qJ|dH7l+_B6PCmY)X1JR1N=s?*b~$ zvX#>I%C%lVvMj5;oXjnNM}H3eT5d98xm$p;p2Bw%`U?Op`KMjm*U-wru;|68=(l(O zx(xrebIuKXTrXzLrxVgH${B(pE|E59@<@{;`OZIat4zb?6LRtU=>seSOvETq=q@^C z8Poh!aZ!0fuROyDqM#@nA5VY1l{vg@nRuGPzIg9n+ymIC+Tts;nmlUwcDDF^gAod5 ztU!qs7>oGG?@OoraQEo%x4-yw^TChyLmldTpv+f)uY2XB|9A5n^ZT}!o^i)M_rG*9 zAs>Zr-<*8XYu(8wzP>q&1XX+_m(Cx4)!9=K+}Z!nB|-KCBD`?Ihm>)i#u+dm{J-zVv&Sz9Z@{D#lsPQ#q{?CosT?w83lxkRs>oq!t z_;sTJE2^!vmjqxdE4}QvEcrBY+7c44O$UC|Y5Qf>kui?T$4vgs(+4Uerr!cc$00xL z6mbb77m@@@IgIFBxo|v6eYNe|#`TJHMdY6rExGMoe-b7P#n|MZoW>jC8f6QXI~XU* zqJA*Rp;-ClVSw*)EVTBu>M)i(R(>e~gX6aL1@Phcy`-hif);t%Yj-g^5T-e6EB6eB zR$g-C;B`DV=Z1gIG-wwUWhQ3AgbAAn&VaXJ(#;Bqn8@Wx$#X*Zge^hIM1!wS=)k%E z*7<)=VKq9WahFzJ2#A#xXviXr#}0qV=dWZ}|8-GB92(&jAy%jexLjO~yatBy`t&!z zGK~V677W5QOAfV6lXuGeAwS%be_`k`m_Qwv3AAv|P6#&aB>5xgGr@~=@hK=hT zz5p3`^M$!&V}rG1V@W%=X_sy3#kkp8*B~SVpJ^(SFRR=Zik2b_S?C=Q*D50nygr9s zGb}n(Hm<{Rw-|Nv4GGt-4Pjghe3F{JvW#K>R#T8N#`A-=qLzFNY5VPmCo(Y80}?#6 z1XZ(}vs5eU05<2?V=_o{5OvJyJ26i+ytqfAJ(G4oMWy2DQ*si<>H&Ja*Os8XPFs#r z2Y}GO-r^Y^W4q;u^jNlY`Eel;=?=w*3w#(?!k$uw2@|#i+_-PSjenemvIoKGZG`qF z$%LvH*3{Ib!g&Be2pi8=**M_pbABx4{!8;eg+0-%xdp&LCw&2KO=EF^1{VPTNd+6F z@3n1NFLQyHB2dkeSBKDxny1ZtPGEr+iX=$^WEJtfxMTn|F9;nwCgHG3XzNg9 z(!5`!3w>eD^)#&eTCh`!ceVR~n-K?qThz8;1>}FtI?rWs_nE*oP*x91;c+@`p7l_> zO;o_71ht5PO~@=Zpw6?l38u*!Wf`6*o*f}0y*{LBQ||q)KrLrma^Rt}PlaP*5MIZ} 
z)FPZZ=gR1i9G3~3Fk!;>02*=0o>2BkngApp-a-scHfALlHb+yA6vc44OxPX*r~4jV z3q%??|6`fbc-wWr#NM*;+J6pQsg+|ZZ=l<6hcu+1Zg6P^13Q)Ycs%SR`H!8kH|FIX z@vgA($`_~t$PqH|>dY)eP%*F(Z21mq(E_-TI>cFqoFrt~u*ONxyXA)>LL1;ykfri0cMt^h1EFNu|y8+NefvO zLqVOwiZsHK&G<#~Y`zvUEi|QTwbn{|3G|#&#_=ihMeJyQ3l~ zhZE>lz&37V^zAk^Euq=In;7QjAyA&Yt=)#UGIsIC(q*;W!v5=?j>mp0lu3mZf?8{p zNzq7oMstbtW-^GRL0uzR1IU+p{^NdWFN$)DeoiJg->VRzm_7ih< zf|BDhGHR|LwFHXxx^>dp?8QeRN<6qwc&#+%E<|qr$8quT1~=aDqG>Bhuc9Q0@SHMf z-0!HMZrb7-G|urieXC$I6lzWYuAtRJK|b}!zYUp5f+^StdmRI1=tf3R#<3WgAwCI? zXCguTGF@Us0D;HWJ`8I*8Pu~^IQY@DH7wXqyO-pDH^cqlN%oTaN zIv7K3FsbNs1;8W;b!W6>0V0sbLs8GHSu0dtiCkZs>{NyPR*D*h(JYE7;?KFh1IP`3 z=tN4k&!adlBip%QZLp$we8PkY0){$NIU~;zb6Tz_5Pwo0)cjA$)l6P{7T9Uh#>Zh& zr%hnlCeHn*2%$C|_G^_`pHF37?P?4M?Z)I*MpDxSrW7jy9EOtb*M?Qi7}pjn+iJ)3 zj<)slQG8xN(8nLr48639qV?g)c-G-n3%cQn;!5*}bN#0py)|EZJdKL$he4>?#wKu+ zNknF0*F;qo+*-eU+oe{|YJf%ZkBUGovP<(Rh1wwa-lUL@5!VE6;=p6Y7Jr6;wQpoFIg0@@cP8&IGj(rhYSiEs2ncyQgdsoFur4 zbB>YZWbI*;j4LTkr6ygUT5Bp$Rys-ZH0NCN+P&mLU>k@3oU=LLRlqH*Rs+=|zyY6> zlylV#deXT;jOL}>Az|ih@QOf9QwBqfEXN8*TUf+jxzfwf6$8JHbU2bn6g3DGx8zfY z$>JPUg0A@97+(Nq*RO95qmH?d-gQ5J{5K?DYtgqS|@=`~)zQTC0}>72fo zc*R3He^)ESgb5o6jh)iSfVK($QgXO!Ip2)4k+2L{9wZo{d8g291y~xu~Qc|8RTnb3qiQS-_i&GugCxZpRg`sR~ai-<%J~mVY zpl45XNJu728o4BLx}-Bb8#lX1^rZ_k-zQGtuauz~g=9{Kp|sVvPCSd6Xoi(ZUPymg zNJhi5Fw1mSeUssl{IK}VBiq)(Vwn8MjMJ9V4$3d}+T0+~7wtw%=o&r(qOvPSS!06Z zCCEt5@gxRCdf;Q{T7Gr3d2tnOrq7*o-cwZbH<%LbX-EbDdaus)7uZzCP~I!Y|F-A2 z`(b|0*(XQOgin|-VY`5BK@NfWZDm<}P%KxI)}y?!!=vzRcQ0~89d{f7Cl)E5y+nH! z(gFa_^&@1xZp^DS%+aRY9SXU1R}kWl0;#~TNvijK-rwI800NNj;(Rp}EofyC3)(K2 zFO1VgUL7c-M1@o&4JDZ1K{Q3ymoHQ!MaDqO@rXQ+N0D20xSO**FT8(P9kSV>OtD@m zz2*Yvw-|MQXdCf8`1HkAEAUHxMXu|2pRbm zuzFhqS3F1qA~*lHalG3j^W|Lk0;yklP;00* zJoIu&o7OYwJ(7iwt3q2{Jah-?hO~&;Or^{201Wt~q{obDswjm*h^kA1z3T!M5VAo% z=<^M>uHi-~2-N0XQ?0o*8kMW2uEnaFD@0=j(c__!{NpSjXH6B^)C1HI-)M|QK3QbW z*=xhSTiI8=Xn+*MoYdq!vmeYs=cnb_PtOE09+rmqU? 
z4GZbI8eE&>IGYF6BfuR>dyI3IE5qXx-wRC;@={b?i{oqHm3iwV**m4HmoVRGReIf| z-5_{0g-acd4N7j2h|{#^nFiy0v0R#bKfsv~^@o?s!iu710Kkh{RdZ%9Kl9R z*f8wz=3yHf>qWEk)d}rnuYQ`YJq=4S^(Nm{LSq1!fg93Rcb~gYFgm!t))Yon!I1geUf2l0h+chtKDvf!fbmJ>dn#lD zRF^8mA3p5$`58*)l_|VVsz@4)hPp&z&@A9^c);9{@7n^-5Ghq-oQZ=gloK1wQ;X3c zq>If_P=TSklsCq@tzWmUh2UHmBD-TeHaqSY8 zPW4nEe)}cjM`Ky($JCjvKwyqrP~F6Fw_JED2maxXq>O-9$mhyg3wv5QCQO(>LR$5E zA^x7VC&3WLu}^!x5$glb{~8})WQ^yB!W?9WB3A~irPu_N!VCFT1ztmN3jpqBYgX{? zg-8Gfd{WvLR&6qpjJ+gKA%+ymSYI)^H^ilrl;w-#Qj7hT#3~W!%GJV(dAH^n0!w*3 zzI%Y3Awp=t0;oHqL|QFzSxR68m1Nk4I(dO>a_bJ1kSD=*v`~fayc&^d!*SvRPWyvx|?A z=C2g0ir9VFaH)cJ&};O64hyX~VQt!BXji-BYUwgT99bKsb|M#W85Tzh1UF6yr* z0D~ZP?g5{hFjG7IE0tkr*HDm#6w;(GT(HBn|4g7yWz(NY*0pXcARF`AL!)roH*D2Z5psZlBCrp?yA%V|a zt33Y?L&-|SXRb^6lp5VIqU;GhNb@Lmta7SR0d;ydFz3RevNAT(m$WPnDILgSHEp{Q zAgZ}zh+78VJCG5`#4CXBot7`@P%z?&>cH4Gflzf^HcWncQJkVK-qG)2qvh$Yhk<26 zX;c#iSlvw; zuzCciT$Pp#XWL?vTqQGQ)AXH&=n3E-A19A_x8haVa0G6Fmb*)e2@Iw&f`LRX(KTHf zpTrL+j5_T#fzk-8)g8oluB`5yyq4yqfmYZ}VM8)Dd?6#2Q=3ge$ei(_Q6JSXjqTKMYpJ)@a+HE4vQXZVE*D_SXyH)OhgBZ<`OtVh5&NQavQ)s%2~^ zOqj4ez*81=eWS<|Y!t_k&lysmIp^d}SUY^tr452^6;*?-#!J|Z(%?{VJ=?(<;$yc1 zuXksQ-JSAbVM$k#M*)e4wOX4F<<*H$7n@>#TzOhmn!^1n-c_6Ib5+cdc3INIFMrI# zHAmuc;V`qv;2{>zx8bm03>l~`_qe8LUby9Bu@d8pg0z0nqW<37twOE3r|V}dMyaX@Cgg#beghvUI=f2 zefr>1gH6fyNKTGm#9_`DmQ&kFc_vKQPDppevajdy+8yeVwrtZL{617z;8KN6c`a$G|zvmL+HkB4Jy8x`BQqH1J4)X+{R|hf$ z(k+Zk@}1*JH%rktC`Uyt@~Q+X@i{UI%!LZ>w1X~M{?T7u6OCh{ykrS`S%!|_YB|%y z=kR^@i&`hvg0x2M!&Z_|nA}=degEenPi9ib29gBtzXMQThEmx2v`#)rAT>{T;y5F; z-R0Lk`WqJFp^smANRQKARAISF=xO+6%HTF^6}CH3qHA6Vmq@fA#Bm@F7w%D>%OeJ%FyQ&;asjI~;}6g=~O~jz}tL?;(BEuPqN*_=*O7kTl z5ZTuScGR$@Q3F0-lF*B*Y2XRP&?tshfkGyn;)Jp4Xvjsa|H_ewA(PL92@`e)wvyDB zSV&8H>gF7lTiY|(bZ>9VKs0B&pF&Ic4DFF|{73GtU_ZApG-~Jw+X$Enjco$uMD1o! 
z1IfAMoqq&&s5CO_s#5437}6N?37SYXfY*eJA3wP1lok<`Aqdyn%V?}~W$nswIMEV_f6 z*F3tgtJlT2JIR|o64?-lc$97sUI)Ap)#KI&3~b1+)P*OG38iP#VzX z{3#4rV2zD2__dSMre4)I`B*ws3chuc^>Mgq-E*qwCUQnq43HuDLprD;4$G) zIE>GYEk~qt#%p`k7oIR-uOYW+hT~tJqDF+(0ic|2&Y(j1lcC!h|X%6-~}5~5N-WR${$Q= zu#p(h@{C>eMaFx$4p7{a3O)X2+1Km9n9~)<DeE~Iu&o4j15E4E2wlE9uPC>!8c#2+b=+7_v8Z5L2X40k=^N%C z3b4cGGel!7eSz0l$|Pw(;0X$YcpDKGbWtkm>O$<3ZdHI(rw3d1Y;N$>DK@F@0j^x*`2wS z7&JWKDH{!ZZ)`W)bMk$Fl~HH>K*g388(hwIY932S<;b<#OD@l725tHlA z;CPadTEJW33*tk_j6R+~v}Zx1f+CBp(MH zQ4J^ZdyP0Q9De!N}bafK+`h*L<9Oxv}-6X~GE=VO1vRcu52vrKm6-R+k_g2jaLExHZj$ z2@^IC$Pa6P=jL2mDQwXAmmF*yF9_#t8jX`OVJp$fla~h0PH~~bV3qMSt>)}wL~6H z# zEo7V%T}Y-3dHj-~8UTnYT{jN_o&UlJCv`lS1*nOvCHRNG3=2#;yRTMmvTsr1Nt8`MiZ*T(yX3IkJktA8eXJh^@D1~ zsia47v?`{bv?olMuuga(oce1ajd9J|x!U*_78_zgf)EDJr}=MdAu(ZIh=UPK!K!#R zZhw-9|y}!%$xTUa$p>rglDKo%+)Rpw9;ISC!?m7T-G~ zQDMwSODKh01R#SXoJL0AEL>uh{3zpRIXpu)jvhWAst_8a_}wBaHM1SDky-m&#-c%B})=fm=F zd~l0Jz7mofzztE#HSmu2FsTtkHSb1^9*V7G0qw?a@_lnX!{`-kf{M+t(`MN#gAz$` zJI2J282&1v_Al^cmbs0FI@L#&3ZaI(WW; zawuQHJKi3T=sWfDnlNF)c;K8GZv2yReHsVKaE_nWa^+$Jxq`KVIpg77e}Ru>)bbW5 ztP~u1-TuOEr5rj9>A_sHO^1Q_bB=d_rR&RB_?(;mwL~6uOt~Mca0QBBW;Ln4*o}68 zvU?ybw;X}-26jHZ1F)Bi0$m+^R$5@MSmD{CXw&qjnykJl%Sp~-zC@DqmOe-ml3`;k zC5wEC0y*^(VJz7pj~8Otf@AJrMA*%r2S;sZ7aCEaZY2_HHC5I<4B!Cv6EK)z$Sgbn z*|JxLDK9SwfD4b1Ndn2$isM6CQsj|yqu@Em_+ta0?@gqB1_Wgo&TWDj#zjXKc#fdv zXD3|K@XbSv$)rXTCae$H@gMqO%#Ap;To{%e|I~t9Lx@nZyFPY1U;&XB%hr$$6(~@ez$6G^2LRLN|&p?=^&RTy{Bf|zS3u;+FgYco|aZH;onXxMKaL!%>$FAo5 z1;mORB@d2Gq0X_pMlt@#04Qo&J_d5Dxd|(Qw&}4-;4#B3m|z%pP0FtXnzVuWoiJen z53dXt$G@kit^|Qj%jM@Z*MQLqi~qXcoN#=@j53JjH_|3-7&w9%s|nU)CQgqZ7!2*i zMd@idsA-OqMrg{HgAIqo={8Q6AE`ZY3jCu zvCPP)M`{+r3}vm$5R=~KrISJEjSs0W3;VQyRT^5Cu>j2%WyZ{TD~N{>2)UrMYSmXm zf#v%}ih&6~DqOl2M9IV2L1K?4RE4HVbBcz2zY>xSnz2A!9|O68CE3ae6DFk4jDLv za9$zQnaIuc2R3@QqYs`;*2|cXKGY76HN8=D?;r?m5*GjYOj&9S5pA9L>)5f-mdX?9 zv8=pC)SJUg>M@qK?P1_1%1Xo;7T(98^ zZxke(Erk07KT=OX;Al1zQ_g%U(}6`J72z$Q-Z3 zOzWqK)>#Hb;Nmc>qu>n2u;nYM-eSCT&FaNy)yp)j-8j$F63;@u(NfERTNAJLq;wNT 
z1+~B-e8A*7#Vuo**k{ITXs&B>T`s8&R&}U$lPXP^AR{*>i^hNGPeoXcL*dTJi%V{S zB8~qfkP}SB!Y~oa$$1wnF?69a=uZW$qY}?yMBty4dnn2_EPJ> z0RfCKB>p@T-JztE-#8+K9f0%z!c)L9s$ANT4N#E676sN~E-iDY5<}}@wuh*{ z0!Zx_`V}BaSWu3kSmpSytw#arNYkQzX;(Zye9QAhzjk16lgmYNSSatuJ8cW+a;I`i zQ*-A*h&N$$;02E47zafhEYm5%ae4{fAm5_27EEe7VK*X2tQU{}z{_(;Ha@1n-Q~`X ze{OQ;m`7Zw30nou6{6f9iu{A|A;BTJGh#uwYgXMFM;CX!rspc?QC}qSL&0KhTM_1M z(S8hq8xIjxiEulFcL4Gwk%<)xN^9$l`3m_!!&E8*h?0m&D}gmQZbyRnu59eCufndF z>vCYT*UNxM`6cjhEF}6gKhG_Y=wfBd#U#t7ajvBO^zl_dZBfz(t0T^!6Ppg`PWN5X z(~df1os)qTsjt5>1T!>PE?#%QgRp4iRo8b8inP-%La-s!)F40d@I0@9E*`be+SOjw z>R@4Nt%{CsUg8B5W&AaH)ad$EIp~v`PS~r!{$(=$gG-)XAQQO0&A`!ck^q^TY4nZcaR;9_B>SwU`D5t7V&5^i5ZdG`9C zB5T&qi}D9*l4B+_OPQ>3ZJZM|B^C}cA(?f!u4e)a>FkSsOCNt9T9>ifBIl2d1w7xk zj{qm*r$VLeV_#~Kppfo~_{p6D6gOW&k7C1& zGB}yo)_EZv2dz@-)P2H)y@%fTM+@UNNHhLH^6om^6^Q-%F1}xeQA7(>X6FWu|9}cJ z8HZg3tZxz#*n)FGup8wav)D-qKdf|{q+zfsyAHTw?Yn&-iNLwgyUtG#94oxz4@aTw z^#IbJ7IkYy=u=u+h3WUwG-2|jW#LMB`L+5?Z6GGyzD z9i)!92n!a2Pak|gc})c7S5A}j@L=2w1gDTBM@NRfeL=@8NWefi7ve`xas?@<`mTLS2CJi_o>W|J@dSYvXr8zxL6S|$nN(`Ro&$0iokL@o8F=LQM}8(}LlTj`zo-mE zS>SbwgoHwpc{(#IWyimLMw_q&2+T1K!*htnjUs)-?bIWx)LS`CQho_sSHW{Hq~CfU zfw1?uV(o4}YK-721+70Eq*6$j9dpOu;%?p1-7l#cx-~l@<>* zKzGgN0!>yvczNPNr@1-?SKbbmZt>)qSH#T-#r|NjS$SmHQ zWCi1d^XqGYs!BD%(q-R|(rr)}r9dUXhS&??oYBT9YrT1} zG#j_Lr&JDK3n-0dA!dP;HW1~TFkw#tF9uu3pvSnm)a!!_6g-F>8vn%NI!CIs6EU7( zJjhQXSUeLhIr6@jaKdf@N9agCT~1Fu6Q`$pLsm;RjG?`w`Qobtbcvx|>K0Q>8qRIr zH3-N~2U^cDwxk`7(ymHEHb9vsGl^`%awILo(6ZV!LE8peBC3~&xv0&TY@a|~6F?T; z>!OuF6ciR4QoWPb)FoxHq-o=puPuWzUa9>0@g?ZnE;J7D@y8zR&OY-k?&K3+=idCL zH#EQT@A%`6Y5q98_|hxx+2@{bejk7GDfb6od4Bf$w0rc6N9yU{NRW7eVu3<%kgmhA zvk^c8m<)%bE;qB6w{#$+@Y3)Vj)@7vH7$0X$F_Nph&xIdf)I~OE5SN0$%R@b?t}@u z4xE+dSo1~P<_BEs-j6|u<$iO;=%op1IE)92=NQ?P_eqZHP4~W zi-_y;t81@MTA5oj1@;;qlpfj=+<#KTZM_cw?KxcRx9y@NO17{KRF43Y^~IB%23`B| z8k02)C_)e;YT{+Hu!9?5>ZG+a2@PylRHOtw@~^P5f`a_34o{S!GjLvXoTM8e5<;ao zdJJqwN;lSI;5F?M3BPx~{Vey+bI*1spY%Fcf=CX&`M6UT@|KSPMndq384>vL2S44A 
zfzQl{!A%1-{07-C!qMYMKfJCfvN1%w`r{!8;S|x9Ye8KLQ~oc^F^DO1`Em`<|^fWy$`tUM1SRb z{<&wl{g++rthnipyWNqQ4>1QAOv(5s1^^~fr11~QKT1yTL^4Q>12Fsf@U>UEGfq3z zS@FWluekUA^haC`TzbKIZr=s(aC;3m9XJH^Xe3bc7&T8(^FPmtj06iJft81q6K@2R>7+>rxTKg%$Jachi znHZ%QLr;S0`l4{uypINYaT!%)EC4%j(#|dKGa&-1Z%SZ*wjzum^?JZs*a%yK9ZNTG zxDEj4infS^-Fq*;xcQTI_6) zqd%xZIfa_Mh2jhc)hg5=hpkA3;YN~kb58KLilHU{r0@yVA$6>#}N4x&u zBeLnTBM3rnY_|*dUO0i~T43_dIRMUGf92)ws(lx^64caz*as#0xa~Op*nxg-ZXS}Z zbM6yFr#7v_=zv~pCc%0hABP$~7@k762hmpG#oeL?T*D0Y0X7mE$H?V%p7O!A2Ft?l zXtbD&QtT9ZdtRmp!=M%Kl?lU0njuLIQX=SVR4zeQy3n$s^hA%gf-} z9}$}twJT9kOI?W~I;q8iCbJEi&A4@L+~IfYhpu-2$8X=`uDR;+QOQ3FkqEr)rmNlO z?my@*x$r!UX$DC&4SPQ=U?Z%S!onn+0UUx<2*b7}X#sMC}UXDk`)I=bP17 z_pAi8=I*4Cl(oqyMoV77@s|>g8+p?hkLNhd}(vz(i2CLfu|Hc zc02@i2qqyB7z^GoCCC8^_!?tPLx{mJ%HyTd>q_N?b6dSMUKus;lpc0uX?n714v|_Nz! z3#i#PFxxl3@eS@j{OX+z0k~<1MBwlI`-AS<{hL>|L1@<(*k2$zIY^#WLALUAH5RyA z-4M_M^ed++^EmaQEYs>+a6$z@9~J(#$JCheC_S?-OXfI_&S840Jc6IYVrv%-6Kdl6 z{hPdVjI*7~fq4CumzD4STgaf#iP^5p)~8W@pvSCbO;k7@KlXF)A6WkhTLEI-v-|~m z>iv>r@;Gvt_eQBRH)`NH=P!DqRFsUIze(juO_!T7)h$ZF5wuo2v9&HFtT$fJQ+GrS z*czWkKMr@&LO_sx0()^Q$D8YE{Z$%FYAF&4f&xaj-R> z%H9RFn!vW0@Z)<3uJ40LD=8{HH|8moFHx=0jn;%sMce@xKisgHkiDr>PJFG~H~3W_ z6wTmMBBhB2oXzeloWb>W`>hc1y?-Si{g|-j=xjYC{Tw2f@Jh2xIVGLIlp3YX?@YW`n;<%|f#F|KqW=|?HW#==&|><&O$e*M-^7u)CNLa)mx z(7|eCmBQ9WR)U^IP}U_l`bGZP#B{~WeUsF*bB4SKL{Xa{k3)!b=u&O+uQW|xeFcrk zDGTHOvOI03Of}GE6LR?tf6rej;%>mhAHBuB`HiQJ=BHD0bkY3eg>c~Ck-h>P z6N`9RvtgN9MnMS;M;S+bPSnuKOBgz@j@N<{27+3pSQSJv0FIH3^MT=%Uo5S{C}>n} zs9I0hYW&Q#S8U)sF3nnVg^dJQ9T0cPsO|MhaY9doeWEW%e0+O)@82#5O9oLBNUOha zaTFFYhOPdfRD4Vo52p2L2}Re|Imaz|$f*_Qv%YDJyYmNY1>*&ss9iTN2V*z`+^k!H z>?(}Jf&lca$bLX+tMKVm#Ren2bO}d}8C}XmM0dc*DFAuLB?(F&9z> zz&$W|_v5Pt!_!uW0y3PijXarDn}rJ2k!04Z+)M{&wp?L^%!k@Zy#9>j-zGltn?oc4 zANth;?vjz-Q^>giJ^P{QOV5NOIi9i^Aj}WYn}TQ+U>(smkI56M;5B);m9_$$jO?`I zgcSe}(wrYctvV=2AuzHjMZ@zOHoVTs3QQr#lTYfm=Ml-j{e@qU9S>dz3k?CbuiA&c z;9atI5yK7ckQN7y(-n=S_{i_g*Iel)>=ATx1vim6*H=Q@3$8qIY)y}eK9A1vb(q!( 
z`NJAY6w-R9GEB+}j)7#_q~omuZjTlNDnzXRHy^_^b4L=}3n(FE6lL?qt3{*?EPsvB zEKbgP?XeU?D>dXU1Sh?YF(a&>P$R0}J6AALvnX<-j!327m?OU(G@59paq7GE9 zSdF37%6RA7&)Q1z590Ql-Zu&fm|JV-F91OV*$00cH37QfvA^NbVG#0CV14r~b{8cC z^;M2vg@0wpc_=<}K4ERZV+j6IpvI_YuH&^@f4$Zx{4i0dQQ%xzC-s{^;Ht|m+K_k2 z=`8Tg*VZDym;upSoG3)BXe@o=U*9sl_iwiW>QJXg0XS~~MZlC=9~K`5FTY_JcTdWp z_|!RG+EIB>zgQkF3zli|5{E&uO%&-DXc0~j!FTDFATY<&c?OiyY&1BBt_(-1V-8Um zhG1zCs%7eVnQBprhy!)5G~b}U?&U0s`x1^6p{<3~0jG+(QG4KV0~#2=`R+JRm&K>i zb0{tTuxbl^teF59gj_nWjBi8~`~4@q&fR$3epd_6e&u=h#FJ0E7hitGz3|e@?!}j0 z>BTwaq}Mk)_u?*v`1o!uL=y0cC!cbU&+@MYN}Z{2lOX8lwb%S27!rMy&4Xrr!2D3( zAJL9n=jg^Llgn{48p%4qL5R!k9N&_;7Ovj|69`aUhQqlu5@{cttPX()^F%&56_C`7 zORJGPso$SkqXb_WdY+0AFu`X5oLA{&pzfJ z`NEfGzmK{XUwj31ILNpOT2y*bh~s99l+)jIs(a6c=ecv<`j){TAGz>BsX zjEJlOZf!6~5Oxj_5!jM|9vo+(R@Q!T>cB$cQH{aH&6kpy7An<`Dkn5#T%&KL$G5w| zDTHU@adw@u=Nj9O(^*u+Lg;DaPU^WQ5Fe|J9|if`7mqALMw(Dq87+90B$8j(?_bdq z2;4h(IC6dDakB4%^QQOyZ9AxW!czu$9@a$lxkW=6@a;;@H?pdCjd-wZxwzvYFkzmE zGoWv_c7Cm^j|sM-O}G(L54IhE!EhOpD?||KT(QaP*vc6N zEQ35*rdqJJg&M%pH-}hB2UcR7sguvEpEE#9(&Y4<2oIi#;mEGTmqL^Fk^l6i+24^S{G2n-Xvo0Q$9dzfhe!gx>qoC4 zcOMQ#ubZ`sh!bKC35=JPqYs#<@3o~%BPffYj6M(&$tN|KUc)t0bsamQB_(mk680jI zIbkKBlt6RiCAU_fh(d|$cCHSmqLpEZJ7L%2`u&%=&;7H1xj3u6{UJune>_U}C%IEG9jGz^(y3bS`->hCj*k~#yZ>_c;oA?nA)warX!s# zkNl(b2NM&X&VF?mVqkbJw~qgDh{2qD%XLzV(n)248ECo@xmj*}a%o|D#r)p$Epuvu z<#=Tps(dAw)O1%Mk}H>9aNg)7&Jx^0yN^hMjlKg;U!0X9zV}c2rRIZ&?_V1M7;mJG zaWfUdO6EQD+}B1Y0GCiYQ@!aQIeD3zh#Hm5sjlP}YP}QZhME4&oH8hH9vtTyKR5qx zB}O!YnYBnS-r9f{PUB-o6{}tSNtaC*=CHQ+;3lcn!K5PDga_e_h1baReft6ku-?bV z81lzwG9FHL`9$pCuJLr{e(_7IFeZeOP&I1I|4f`)slTG!3COClW z*YWu42AJHq^H00$F%n%j-f_2^u&ICwwK(=%950Jd=HwV@cv-@kaEq_By^t1Z&WVdI zX+CAvz1st!-Zis`*a}5jL3;q`2*46FPO3~AisX=U1?>=AHbctcwMZt%uOHZD!!B<`A^V?vE2;#V5_M!O;_m2c$6X z8wj%%7{~? z!pDN`=&r+GF7F9uU?LCOTB=KNcX9K&b2Mqlt-yki4*ojkTnkDq@%16{SBkAF>_#J! 
zP5bv zt|OGvsA**HT1_MI;DuvtgKpH#vI4@h7O>9+v)?$Zr(ip}Vhpqhr2zA^PLzU(Gm=h3 z7QMs)UsDvp8xq);s3;*UtHL^uJ~Y9q-Sfu8 z*hwN_V;BID1moBB4Oql%&VK7#OljgHzwryZz54`Nj;Bo!5*rYD@p#0q`d<2zAIOq` zk@So2{u}xMkX)C5p_B%4b-{!lF*cgyz?N9II;PrBt zc_Y^@)l{{hLp^nZjV9oD<_BPiVH*_a!RIrTENz3~WFbF!wDt3_Jrsg_gs@`b88Q}e zo#K~vEnnM>#9Jf)TVdU9wZ%D!mc8og=6jdA6Zq0(t_(9LSRw)MzW4s-dX0D0sB{L8 zX>A9g#Yys^5AWbp(kV%-m79q&&a^ksI4ckwJ#Axm2Oig}FY2^xJcr`rQfi6B2}LON zdpTu-*C?gJ1V4@!2st5>zZMdi5twUMTeUVY4%37ValX?MMDY>e_(6~qHQQBm1{@p| z@$0viM_`Y9`Dv#{5YB1$b0vuFZ+Y)uB>xVt?7fW>R3N_Iy9u*5<@-?Ab}xDhO>~V( z65$mhw*sY(Pa)9yU%X+uHf)IT2S^COAvb;dsB4Nsr8rgcPOO@}o$rIz-WV^LFf#GPZ^Fym|w&z<-8^2>^l{ojf+V-OyJ@hPIw`h zD+FV=IpU)PZ4uu5#y2<%;QRDi4mVJO@o~ZgyY8Sn#V--ev=K)F#*ck`=(c+A$f4HD zSsmh(jhh^?P{4tDNuCC&e@b4ii;yWyeQ>TG%#jN(&rKT_8;d%nm{5RHWoXAIWxMhC zqDZ(P#CJrz8V0_Vqn&d$9_++2VS)Ii(%TN+tBxOuxA;+zIkd{hc_7*aXBuZtL|*Q+ zxb5IQ?wPNA)fEAbt`Ij|ak-lGjD+W{hxGkAp)x%BYe4IIZ;iA<-SW&Ijx)ja z$G+AFXi;m9u8Nu~L?t-4mbwACHs!+xe{(voBLJ%xDHL6DkQ}nX^OmlgvxK@B`Eo+Z9x+5yW!iyKeHO@uB6MaeZcpd|H5 zzvlo+F&yq(p1-E-6gQ^ZYbEn?4jL7($g|3xu#q_Y$xpk_eNq3;+xad)YA^&xPv%q{ z%Hh#r|791OcK;391qX||#x5sSxs{*%xf!FiO;=BJ=14)fT=6rjB?USx= zlJU>Y`#CYIcb*v->RlV0e?b*#t>hmZ9UGwipe+<10SWdi#pK@tpuKd>CA|C3_wz3% zVSGlo30WF652-v0Bh#y|W_4NuNIp3O(K)mX(0~S_Oj07Bi6*D69#=-jxrGiTv^UT% zMn1CUeUL7YhsD7D?{UVj0X_>JF*4bb?qsPjOO=UCU@#kpC@sDd@I$vB8vL;j32NU! 
z%LSpa+6#ky9t3%C>~YAbY)unnLz(UN%(=8w9S_=0a3~J$cx`%|Mw{Or2Q~J*B5uib z948dY&@4c(z0j=wwg!{#mcp_NFzx`n^aAN4U|P#+pH0l~z&m)4D}rYH4_QhW5bJ#N zwO6R)-+18A^xnTwAURebN;1UqSis($u$o$gIhz=$QP6e>m>Of>xWS8do}0+krMZN-S2OB!Ej&uEf~D_RMAT?j71;%2WwJu33ta3MhcZdIF9s z5abVsWfY$TWSddb7>xj*)7{l!8t;RpL0 z1M3i;kZ_!eMwC)t%lrRWIk3(>rwN`fPMROT92+o!p}uVvAAhj!yK$)DN>wd^Fb-mP zbo}4^##5W$_Psyt?~57(39f0HPnqe{>9cyC zaO^SeWU#a2loMY+TgZI1%l+Beaq~W>C!bcmt1)N)yfR*0P)1;h*cNetef+E~VqeF< z*e>mYF4p_;FF&gKsZ!jq|8n=xe|$_B??bF9Z;h|NQvc|0 z-1%4S9e5hQYy`CL{nH}8_wS+4KISHj0k6O8;bIPi4>LnmfqmB85`#*+eF*560lZm; z*S(hcaU&XPHJ+`QC3btUUxn9#D+hLr)&x%1K${#T3s}UgzRg5=E-G(AAn$Q2Cv)sGhezrT~G;?Sgg)vX`V76`!9$CJ(&Qv~+nHym;>E>rN&-;cPL%f+_pWo#a+h6jo~bQj8^r!T zbngSim#Ygpdx2PnSjYItnYDjnneKbw;f5R-`Zevf0Xtt@(<$bx*mnE=oAa9XY=~W> zu16h;WBJ!-i~6{*kAzvT3^`EapK>taVCPf^rHM8xj^*>;epc~VZpO(>Z{j#^^v1k< ziR%Pvg4LxR+HWml-^DTdsad^NPY7DzXXMen?%gvxq^WD{_x<}WYSPq4VDr=xlo5k> z|K-Nk3Ve^7uen0~xO3FC>Ain701csJ;OBm68Wd<>UhC^ufW}Yv%(=E`D^UH)FfLpv zMwI&1$Xgp~cy zR*XOxnr}~z-dfv>i^w6sYb4=RvcSl~#2SU53QF^8`&K!-gy3XiXEErTC;^ofk4r}t zXbYyGhNx;!=4D2w{kr06Ev|PwJ2!LF$*Q7M=9}O6w{$Pzri@{7foQM|Q3X@mAe(vV zY*BN`g>&L>2;$C^C@cO&GB7TH?u$DeIqy!hrA->GMv_0B3xN1h!#k}HT zd|7|Wi;xlfD3X3rUMb?2J`e;>;n||!)-mi{( zxGk5)sAEtBz#0&DJMNp^5Y!;n^_ri#)9pr-bYUKfMNIN5#A$RT^rCm}Kjr7=L<>3Q zN^V*Cu0G(A|F1)q}}Dj7SFFapTpE{7dn!i2=${k~F@w z{qz6sp!@K3S5{kCLo~ZlW~9}x&4@Ya>yVU>CxwrNNO=76Y%v> HkU37OCTyStbd z#pio15>~(UD|a+RMJf45mFW||__KZe+a^d&M<`6}&rf{xXPbK0*7cS|kNtc-d=Hvc zagB9?>0+H?{s(S+-zZ}_B9h2=&g92@^zWL}fREqw+b@pOZ!MyoiuH&-yjqA&7b(Uqzt7GmD#F4N4ur+U~v))^{DokACdd_x_DPFX!&~ z5GCk*c(Cq5{wwMJ53J0PmFRMm)5R#A7B&~;f`W!8esctsC(^AAgaWP$uo$`Z|H=Ui zwwj!J$k~FX{XjX<8h58^7dM?#N*ekC;&5og)9JHJD|O7Eo?Gc0duM@ycd7xx=jX{o zNl;M5A!aqkI3^^Xq~M+G#UUnvl9|pgCt5du_n%getlBo*gnIrI5uajGU@&E3+k9y- zo_$WSn-3uFZu|a|UcU^$?Lc)>uL2&t`_B>MWPP&=R#$jV^+R;-KmO5M8se|yeTo*G zvLFKEU-BrCQb9uITtXz3e&gR9m=S+hR3nun#QMdB^wD#i#@lL?mh> zLDr1v5~a+od11WSUwr@LFaO-XnCr=j_Kue91}mqs{1XXC3j-IhUcl8 
zJQXtLY2q$Z%cHZAm|I1=S%}7iaVL07hADk&5@R}10-9as#`peZ-xWH60JoUo;X7Sg z7)DG2Np{q^Y*snW35v?4_sV!0tk1jFAP~O>;x90wxWA3Uh7_vQpF5f>R>V=7QZ>7Fkro8CKk_T(uq>NtB>PeI++V0 zzEQ?yaF-YhBSet#z+C~4>qt6mAteJxLLK5485Yo&A&3c=*9KN`=rGEe#%I6sHD^Iw zWZibt`_c>|I9+P4dA?KypP?Shw5SQCMZl#qGW1iwdZ7A_zZ~K&tVlX`3u)yI0z4eG zsu_3v#T|LtT^ltK>-d|1WTW~XnjtPuiyUvSMeO_E{5N-uLbO5bzx<+RhgJ$~Sp4E~ z=AUJD4c0{@thSB>l;Y4+=kjXXkIt^fo?*ybiSW zO!Wsi5e>8>GbD0XrT-M#{D+MTan>o46(65X-XWX8*t-Cud@bo5iNAyQKCqm$GzOkR z1wX75H(zt5y1wz%7t?$HN+I}|Ago#cEUq>0p>DtIOX(ID?scUI&bCiL3d<}bw+umf z-9Y?N$tXgXU6-yCW36ZmpcK_RUV*n)`N}~p5V5_0u1xbjv_J@0Vn@u#TDaV5pM_64 z+ae-xfj$$b6b2=v^YzB~{RkcT*whm>n&fN{{%s&LsW7^FB%5CnS9rxtAaEzxSI6ln zeGhaztGcey$*0U;`#k&1x4471zTce;cLGu# zO7EIq{sRqTeN5Em73N8agLQVy6YVOaKX;Kr+) zU4QFDY~%ROs7SiuwIGDXr>mhqu^e~yNV)M4AHSUw3wV&t(UK3VBxoZExD}5BW4kRV zNF>F^+C8a7+`(2vB5|umV2>Me^H=Uz*{(kcao3!r(-w64f7Of z*^yw}B$6jCtmAG=sxH`R)}X|fi6q-9$-fqnaEo6Uo?|(NTJmEZq}9*N$SOA9wK|Fb zN-iam9`Ui*brJdy4LPQLXP7jw!5nnT_r1=2?B_S5el6x?+111q{Rr;#e*;L(7LnwN zWy-fTb!SzYdCCxX03Lqu)9U!~!y#knNz`e}S8xyihlgeMWSz)NsLjV>4DbE>k^lAs zyY{_*D@V7mK#MMP%Bm5ORFkDd<-;4x_}DYyxJcfaIms=8qM<#L1g~rHT<@LDF&qU2 zO0_D$BEfZhkMD3RL?Hg^7Cm2OgChPsFRP1_9E3F35i<3`-Nj&Nk5D9}=S80mTj(@8f#>K1U>y@>! 
z+*!1&-GQ6lx3(Q?u`CBZ2*}V{bN&!eDM!VTK5*XG)MF&N^5BK50eeb>Yp&DL%Ma zWrIMr@QXGUmgFd*<3eh7a%G&z0;FMd~e}1UjZNf7}5lxQXB&!`KKLkv98m5|7rlN z6!4m*y$r*!UR7`=s42&1;!4-OBHrc$GbXU{6^%^wTN}P{L>~l4cCGL9}(vsFA$0n(-3?T zM8-3f%_r^DYcghCrs@OCvGEX*_xmsoN@)@WF{dd9`w&9@(%k$Bg* zp*VX+2AsY4D8{ohC;o{ipLUOY;Y;)1=4Ir5IZ%v^p&0hXpoS10^6!x^9C437 z`Bc*eadGqF%S*yFewFit*$wpQ#%Cjo-O9J3*xxg zj%AC#FU{)y!pkqa6OKLB9e3Q(&6k5uKkd|FBJrw4AD=MOnHvoKbYwRE9-sX!%k)Yk zqehwHF09Y}vwzul#FM)vBN#quZGpj%g`fD>KU++6M7_Ruwt#}S znAedfe{T^&IT0yJK3GB5i$-k!Xp7JPp|gtX6>ai;v;5-tn}c%#TW~Ah;W&8sfo2z; zrXKSzDqrcYx%o~v6qc_GH(xG3RO#J+PMNl%X^QXt)7Eu*?;pmH&ISt|aEdRp=Qw+# z10gJVvhlfE*B$+<1THysZV-hbk+&3?wKnveq18glmYZ@`hBdUuP$X?8>$J9ZO9D{n zEcQ}x19i*F%GA;ngPgXGEzzW~ik9Y36J8GH>&fdK`4@<2Aw5iqLNDhn^s#su6Likm z^#x~;xy=<|9?1-6%Xk&$LC&M^33N@twFxyT6%3i$b=S1~uRZXvd(UifaCY~evEhhB zPyBo5+m|oGhJ;|daMof>@-HVles#BmD2T-0?Fa8^esQLFh{X3E9lYm(hKRYY`N)F) zk*SFIc=AL4@v-IZi2}T<;On4q(RT3O`KPFiT916;11i7126BBr zZ-WOK@^ik}ryPW3Z^!rfefZid%p_AzINJ{DenAv@uhFjk%02=yYPpbcC`|L z?JmIXYoj^aI0gdx#d8pW(jA?#p0^#mH~V$fh6rB7x$(d;?%gvYsmaqoBsB!oY)nSI zDuH$PUo3O1TiltHf+wzyeEF%_-_x@)etwbX51YRB8uj$XJMISLACylojo&uAfncdu zHx_0jn0B{&do!bL``8b0+*xkW%99gbwWVs8=~v3sQ4I{wxio7*D7-czD009P*H+eM0~W>W1j&C$N~jYVu|o$Nqy%`f z#8~~>j=>~=I(AaCgnS4x)}h^AR*V6OlL^7IaYCTtJ8}R9*lz(xLdfV>kU}lG>4h-v zTH7s>Axk9&kq<{@z7@;bASJzUh8h1wZ(F=@5NGmZbpLtgCD!5X*Vlw zP!LybnZ;d>rQm2s zA%bJMhI=y@v7IftTe8ct!>n5S_g$lG-P7;=tFPCI zSk7z&$IngDI)?zoos8*sjMQQUfQ`o4GcG(MmJ8#|Bqjq_?f4}HX;IUusQ|9&jru;U z0N4rK9Bs~`r(b=)xkOx>az~RWWG{GII-*qCxSk47w?kj+*rb#(#|sQ0nFiQ^OYuZd zvjLH~sX}j33X61=Mq($+r~RrJdx5fJc+yM~Akq_EOX?o-JSR?WZ4qnV%U6UF_)HXU zJV%%8W`&nyK_r{5y5S=$y(=9_tN6%pBn1!t{QKSS{QHCMbN3%?jxTA5x7RoEYP1Oj zYFNPTX&(JGE`nO}k0SoMr=LN-jT*rYTl~fa!h8PN2TUYjT<}A3h#Q^~Zo2*MhS-x3 z%l6I%fi+ZLqO5-~BOa{8$T`eT9Ih|NKy0TU|LKp+2+Xgd5=b8H(n*X2os|TXb|c2V zhy=xrvp#(B#aBRnOxfD)%zjy>nD)L0A9fbJ8}1y1a|Z-BM+_O*y}SawDucd*pMOxr zfR!7}r?u3u}~(HCuyh46xikLPL-%k|!${#T2gqHtB**Vz>O_1IRCP~?b?xNEQ> 
zM87PJdwP;R-LP{u=FxwIxg0H#%hjtT98;o=a}-v#66=t&?GWt_R1+3eN#9wjC|7*+ zmvX-pzH1h_h44Vck3(sR&bZ4lK90S2fQ`Io!r*U(EM4kr^~6)=Oo7#p*wyBhC-CsR z02k+NSgn-|DiWB(SKKHBsQT4#Y6gJveYmN6RB&yxVbHAKMg8wzk(wD>yqzvI?FkcUGY;k@>%+-BsElPBNAn|9z5*&1Y28zfWq+c9C7EMb@wENmZUmNl6QoY z;Xx~fG~NgAIAq$D85h3|@!U?Dx%FO`mmBVwwi8gtqzmwKQp55XXWOpjk3#mBj9356 zF93@`bibI!&sCS%zsed(GVN;%V%_3j*JMkfv*4V<5_UZ|_hO}jl#OJ}2~QQ{nb-(hEY)sw=+oa~p< zVi@|W{jLe;!{ZA<$~PAWv|xZ~yvx=*Wb(uz+3I`d1ldKlU3@>NU9T(;mTeoR1-{`xc0qV`du6QE_g@tQ4t`b z2$7V+Bwz_4sl|M8q<<;;oOF6Ud2MVLBod_aN2MvN-S#Zy26!Tkcehpe66BmzdTMb- zIV{oylGdTwNjQ^dLHt<4GM|thVrL4o;Z{XWJgT0IhyB}~Y<6j6kISlL^q%Y$8;x%TAz!0B$Gm%?y;=ZH$2`z+6iy`%sW3u;5 zywNbAjx`|Fvu(qbx5>c19|`8t;Py+%OufN?9zHjf!PeT5%A`?_28*{7^o>|00O#tvcC0mvvTOJEr8JOtISPdvSX=t$+?862kf%mlh48}4i`69p$ z4*GIXb0s`q5o*}|Sma3l{out{x&wFViKkVdB?IHmz#2qg+S@6t@BWJ)``~s>b>;$$ ze{zU19e8NAxO_ypODyizD1BrQqkTL#z6^d$@~=gF^whHR?3}lro}913?}mR$0khtW z#KgM|k2J?3c?*jDm-G5rd zG1id7+}cFbjLLG=MDr0V!#j857k_u}d;d7{p7N}uv;(vEu5Y@$>(39)ne>55RzoG% zGy1ulxdy_t2KHqt_Fpax7jb1+Ur#lF&ClucN_ z;gO+Lp@awwisPkx{SJos=
  • 9?0e!7y}e?g>j2_Bq?$Jxo5jtXx}UM9%grAZQS=s zy{_SRfsO5sG`lgy-FmnLo;arrNoYvc(lXuJdygRHs4w&CgC-6wBti37CY%qbEI7}o z#7+`(adHjk)EaAaXf{^nl_evC-x zh;;3*XUe0)(mrr2%)sQUFvttU@mKb4$cQyK2n$5r(-KmV{ELqQXECeJzW?St@%0QC z-Mof5BdAtbwX7K86fyY3zrICHvd22Ik2r5VxW?tZ?UJ#a1tv*wQ3e1m>zpf}vM^RZ z`l*SMn?xylSh>$MsMXj-kaT!OfO^ecU*9yc{qR(PXQNR>Uat95Pk#w=2Vhp+7@b_R zn$|^E8Pv2)OY(KEH8MOK6f)8Jk=ls9e_0yjoG6@8(8QBPba@WaGv6>^Emu7juWPBN z&q^Ws1PAn^F3%L#0HaN^JO=#2%lh4$^LF6~Ie0FuMgkF6va+5z$7#|9tHlmocENdz zook#t943*HRGc8B)xhc{)I-!g+p8tco9kc3lvutTe4F$&T<&H;2_&^GK>Xsb`Rm5J z2Gp34$87owuGex@H6%%;Y_=Cc8MbCn>aucBMClr^_O_trx((DBE0PMN-xi%qM|z?pL6DCIS{uJdEA&;5aKg6P~<#Z=hjFmMxea zq2g0FiG;O4=m%xXGgcSNvir_Sq%K2FASJ1wPiob9OcVX+7rxXGfpJIR&9@zLPgMK} z#8AXXgX2!Xo32yrQdppEC#t(F<}c5uR;I0dszy)F!Sv$!Jrp}dx*o-=P=Lg@19$y`zbkx{x&v6~$^|h_wM8sWb zmXF&s=^V;y(Td;cWQlIl1w#NmU?s5Hc+ z<Vrn?ZcJEOS2CG{kY&#Ev0E%lu7yO*r)X_ zoip(K!Oi`-{9PI9^eH{ugEh%K8T{FP5cP~Z0`I=(e)sMly}BU-umA9Y898`l)OW*a zapmO~xrcuB0QtTmyq=I?S!h4>8G@99nNdDgp@LY*g42~>Cfqc{rulutqiHtD(z%TJ zurV`6BZ&pB9a=P|95wxU50p7=D>O0 zH%sF}d6Ygn6o#FTd6<FYJI)GE2U*m%YTS|Z!>>v2RBf{`Q~??+ z%5e!KU+Y9dG}D>AxXD3CgEI)Z3zTEFSl6+cpo?1FES+@gstIN}6;WLpwO!w?14Ddc z2#a)g2H<8cG-jpH`7fjl(GoSv(H@Iy?Y>Owlui4g+27euME@Qd-kGd<;f?DuM z#Z?vyw{nN>aYJ965O)A}SpwyzUPdcj%OSD9jBFI|Z6;FxF*+|Q zz$@kpUf4W8<>TLH7)gEW*NY>;@rhr&Wkv)ZbRWLeGjz3#m6eGS2R;Kr-XUsNu`x;z0Jq8&MGc(q3)z^EH9`!BoLwEM4lCmweE zxMR+YTQJZ$igk|f{nOrbiUj9FpDlZ>!+4M!x)M3Vb(~()tQVd+-Y#&1X+wbBptU#j z8rx{CZNSaL@cMSlWj!=)8i)m!-iM)DB%ex$qk0oy7QQ!Ws*^9+DDU$=x0(pc!t0Yj zA|;rkX)(}g&*;Y92B!2g~XRGGs*oLv~;RkZsFfHEu(;uPv-WHrGT_bL|;&L^?8TD#2 z-}|>AjS0c7lhy;MmfH)Bm%BnxV|?vndjr}APKRr=dD0EzEk;r;yZ@i-jv8Hbl;|sC z5WbMDo932QV`#JPN?1m!qAIvGJElu+W^LZX$|#VSQW42(1fQ&BB^*H(1VSE`tEs$# zHH@oi}ScpY)HY` z-yMquLwsM~>2I2mf^R*e*`+oXDHtCI{*BK*nhsR~3j@^5H|Fwj0ybi%RY6Rov? 
zb$Enks{lL!OHKv0D-N9#lX-E?wRTI8E$HeCrbqj6y%*=byN>OuuWY{P5+(M=Eqw+; zUVKDR3x4Mu_SBtA=W@+F(!Xwbuif96+0P>{J*|p6C%B$AzAH0D`|%ODYcATLNSJ+Y zcC)7?1$Cgt0!3nb6fX^r`NhAXJNeprMWQnT*DLD5l5DLpyN)y|48;6>eD}K>l#lRU zweRBQ%g1foTz};#o91Jl8`o?PWM~Ca-gccq3RXMsyQim#69ulrwnb+*@Ml*!NU)@eHXq%waYEGdHiTh z?1T7T%9u`z=ADCFGgLAQHPO8H@7gQW@gw=S9q;|iVI*Cph6VL&fx_?RMjsA0y6_u_ z4I;TBR$3gX_cqA(az6gbGR}o<5uooU#F56obL+7IN5KrUkVqU2LglUoB%m^TW5gld zU`uCl&4QE|Xj32~FCzt*Rzv>v7B(3R8|dR8#pB|8q292Y08^;oHH+u^y3kl-5C>Mw zqZlhD|Aqp0ohm^D*wGX4odBQ%TEzDS#=rRJa7-Q_U%lkQ^W3|Xk0o)4WZ4z7JNI$- zp++AgEcr$GT0(9ei0@>3#?{fa*e>zWxp%$&>=nI3YdF&W9@+|_!gc13%;NaPqTP16 zuof#OAQGFfIDzl4@D7DpWJ6L2Sh0i%T-xeW3}dw2@wTaGKD467{KGPlbg_J(p#*q0 z-TblPFS$cAo1PLP)aoBz!(n)`ogYV;H{Nl#J8y+1<+ zhRpe9A=;j(C-EJL8pQUv@4-*I10U4IkBIU+U69u4D_u zb&X*MpmCQHfgE2XVVPVd8|VuA zil*L(@{W8DlQK&!wDEN^Q+FU_cP9YhAZd`=X~Th?9vu+~7uhJqfG_y8lH#Ma@$dNK z?w*l=XSwTUi`SD+DkIviy!_&ZSd5P{YU;L4kIv>$V|Pp>Pp;p8nLB?V8M_XQGam60 zf?GNuLrSK{X-a_0z>Q)>Ymi9?l&!rPKz%8fxS5~^Ow-yM4+_7ouZXPx#~*9|@^$+@ zvav}(h?Y2ugkU5^4$cU(^rHw=9}y0S5a;kGKi%1MSDuDYKi!5P0Ue8Y@@j32f*vZgQJS3{WZ z(tDfok#UPI#@%t`qp>xdm=Q21dG(JrANlfA-7e9wVbSvw7CZN~nD21?5_xS9`8Qw* zR0D+D-hi_@FzOGe6~Je4FTa(NUMGK%%q6LRI)VmweJl}+XrBjCE0Kn>K|4izSF`C3>w zoQ9Is_q~&_c@#e?=~vg3!6m$5>PJxxDU&oqL6`+I@Dj_KI0R&r(gQVesv6%J_@UeG zcF%t0`R41sR=o4vv(yCOZbDq3mFP!I_pzUU|JFR}n@3H{(b`>n^MEtH*l-S|+^IYq zaV(`N$!8#Hc0sr64+!=+Gf#wXKTZ%y7rcN>5kK4zcPhnq0(mcV<$W&JAAqIRVOe#&?l~56K z#jD8#~#;Fl3mm627}pp3Q0zpApmj zS{qTrnRQ2=-#wHg3N%-7%xrA$`fE6x5X520J!skg5x(}5RoKpXBrp@0xcMrz~v{lB!X`>&y=K*)!hwWe3JOAxxy9qk9 zi`m^n{*3|23Dw@UZ)JFNti^gQs2Y+zD1KBITCPRA1#p}ir9B9~-jR&?fq%3g?8I9= zZkQ2u@m+KM9b_DdPKBj)oPT}w7u=8hvk$E8J#4XFzxi(tZ2gYY+h#k zeqi-2Cu|&wEGoAer-TpC4R5#xXI$&dTL-0ECDu5nag7Dd@xVUp1J3jK2G@qq@3^gW z1cnjPJh^Q^eM-$2lxrvGTo6R92g{ZBjw*gdo@Wpi<(7A^c`jL2^!y;803oiS3`KxN z$thbKmsLb1F_N3XmJQ{|?f9NKc0#)`11g~PmCK~}i&Blj8(MW<*12@SJ&zmolF%e* zo$oZ`!6K^^MFz1ed`Rozy-RoHIQ0z&?`-vmj|AU+@BPk#csF1BxYDz`;tPJ|xcled zul{1{cA}OG5DLNr6IN0tEUSb(T%kKeF8B&9SV#7?Ct+bou#+;6^UQN!b-NFNIoGvm 
z-02gEz(|ZK>+6UC+sH5hCj%jMP^$)WrR@Q^;Kryj7Q zG>YxX!&U-ipPWwGAPj8}K~;Dx218GW14EQ5%1}~|+wZc!Y}@wr%)N>D&c8F>c&gjo zs65fA^5gRKia3^hthZmrXbVqgS2HUQgi=*Wu3Oq=o4`2=HZ3V9me{YQ@f-8dvC2=>^C z4s&gF$njC*5d&1kakxbm{96NXLsAGrx+(=&WBsjw?grF#PA!Tuz~@`iHa#rM-jR4d z%*#7Qu@ zE)^&TAU_brNpu3|xgdDFBg%`7TmMj7EHz5hA)n7YMqaj8NYP^x&9tpdbLH?JyaTP% z3A+s_(T9k*TP1!N4p`Wow|C$N{>8KS$6dd z`I=_T=l5re#V2OOLELTCvlJs0k8#r@<`L@_-@CYth&DXF&oDl! zU5dE-uNg}t&vO%2h#`GRtQ`^*nl-No&t+W?l>NfA#3rr;9Pgq~;rMzLXg$O=0#82_ zH$bhEtB`))4Rf3}0gG}|>m!3M;n7u94F>_2ibtLR48y9VRS_{BXkE(_JDRBdokVT` zd>t{Ztgs}@nkL|cikcT(`$g15Mvs+C#nBZ=t+HS2Q0y*Y1LRUNf_%Cf>nNy8Ac(;{ zp9K)2RD%bq&m?$%Tnn} zSuoGYrQ)s;oq{e7N~_$-Llau!7sF8zf9B`UeC2udqwRnHpI$Q~K)!)Or4>+{-_TK5 z0!@BeJy{|WK#`b>e|H^zppkL>aYxUHtr;0OBhMm1A4#+tkGjS$6VC}is+`yn8Q!)n zAvmvBOLm^x*qt-p_=bkOt5L7GyD^ertA4z?7Gl{iz3?4H?}LrRU$gsf!tQ|NuQ2Yi zg)|DeJPgRVo|Pcw!Y!IM!TV=G6`8spQ?xGRfNeYs+W<&YD@FEkBw!H&li}t-7~9#YB3ctA$9N*{^^41PZ|DiF1H0bqW2$t3A#6NxS(oc@3^i`llnW6T zJ|~~}diTVPVCw*^>ikR3!KK?oiNaBi1S-Q2JTcN^YL*>)nx;o^terNfl(BB%s!?Xdsn4G(=YX;%@xA?a(}a+#wk4;Q8mCT}+(C zorN*&e3u?&*~V?pIoX-+5{zYP>h(is&Fte_igrK7w%EV#qUNI-+lolyz3pG$;;e}2 zHhCxQ1S6=S=;V@B-3z0uL}(=RD+h0198jRiRyRjp2D=r+(8NvJEI-K)(=hGO&IPxHUw9usvAT;{0*L z4TEzyGCXko4sGF33%zP!y|%@V@7jwy#eV#!KhnHMu=MM~`!BoD`jYzoqJaE3;RV~)d6}%k?N&Qe9l|-i`4jDsAs-1d}jx? z3lL*{L37x+sjR?*v1#Q2&@^;Fzr}g^zF^-!f5brEod5PXrU|+83|(?Zq<5XE(8hL` zrC@n0(|%nz+6fEZme?Sot&{N8Ba&e8VF1b?Hn5#2<%|f#wL#r2CEjfP3E8U) zQUvV!^LUV>bq^%q$5Pj>`=Md|nqp#O zs37tB(xBzwv+P6!`Wn1wX`X5dtDNC+f7%y{WpAM>G;7yAR|s@1h4zLOS#jHFK&PKxv#0? zHtzw!3CXjm>D?y+!yqueoNZSNsooLc0~jO)T49FJi8U( zI!xbBw=N!gjb9emptVEG54Ko5fYl*>)FbMJ<>MHCV`kHiy!5o&XrQ?o&hgRi|M>s! 
zBdW)sT09fK1#99}s0Lc9fn;+lQ`p*MI2FWyVmL_jKWK+!1j4jG(l@@+Htu z%@!~py8cR4-1s4q`1c5UB)0P<0t#qBwa)-uE7c9l7cd_g_}Fg?s3O>`mVSwNeC#3T ztu}%&Tmx$15*8X?jWBol7R6B ziT3@Xr+nXQiQUDUH{Prt$9VH;r@A8}y|cC$*qmXVMEY6s`13X+zPEAMyBl*l>yc*b zbv*8t{e<$pjFG(kfq#7EB1h#Y_l{nparVs!YW|1ZQ;Do`fSB+k+_|*$8xG2Jx1hqS zH|z%cP29aM%H>llD&_ERk-o54GywFslu2@+=ym5tP(>B+%A^9&5ZKAZx2VZdwTMNz z5I+~8=6kzaB+coEHtsxq%^Mb;2os2* z0WCQirjc);qT>S72r0rqfUpD-U=LE2WSfq$RHgB2tddHNLBbLeNcbU@O71rHRaN2J z*EUiK7@Mc026TartCBo{9Wj!KY3P9nOV@xujfC*moW1rrxzEa#Yh|w7JNG{O{Qu`y z@vpNVnJZr_SFZf++&iyqCOVIN`lvbkEar7!ELAlq*1=U*yggX0m_sRX1x|BGoSP0_ zQ^)=&o`~h5l;)%4^|a@Hs3%b+;J!ff3a@qVZ}e@R*a^?$soIS0^+bw<+{9{t9XMvJr0 zI33B=$p^^g8V?VD>FesSXRq@WU`h9=Q?XtbZcP}=mPO+9kz)(1CUe~r24cHL`C@H#MT z4szZr&IxZgJv&-6|2HXLEAx03?`W*Vse)GqVFe)Ur0bE?dCrkmR*q$Uhx%`4pFZv6 zRMKUb#DDHj%XgdHVs3Ex%Pt>03(m3RIstN2VfR`sKp$^DaJ7lQ1%lGiKD>0od2OSf z01#wf`-`J(AJdWkLmw?H@M6KP>Swa~N~i1X2K-G)0>b7eTAX#p%)SAroRsZ+)UE_? zYp~5oF|ZRF04-7z_w0Nyz9p4D6{ZBkTaaVJC^N=|*#KC@_0ep>7_*kj$khYw$|BY< zpihQ`vnJGEY2UU3@7D#j1C@Xc-|}xyD*f`Lg=%vAX|hteK$#4w@>|f7y7KF5Nkjg% zM=Ekp!PoiU3Ock%lIdL%0k54L`RmlsDaJZYu}X#js&X1t=}-P?mT4H4{9FeTCRiO8 z=N-tuL)Vv)Xs|l#k-s`>uDklZtBXn-q1>>a{zix>6}Y7F;5kwGjm)}8fL@5 z&EY6VJxDkfxPg6u<9Mk$(BnveH0W`5bR+C_3D_m zCoy0paf-UIL{ z;VqH)K6v4$`U(1F%N>tv0OuP%sK3~Gbv$gS3%`L-)U>VC8qz$iBkTa_Ysq;pZ$Q;Q?_ZOZeILdFCj z`{Y^!hN*D5&^C2*@l#93N2z107M-pR)H1ojZ+WjXdwt85nmlNrFz_~)K0W91M1?MY zox&gY016-Kf@Y%o)xS8}ZQ22^ z9N$;46QzV_VaH=7fH0^@)ImLcwIMY3H-zTc7;%{2A^ySM#?C*www4;%U6L< ze(CX|6Y-VpiitH!22fyI%3_#V6xJS?qr;ahuAu%owjql{EN z`FkbAV;>wY_zVSXKr;zA{sKSHG)5kKjBsBHw=3aX0=-11)Z{cn7~{Y|XKL^9iNA#x8@ zP&mg$lpaH8(7Up?p|jRP^7n6Uv^PuezekYSc7Q zwA8S=ZU3*|(*6zsY{mgZ;KTo~yV~P$MTs4pe^30u-vtG`)q23{zlWO%e=k_m?nrt3 zp)V9g^CTeqIxFpCxRC|h2z3gR&HwyYMTZ%VNEe%^iUYr4?`qn`QSfBDLim_U5M3es9Dbo$tt-Hi!rK_Vik0q^S-cFu})-;;pL!E!l9KhfAI_`A5^ zQwX5mdygmGDZj7O(;l z`sF>14LS99&n|cSE*)i8fU*HeR?^2c8?=BtSTU&`7vLL?K(L2ldGv0jIc@3sXGvNJ_2-oGpi*eJ{;qx4;2ES5fFgsVQ)2Hl|aj?Iv5w98M92;(kR@G7^QtzCRK=2#!I$>g?db3IES4$PkIdqPs&5={2 
zx*Z-1nEOFi3ah(sH!H2a;WFuCPA0BYFC$tr+F!;Rg1*xbb+t%94^IYOcgzmuvk6Ii)xOj!IFA4ZqGx;thL*To8Fv*9qJ^HODTe2707s}SQ z?XrFUWz)7l`|_8yq#vx_(dZe}d5J#upK5I}4*a0R+^W@3ffJ7gUyIVl%UJLr-&_Y0 z4_2U-+C5Ls0ht8k4iHzM`|Yl<#BhUb(EW=V;_8Ctp~k!3qJOC2U*CQIuiskqZM^ou z!PVuTN`P|Rzg$iH=r^A-k2K}_PE)3m*K&cNgO!JU$o)qZ8#NtWZhlk($QKZt@YlB2 zH=Z=7{{9(txgb3#&;H971Iqlf((Lno?PE8nx&| z0wUBdO(i>bStiPTnM!(&BboeZ)ka%W8&3R5lLXutmPVj~sh}MW)(-Am(l2b;RJ%L= zRzb(5+-k{?Qx_AoUyu5sIBg{JI8!Dhw^a?=P07|W-d1kRKVt|Zen z3KSH?3E`b1tH?BDxMpuCY)B6r!%#TS8SIRlxPDt5&4Nh zxZU2+#PuZLv3qVAg@^;dqzV4cHPZk{wvQe;ZmOVZ1R%L!B6k4qfLs6|^DcVh*x#h; zPXfXU86fleOs>yclAN^wcJoP z1=t_W>O5ExIpiCOK;U6=GF2}BfNIDq#6W5edi}T0x3|=sQ`Cxx*1$$V6RlV zS0YWAG*^VnoL5VP=tp;&(hb8;I}EwTg{=?A`${dr|w1LsoATf@z?y zq@gpd`NAd3l_|5RM{%&+^=XEViHJY$*e735(9?p5#kpWobLr5lvjmcbuu(9TdMR0< z^zCpEuhrB6;Yp8aI1ZgNg(}X;GA)-TmoJBfldw*>McdAj-(?slId+Z-Z}i`CQ#NxH zek{m3?`VKbd~(qigzte`?uoV?S`zTrZ!stT?C#M>z^MS_-&MbU>)`Jjc34*6?f0HA z=e_(KbLq&t)eZr?HrX%A9?Aoz>_>7&2MZ2$7EGHo8j-raNWgfpqxq)jU2((*YEH(i_`PUOI!iEe3 zGZt`8%7$0LatXw8^78Zr?0o&4EE-TEQKfou3|W_uQ!>DI_Y?Z=q;Ls&HkGjfYDIa( zk~kWacJ-`m%eR?JC7f}HhQf^?Gl^vaK%BknS8gz$`O??Troxkd&;4WNT^H>_Rp7P3 zXQ%q0dm-TUwkv<*R?`>99{OVadMgAF0grwo|0>`Xn}Rhs*nT|sAG8XdxS5b3Z0^H% z@9r(!?RzQ!;RUPS`h2?%R@IemcCrotNmfd3_N|jC0k&*V9{Y5i9O1`>_4kIcHWOe- zhMe%bI}nr>%@gpH0TY2tP=jw_KcBll3}Y(Z2DZDVI;Le{HVbWii1fG7Fa?_jx>%|w z`Nss)TpJhovBMhL&Gx((V3F@VG8>c5flbRzTvjH=)~yY0=OnGWOIht*Yxt3l@`J*I zLr3Lt;D{wbUY7-HnMh`pFW0}0yYf)ERtT$=S(*nkp8QoDi9NOQlk$|aET&T&O_Yb)xm zu6l&H*M0DAvo64d`JR~)FEg-Or{*~Hx`%8$m$A05i4^H~}ASHjv0 z8(`GjI5Lg4p$lx64?MB&gYRRvnIi%F`m9~%wpbxm9X1zHJqy?wi01GdtF@C!KmUh& zcLqVnPY+FWIHA+#@!-r_+RYl?*Bpe^coeMw)CzF?Wg!`@)LaCYIMWKZQAO(ISalq& znm8`7#WzVHqiyq^oZAPGUNu|&>JD|_oj9TV=S(#G#ohk0@QXTcJ zoAC`Q12jMW%oiUw`+ohFjadx{JMH3?f4%D6$>(kRA`%c@LmRT|ZY=<5cl(j!&EE-A z13)_5xQ10jw>4$hl7G7tedqM7Z|_#KRys??Ve!iE<-L23KiIBDtM=)D0RTe(ogb(} z+nVGFz&XQyq_|=goXEsLlDp#!mpE@G=JTbnp=WB6705ro~2Ocxsd0@2M5W&`8 zLvtBN0??_qFhvSwZZRSpA#QS=d6&U*nyO_5HU%kCEK|5oOogeSEe?)Es;V=JWV3Lg6ZZTsT6HqWKsah 
zXGdV-FlJyH@nKe;D~yASq77dP$6i4>l`?Ia;Jm=9U^w1EvIa$LG6E|A4}bK2^R8bx zWS;!?GiHu}Reu+~@1QyIzeGJJ-);BRA@n`k ze%6$$^mW@Y0CojlO&&u2c|chXee920d2?6|2=%Kf7vsxSaR3~Va3+3_eW%t(B@!hyMiAh>Zprn! zs5J}jnKY+51@*K-p2Tc70dZ=@n_Rpmed|2xq0BgutUDc-Sp_9zVRgCw{RUD<$JHVBg!-nZ z$x;PbIIG37EN_dR{qFbM)qoeh@pb0PE8cFdy5b#X-2mbbR{a4nIp#1yZD{v8%u;~Cz{;RDx4hh8>x~4iDM6w+|p~Scwg*Emfd?8ND_Hbav+FAKa|XD&x55RC<$Sc&vwqQmNFSD?%;II@ZRdU<0GSqs^gN!16i{4LJIQDm|ps7lt*`WZsf z2l_+;0MP}1_dW1XLk8|`iNH5Ezf%E7qZ18j1mE9-jW~F18~~;vrFOtX>2p8&Cv)SS zf84G@1S0U_H@vR;PRdYrm|Tuc{9(W({lX;?a#JNDgc5)Corx`WB{-)odb;LGK+?gx zdsxtdRDwSsm0)-5F-Rro%g`VCdKdJbe|S?u`8U5 zKMtyUaytmiy1@-)rB1xt;OoYin8MmAn5Hb;oYOM*b|m?in*Y<|EXvAb-}G_w|4CgS|rWW;>Ytty-@f2(?MxZz#=>*6SZaOZC-7$AmEVPu(*sH6oV z={TBqDPc>}Frl0jVc2aS&a6m`=37dXcKLqc69gbvU|dIq7O$oW8!Br^eA3;Ce%LVJ z^7Pzh;&{Q&y~@0~nQ*}h!Pi!tZ~<8agyWOne%hRBCRl#=UD|D{gY^H1Ge8b>7v5)V z8FybpngK~Vgbal8!^HAbQ+`;H2j3nXrMya9+>~cuLx#Ne{8txEtYKpQ=r^BiNsd#? z6`MI176^%pmEaiNa|XaNZY*8aZ0P_`Wh{S5{{~VC^#6iF{m1Y`{$t;G(me95r)IN>#opJvsx9B8 zZ+mll40<>OxSS7t19dL=o?R?f5Fb3+%26&_? z+Z?{zS_N%Y_y<*R!5Mz#-#R_Pbz@m}JK79j%^0anQrAPbsfLYX8fMsLgA3ZPObY_A zC>eo;aq(r6lu64qiJA3))ps3Br3}wPm%dh3HvFhy@Od?G z{D39ZLi=!{A3{Bx`!8SC{zAQ+_wsYhS!ab4QU4u5n5;a%B%7cvBAuKGpd5S8_vLw6 zFtPS+3Ix?ZKzkDBTriPUu~S{cU4xYUOZvakKM{`=fH(=FF-skJcYE(&6d_>|dTj0#G2r9D5kVOCc@O%3)M; ztmy2N5O%%OXJoZu9eX(t6q0hxJOPWdWeEknpML0mV? 
z2BLZ-bcu7;CoAVf;LOz)TSUl_70$@^$R%VNCO&tM#>tEUTWpbqq^HpXHLPTQ>a`^D7Kemx}d0%M1TV;0W7(5~IXI2je zXt?pKc8xlm8)J)6r!0F)0*gdlvJeF&bmW&mElWvovZp7EY#i1aKGT+vLB0$Uf#-z# zh9T;3x{k{asU*f%a=4vv8Km;~IH`#ZN!BJDG$;IOFqR3hnmb(mEU!7*#=i#Uq7Tu+Cl`j zk;`YZfvZc%KYBbKQVO<9+Z6XtPyBUQ9tRCZ2b5h{Z=HjW%D?drBW?GJZ2)9Io4+f} zRV__!`C~KF;(Az;D;U-u_4qrbmI^x{79JKAuIWxLA zHgVIkWp@&BV-n}4j$ZbuLrw;6lRmP#_>N|3$%19bzeT$G?}YXR*Db5q2aY^Ib>XB6 z#-sp8T%;N#F*rFIZbP7He_vhNY2OAFe~Z3ge3=fCE#}BNP7bGRC59>`E7kwE*g^}z z&lIn5*HikpHr;f>FMtlT+YS_ASw&6}2E*lpR7T2x@1nM49ePK6J&Q=Kz8?DyhDCK^ zmvx8rXQQeMvvU3Wffx(VAFPSF2S+Tf<)hVu%bZUH>X!{6Ek-Vbk^s~*5QDOdE6A-h zs!A0UYnrVzbTFKEX`Qh@6I*7$- z1Q|0@Qfg?coy%GZ{4k0dO%h0rgJuG1hM4lK=-x88d9emAE13&as|IwyDnDc{pH0M= zpr3bsnlbz<(m0-F&J_*Zrr}mmoFQ_|YB29jT+9q5&WE-bHTu#PYlan$Io&TAOz2o? z6Zg6JJ`Y@))GwHEzuRKNK*yGp&iB1H8khk!xVP;nSzw!!@Kd>GB+gA$!_uU}-`kBq zjU;VI6@}f!{h>7v4mrYZgP#vfOoDNK2~j`gRHSF(xN&mM)QCu#o{|X%D_}gRR{%~r z2d5-dNfdm6VW(>oMml*eJ&DGXZI{01JhSJxz$WeIph4UjdJjd!2GC#P$ihg5iSpvO z>oh#+AXbpy*A|Z*ei4UrgBEv0;BrswBx6FWUwQf8J~9Gy8N#Co7YLKW)2;r)^)?2& z`gFYS7CGXyuSg_;&`+%q;#hLIR7WS)XP;zBFX?ejof%vIwwM4?|4HdSb@XWeOodOK zGh-fr77`?`Y-4o^JC+=SWzhXCSa1(2xa%f%<6AYdW7e+5i0;A>nf6`({krxV z8JOJGV_ATyi^1g0I^ms=k=oZ9-XCH(=J9s_=E}F`K2IkXRBu#uK?maT%cCLyU9T`U z8CIP97s5$t`yGwqI||rNb{(J|lbT2@gE=lb6-9|Y^ee{4aaqRHECvls20EsWr3SRC z@Ie^b37EK!`~@}ypeHy^79ni63ynG6TJ-cHx&TO^u79UrM|ZyCW!CzL2Eo5@9` zxt9FPIfTQ2%Suz|J;qu7*!ybi_y1^ot`Rv!=bU-u_efJB$I0f7X9CT*E14}W>=rv5 z+_;(p*S93Oblk-<=wyw%=EH8W#hBons*N&+ocD;GDcrfZQGC4OZrqqsU3CpDAmN$e!B{9`NaL;;t~RIbX?bo;6qVUPA^$j z(COfOS-X}C(1|gIWyk6Omyf!jnWoAM#>d2)Uaj_B{92r_G4b3cH!d>i(VhC(wzP`l znLtx?uFYusrcLS$3%*4^muHX*AA-r^j-r zni#WMtByq6pqH|JmLE6k506wioe*X>!ZR`6StOGQOON^Jy9SONI7Y9SYMd(;*~1g- zkrJWv|0NunxA+x*ez8Km)qh;sW~R8@yhv4k>HtoUECJ}ajFI{u)D+^3Q&%Bz+IB@| z^<^fMQS)IGWgzt!Z3=-pSQkhj&OC7tRUwTd&iG?RY=dBOL1S;EiHt-D;xNXDo1CQP zNhMjS700nxtQ`g>yJP+iZ=A-#ZT^wAWm(S-c*SZLcx} zQGv$9jpGSHny?gC>Le)#&;?1&-S0_4GCla09&<8ErizYOOkEk5|_#GF1;#lZ*b1>&S_5-*y7VU 
zFKI|g^*7zwD?wXMh1b3jws7(53UyyyEN-68nAL8+VEG*O2WQu31e>|0GPK-SHa)Tg zU{n^)1`k0rR_iN*4JoWWEy=cG>{I5_t^Q9%zz=D-LD&*fOqk_j{=`sO+&R5x6=9ZX zv4+x_aAOgdNCU}W$&CvAp|+}W2(!v$oHo8RDdg^tfYLc9g%gfTGSD@(6B2L|TNMg& zUZB)zP}siUBo9_iOu*w`yHXcRGlBaCy@zGu{>6kE<7e98t84Fe!rh<9tP*c{f z!W0HtlLzj2NWNU=8fTigac`?wTa1OFj=&_)d)jiGO&!VVLo~OgS{xbz)%c?&D(`nTx-) zbAg-~c&vz6RBt1aw-l#hQ^) zv@siqn^)G9EnSnwH|==6#TGR{2l3e$d^};wbr!9>X)wp`O3jI*H>vAyg)pN7x4SX& z6U?3JnJ|d_Iy}K!2EZR(q@?YxA3unZOZmjJ$4Gk5)PYNp{x%tP0BGNz8Pt&k z9M;i6B^I>`YtpbQk(OjP>KBfY8D);SoB`)Xj0MZ4P67zzsvtnbOiV_Ez9eG%o0}A~ zsV)9&q&yl0?T6|*#0@4(#~Xk5&q;lQu^rKTNeW|(Ntq;*4zeTQ$nR9x$#vQboQ`6Q z*+85xR(tT*+?%Um-(qLMGJk+69HlI{&S;Emsks4k{g_i%rp-%_eE{sIV^s)h)bYN7 zty17rTUsH^4hH8Fx5s`CVdF-Vy3bBUR44eRTQOLci?c}>-&5uKF#et&{Z6;LtuafA9f?qgkcrSWU-B zW7sitT%gpVgi(7fi5wR-*0`u)W{H2;uYQBFn^2I&Ehjtw&NxNd*a(!>FEJnE0wvZ} zfk9mz9Y-qV4DT}+suLDNyXq!FYr`iYcDG0))!C+M6gpO>!fmm|T#)ta$nm85n@dpB zxqHt5c0B^#*E0UF3w;gmlj4VterjNrGH8=#rUb?mKTUB3@pm9-9b4=GB6x?8bJ}(# z(7bD-K^^-3i1=ee=bb2=U8owKfB1VeY&~pv-?oH&*`)Gx4Y|m3umaHWA;Sq~lDtz6 z1sOe0Ri{l|#d$2!SQ|u_plxPVI81(VGt%Xs#SKP#s5E2eNEUpH>aVv1G&)BxFknd? 
z%ox!L*3u~n)Fx=JSYT=0PunG3e8^bP?zPUY+<7IfQ4UM~`e3kohMmm7Lp3JNKI3x@ z`9~5h1gEZ}0qH~6)uMinG9y(JwFA^%Cn}6u?o9BF$4)NW#(`6|{LFGVq;YeLEj9(5 z569gzW`cpt6eO7Ia8nq7osGZ;CN|c)Tlg(W?7#c<0G<%&{KjQdSXHXI8JuO}?(nv@ z!a1HRndNW5yplLOy-TRehZH!&C(IZ89H+L_;2!3c{k+4frlJFLL?s9mORS)cwb#<9xl zOMMGgisF$cWe1`!(%QjwBB8ozS7g)@o8@(ce(O2ec+!NA_FX|uhv>$p0Gqj-DXXPl zW9jLRL2jHHQz2_M^cGvJ8`zhcm0nX1L>u0iojoGSC}}>9F2%3;VfU&%c8JikypVuX zSstv^YiRp$aje;qZH?>IS0J75MDDE%?_(d%ne@DB$HOfZ*zx1{M$}8*u^(Ct`kF6y zef+B0z*z+{2$AEy&j|V7gQFys!p2FIWQV=zvX{!_0g5LR`L58f>Cwo^-Lj;- z!no=%6A-P@XNXQFO}1Wx!9PK`DGJZ>H}sg;J(~)Ta=eII-U8EhS0T0V{q0q$l$onh zhFY!_=PRlSDaYubsv=yV^9vcSmp=5xpnwQVt z6OK7hu7%##RJ$Xxmd63{iNj^Y%pSB2D1DjiXfbM#fB1JYkoJyVXAP7WyR6i@gvVjk zr7E*ui6w5W3Y5g0e}EI*rvsM&h8fl@46Ttbk8-sCeAvF#D*v}utox7{2yaA*i_bZ}=B$)y5>+n7L#~#%jv4>!h zd}4HFa5WznXX|!vXgOYv5#QJH4>;B#lQy}$~Ew-2oZ024cea*$O z+Q7I|DxA+9fXPe7jsJ|T>fWN^$iC$t`yjE%cJfBjiQ%?r5%niB==pBcw?FSFkh-yE z_&7f+jN@ov?jZiQdURMZRqDXa6jnH9u_0w@f%{3hZqaVF#~4~aX%fq_BE{f}?C2C# zLr^+Og(oBEc9t!%(T>hqgnZaVU|H|DYjj5I#}X;NF(ySm(tJ5-gAvDHb0$;840lN{ zr!gPv-_I~W%}BmSoF@utXh}08$;Eag*$!HMoP=j;tSF8#umU?DYIV?teRQRb+*8 zYfQwU+H|_c%*5xaRLPi1#%(86S?YL;f#_SAxclQ{Np49xxiM{vE!GLnCrpj8-2Gn2 zJs!c=Hp(L#jLA!j5@g*e?4~=zuktg77ZQO>VRyA}ITH1QA8d0QcAelGCcC5Wu-t5rEx>vt4O2lyZA#hP%C6v#yrSD2tb8k&7wvITv}9J8 zWEA0q$*8so#BjY0hfT}Pr7MOdBpMUOh7n?Ve6ewZ|Hb9#IzN~O&BBgA!wldH~^jTr#c5KaO1#rVV4S& zyM`5qR;PeeO;3se&6qNU^tu(|)OX#sNLD7-0J-ijHPV>Ie~T@q0Df}8-OI%t1l;MY za3B9}KKfMIIy?=|Oa?FNRLzt#y3usEgL8WEAG%DVY_7Z70~xzP$Me}ZqdelK8^6Cq z{ol&&5A|`4aIQ;@JD(w};rWtIw^Ea(xxh5}c5Xrrk>_1wW}UrAnv| zHm+OnkbF(1-nP|fLi<6kaYU#UqRb2~IGV7bm*YZUjoqsMb5%MF8)6SqyotYWaiC2o z7tLpxeaV%MbG!Gjp-B3C)mdF81n0%oR9R|3GNNm$fz#gLCNC)8LG=ie`eeQZCb)Ah zqeetTs53eXq9F!3owcTbpw(+eX*-S`J{?zAs&$(vru*xFb$LltJ1o?!C_SAqUUlZx zKnv_D6;k8>t{phm0o)%9I#%?HUENNCcg;PwuRRuz zjtOic=kw6yZG34Mdd>LOhoUaI31uSvg!`q(0<_ade|--dG&i$qp{&>eom-QF*DWya55=08d1=px~#fHd`&(Q0cy5UiA);4 zFIH(;WnWsw8v?0GO5$sXl>m%R`fcc=8&O0lB~SNZ$`E(%+zd(wscTFzWOQj`+t2kq 
zEm?xYQYXU1&tdxL#~@TrkWN#%DeGK^{ta=n>LAzsCC~=N4b72(sR;&7)0ylT$L2-C z9M7P$V8{dbElx!`sooNzpIaDJY@9$}(38w5NX6UcI9F=%(<&N=sC zkC-PSKu%H<))3Y@ueRwjTlP>Rq}2D%Ay?+0I)xc(x12wkz~CNllbcI1ViOmFEasg| zPB#^n$8T)Fr_ZyFZh1aN*B#v)bj^i4di~PJ!M*~X5BDKR@v%BS6EIl)*TItCR7#%i zvqF42Tj~eLS{!uXj<%I6#L4E5#X%4X^>Y60+It*KX5-kSXn-OOO9TTXU|iR8@|N z2fuqFQ7XlR`Iqdfka|s z9WNQ$su*$MNl^}4-H%Y*96|)|ll$+5mzcD4BAeweO$Vtn7Sz|St5(8V8K)vG=8a+f zBN?;cfez;?)pt2&uCS7C)>TH%8JrvF+?Sb{;qjj!V16N70eO>^N0y^xeU z7rq2TO-c9FI;GVflAd5visNgqYXZAS-z;-(*?ai4-w@L5sT}GqSP>G98Gx%Mt-@31 zOVGVO<*j6~t3Yzqx0o4h;yS0F*n4q?_g*BuPCdX%c}yk|Zfd34D)G*SO`KDmrV|0_ z!A`FMY=DzCbIfU+!cganK1gLQm$y_MpJ{hsfJZS`<*N=S{F!*%9;UbxW4sIopZ1X^ zAB@kz4H;#*PJL1-8N*K{eHeUiPjE`^S}{9u z!cGd-qw}YOlMHGdd}FgVT9eJVI5sKI858ONb?mH3D}`C+Tur1IzCo-IR=SWQ4!Hc- z^zBukuo7G*2A8xpbgs=g*@Iwdc{s{~L@@)B6OQ}D9}?LiA0RZ+Qfu)e;^O}tElE3& z9(Wv?a@gS7iR)9rSl~>!kyTD`0yRkHIiO(N++v-eo(8^-@8e$zE5xS4BZR5F|FnF97HxcUF`*b-`k&UOamz-$z-nng}N9XRI-Vq2_}gC$ThPo+d3=RQzf z+7n4Iru%l=pz1|sUu7n*%GxvoqKi8DsW|I^FcEU-IQR{Y9X}yS5qe@+D__bqjm=#c z;Go*>|8(F!?KfqccRC5^KIFnt7EHWa=}#4p7zp}3yFhZJm~{3ZTYE-RiQF>BjdBC^ z23I|BsWB*D(f&I0%VmOWgu~%+n4D)LIMUAH`7n}xru4R0|8ORxk!Xt)M(gszI9_Mszinf_4l32ljvfaR-WIiA zHX&k`Dp;PeUQI3aX{KsjI;wb$M<4Ha4WK@ks(^F1X{3+%OPp^hly}H*ucV7wgan{7 z;YuD>bP${mM;fx8jTCxQW-{R_=D@KTPy{CVavT?e<#w&O2w1R}L^9pzveHht+zoXV zhuXh*HCq=sHO}NxnhI^2ZF?#j!EBg&#`QDQC@g6g8TGXF^SU70ABi-SK~;2}bbVnJ>@;vj z$|Lmz1M9BRC({87Mt;02A=e(Q!ad>wg0!|I$a4U;tq zI0v~u@i$0oQgwpnPs6He!#sShV(FL|U=xTxmmmKN8X3C=DM(~-E~z~7ouoIt9zYS|&0+r=60V zQ%+>Kb1@&#`SKPsL8>uQ<}lUerEpT?(+M_bGOUTmV`KO{a_P84zBp`^ba#Lwpy_=a z&NA1lx(zu+-6T|7@7R{AE$@;MmrO9z~~9CPwq(aj{*&nmV|MMsMAe- zGDr>LdT8C6?wU2c&*5ZQ!>erO{XaG3qdwclO;!l;$Bq9&xl+~Q?`!n0uZHiTiMkrP z!x9B`T2#!QFk#jf$5jdwf$7`=Qznyzef_icDg{UuEP>5!WWu5FZ`3DW*{F>-c)qpMnDc$|Dn zebQN?e}B*pC-2$`5+cT!`M^PU9tgiFFmm12dkIr-F6azeNg z*;0C%{A{s9L7%#qSm&f)EFv{2QiGc?d7<0@lK0leMD1arc1bvS)H`v^b$Gb%PykN? 
zP%kLeI=ojUIRNW#%k_iS2j^||M5<5D!oey5rz-`8=*wPAj^X_w7u#~jzMtFG)W@O# zTn{c+A!$A}BQR6o?mXEvGc6Ab$Dc}N=VK+?ADbSR5nWzmix_*l6w<5|6E%Znt~FtM zbyrs223URM49JNuFL3W-Gmc_Q!f+jpMs&*?+*rvBa@Go>2Emh%i|_s|j`(m> zVn`Xow&Vs@$F^mSdYz@2WHV)j<5eWbS^>v~mG9|bt4JwBs$|(bq?}Mez)qeP(8<@v z`QbYqDW$U{jLpc&H7=e-YrLYA%s%r*S|0A+7P|-7=%(~6b-!0)SU5*5%}W_KXa(!!n}*L7WdXI-G?R$YEe;&BlzXVy3ak5jh97;2a+Y>$Nrq(|9?RT88L2|#X4O84jYh%DiK`C`dVg|o z&cpZ~pyV>NRpijrkLXX8{S;W+K4W0V+!$oemKQDUvl}vA;W8w{BG>}O5#*8tDd;+8ieV(wyudADyx@|LoAPDBd5%hG?a2q zFej&;=TWmQhNfL)!q%Db8G&;Jx7aYG#xCFeBS+GNk_k^3H-brxmVrh%;-XEsM&i~@S-Hs9J zA=i~Q+PNsKD>{~O_G+^F;PQ@p*!ucJNBH3j)w4ngn+@szR97q-BR8@yE$iMnz6^?T zxx#Zi1Lne(RbyIr_O!PP^vh`ibUCayS?~H7HAGg9lapEArCj+PRTW?-)2_-X8I~oN z7er4GxE8;gKy9dvjWM6N^AxJ9n02cLQ)h-dD%4mKX;G19|rMSb?d2T+sy@Ch4r7a z%>zt3;7i1g|V3tLk%Yu^yB<068T{ zk%`)TP;ZRhH%wT1yv5E#&Ks9}v0|evIeRn=%?NibH*oI`Xlj$r>%_4e&CI0Fbg$&$ z*@z2C10&6@w(S){(;pPb^}UU)cFDt3L}TWE9oFJRQh#4F3D!w1xli2*ZWa>MAFaz? z*x-I27&L52H?2&#wFwxbT{8T0ai~})9P5$Dzkoe*N9Tv9EV|B+&bm%sdGTw83&UDP z7*ozV1oqCw1e`UzFPAew)+{#%v3zEQH>ZJCK zPmi;*NXDa$XQ(c6)-nr0i6a&)-^!c=KaSSPaO9U>$EEGUN`Z|rnny`}YJHO+@uPjq zyHo3F?WkR`sWHo#xU*Rkdt{O~Zy7lzb8U;Afux?g7AsIcuYyL=t|fJ+jcbaY`ol35srlq2 zu$l8|z**s7TH^y%(G@IcQ|$m4H_p+TIlA^#=9l=4bb6-aLqbt0*Hw@H^?h2kK9+OR z2Z+_)SS7C{>-wb2gbRu-@NKeRw~FnkW>IbMx2fitglr#}ZdXLp83~qgcQhzfqdwzz zJymtfcCymlDxmY)zVUO{BsjJHV6&D>!7W4hu$GCBbE^YQwX#`KrBBn9h)KsqmdFfl zgb>WeP6X#-^eyX7t)sPocH7c8IcSvIW{$Z$N|T*b$K7JXv2097&I1f+Y@0~4JC@)S z*YjiQI7wE>Y)#F%TD80syT)q-}`lv@!wX|gs*@;GNo>=lC{*8PGz_*A=(jFkT1zg<2*QC7Cv65FT>^5CGN2SJ*@D{ z1nWDEU1wdDAT?l)o#xzSt`&WaQVv#mkIow?VaRnlYtp<59WS@o0Vo+6oZ`AogrSO; zAH#GQ!>iZ~jA@Ihmtm=|OlNRy;6;pXQ;_aKyECgNru27<7z6r7$|=A$edBF;fgWkf zDvgVifa@Fi%#70i(exOnW0`ApGC&=a8H0T|x;CZh2|wm5=8;^(_W)aMM)n+>Qbz3# zyN6HKMoA{&!q~Y@BnG)(1Iz9T!njO~WI4PakeaD>l|?rG;4(u`6Mk$*wpro(e}Wd$ z`A-UIHPq-&JtZ!cG=xBltK6;>o>BZ-<{hU$`H+EpS;u9jXT}!MxK3eAJfpWJr=gaF z=I+e}sk{bo&zkQx&ysGlq;w@ZW-+5 zSdt~lUB@z(e>UwI4ae?Q%-S`JyE`=<*<5g>V51 
z549$rP~cuhd!9aaP2kV(8-uE&dOdONICG`Mm9iFn83JqohS?cyk+s#6pkb6T>R?YU zfQtM>xp#@d4^Q6GSoFroG$cl7hvO#FFNe&-_KF!8W}zSgJK%79n@+VO0Xgr6R&39) zMGK4mr&p<5XUgr%6k|I(74QVEfBZXwBXSj7jogjYj#I*JdKrX#TjnnTtGI`2$|nAD@?vS@fDnK-O1=cn6HWq(Yz`+1 z8}uOEv0#&TQt3L!ke8C-mG})V5u277?5OGzvI5e!wwk%bnT$UE^HgTZNf)gtkJkg% zOjAw?ovPuCSP~#Qk#lT=qg;3{K^f~Frl_xJ(RD+vMaUJ=%4u%6o1p@(*SwZ(u~A5M ze^(fuJIBhCg3iorF>8o}Qw9m8vi^R?^7PUlO{A@*tBtNkarIl&)Yh%ofc6KId*s@# zZl09>4)9KF;+o_LIFhU+XLhLU!AW&Dd0o(km{&(>bF-|IwtA3$l~olbI5$f97`TP~ zqt|8!+Tk<(sIU)cG?u!U+?-D}|La0dQ_p+igy~TN3Ic=%?*(PMn#_tLO)@Jv`6fzT zu#)lZ*a2(&>M@omG)AspQIl+#kkSVhOIyLebpbO_?dPh>_z_;j^{M)u-53Nd3$H#}}JJ4g-B*`Z*(HTiLslefhV zM2b01JYL|&dCLc_m!~cIpqS?m=IYB4#*=@31CQ=)4;Kp>>g;VaxDmNRr=Sm;IXjNS zO24yjmk~W0YxugG1hib!WuSNer}v6=C)F1=1xwqv5h%!~(iW67xn**rozcdaerCh2 z090oI&2(KgD=Xw-0wb<9Pfm)A?mI3*!00RpI<}lPBab>9j$Cx;CY=x&!xlg+lgorj zKui8jrSg5#;dK#>sFsgYYq%Tg!wen!S^YC6=>H|M!ej1vNx#x^#&{i|KC4rNNeU}` zTD9qL%6rt;-(nX*>g+{I7{U3Gj zfb64ln_Yz^@i+Vaqa<1BcwHbpbRGAeejA#c^umJ%$u(dPMON!?TA~{6{v}tR} zKU*q8w1YMtD@z8tK#W^%&f`q5{LrQIDkeoW+vNBjtGJTRODX!qDb;evNpQP-o$_UuW1dbzztH97i#8 z>G&I5oki*MqEcng*Nf{Br$0(%OI4ji7zb6Rl@l52ht#aM!LqOpQeC786+8r3)sanv zDns8kt_5065@;Ts6hQ3jf?T`4vX2d44P5)~)G}{DkQ%5oeT5kVx7bv;#C6IF&|`^p zU}UayK-r=?I?@mQto)j#UbIUC-ReQFLE(0t-N+SsVCQg|YqMGTXkCK+4CfP#$UOE8UYrj|{HtjBAwL1vr~dRV87* zhnG56pd^YWY|4y_-~FRL=<@YncF@ucWULl0J2GC4mY6V6{?SPTs_t?EPJOpTc44+h8%!uN6M9{crk(y=U5Y}x zXP04if;*x?jlQwP1_GN1Y0T6Z;H~oP0N_T)4bKfobc~@&!YB>rx9vde^sYdg(b30_ z!A(XtYC2>a2Z;sX`p^Jq`9(TsD)2qmj}g4HS(IP0rStVhTh+0v-|nIs^DbW=sEsT> zN0?oo66#!<;1=XxvT`{Q<=n-{I+J?7iLWD^Mz0nTW=@dW3yA!~%!M&5dy~uO7**{w z_oz0pPqS=zFZNQxw@N8{>tYeXM-3>LctLigFw0|V-gA($!G%kmm&C0dUX0^E0?o8t zEuCZ3y%?o!rV@r3W2%#egb~EZdXX!&Rb`P%IqS^R%?0Pbsyl!FhyP@r`^SHZ;-B}* zmzi^4@v`px+3)_qy!ex!8VzvTxiCp^Rlo?vZz{P&xSasb?2TdD$HJ!NcebE*M^Xl1 ztlJ{Q-o_@Lb;jx4`4fNljF~5L4b6q8_i6k@uPjNb0?vEIInCc{+9#e`nUTF1D0#F9J!26Z8@L~wQciI>C?jFXEjpRO@Cxh_GIMDsh(N!p%Spa6 zI(9t=8Ew7#5Zd~!M&y;m=`x5IQEmiT5;J|f`vCJ7$uR?EU^yL96)^i69#L3pZn2|CpR|xE`PhZ 
z`M@>Z`TKwK))7dFMO_~Ky+iHqbIn7rLm#`p`TcPe?(l)D&Hl?*WKYmtUq9Xk_l;@h>g8#f~(0m#44{>k0l`NLoSx;gamqh=V^ z^tg?iFO5LPe(Dn+>HHGt2N%BoS~Co^o+hfZwdg4>BgU{Yk4F5F`KzZa%Bn6h78?jc z{LQW?T;EA#(&n#~7E!WxRtTfhp@}M*t2WLJVh|93>(UE&K4;^L3nUe`mKCf-B63

    Di zZ$W!~?oVUcZ~DdSm&E%7NHr{G*q7s1a1Z|`>~mkhLxekTT=d)X3{1RV)jS+{dd92f zHW>>=0G<5Xzd07%feHI1AGo0*r2g6P6K`YEO0o!h@f%*(>N}PFre8X+RvS7dKJ^E; zwyQn?3o`I`TC%YZE_>^noBnWMA;Zh)b@!Yv-nEe`3-8c6n^6U~XXEp>19U z?g`4oqb*HxBidzR@^5W*g6*wi?hQl996_2V1ubF2Um7oso#JIO#2=lItyP~i*ATew zl_`U!M{;I|iYPMF88pf&eW~`B7;Q7PioaH3B+|o zj={0yZ+X#A`kga8=_q8htyby~rEGfd3Qf6a{iKz|8>|PMKh(8hn3Hxx;DHI!sZ1Xz z`h0&-L!)RAeHUJYy6TE|MC2b}-+SJXiK)Ln6xyyvyYy>+?p0B^ZY2s0EZGDTl7IeO zgR%7DvbSB>PSy_}yjn&60rodU;eYV+uRzk=>|I2X>lcPr+bF7>(>N40&Zc@`)eQry&xF;wV z#1VeVdg%zjlB_QVR+~*kBhRI7xPtJeOsBQw)esn}dgI7HNug;YIzCmFc+6BnTW*fd zPIat`hJtpsKJnLrHsegrG4VIBo`%#5%1#v-0XY4_1bs%|N~-0igFK-DuWelPM*SpX zim@%F%N&XIJo8E=tN;uGa}F>$4?g!<(4ofYJBOS0`qUrZQZyOGfE9zE{nNW5(pM_d z6z#f6xVY&PK*06|kSaGf#O-Xr>A!~~;U)sSwoU=x65XN}@Tiq;*z2!;Z)0nI#>9gX z=hXOnE9}4g?dpdV5!P&c+EtN%eXRSt^Uy(cT)NWwxdHlHyFZl7NwLe}&;xC|)B#Wa zb<>j+x5aTdy9!*NtKmUrU`^~p&K{2ZQZ}Umq`BY-Nu`Ck%)RkoGuI=+&zjXHw#F2Y ze~$bavmuHoz}E;N{B8KEd`bQ}8jksOI1aTMXrJyqTpq9O&bM-ay>xF0IMj54zw7jje z5UMnX_(P-Veo8QxVV67=1zq&U*9An-vt-h8BSQ{4WtOVJ^3zNSI3G|R{OvG!b@RkS zU*sd;LZE(6O+ry-Dg(&B!wh)@-;6r;&=<_L|Nd@s@Rv$4p)4ayU^{n3*Xgo0;oNy;Ja-u@FaupGu4tw)%C3FKk~TDX!WJx7OU1(%F~(_l3K72 z2mAwY;lcQ(*yOq(#2>j7U=!?}#Ly|=d#2=4`Y`^QG>0rpZDdIEyxvJKBAynWmgXiW zzAy}JH8(5|L*e?|Px-OV#0(gxsmAaciY%a`5xI02aoKd2_gUvjfJ@#M1zmBSzD!)WBM&@ejz9RtcEg2klLfAi3yxnGl+Kza9@OC#UP>Op#1aV3W0cff z0tipI2ZY~!x82aLUh`Odm+FR=DC}i-6&OP==79Fq$gAJe)cr$6@!++<{a3uh%mSqh z@5K6k+kGd@J;xXIJLN8|>&4iWOrLV-EGw+yd-zLVUw(tp@o|}~!#7HMul|!Tu^#pX z?B)yKID}V+xy?UHaof@3=AILahgq8bkeZX?xsBBQY5c`A^##ipxzrAYafVNPL)xt_ zqKSkV<}DT^yOH(HIbkE#l{KQ@Hx6<%NL}PoVW+w#nD(~tb2N-#>JfE(9BQWl8#@yY zhigaA`j(olpiC)B1&8TBB#f*}+}v6USA5>3Ezf>~v? 
z@i+I#aT_%iCa8C&Cd9PSr2@lXsq6}H!tYp6w=5k@9JvuNbcEd+XlQNbT!UKms*1;X z>C9lv9j5@V_+Ev`h-bh1{dUFNW8Vl@jNvd-4d)U`n`KxMDmY@1)>kHA^`v)2t-z`;>^Ma8WDKny?8aUY!x89x{WdEuD=+T z`~jvKBnklay?e;N+HfXxQjAG^wc$Nl?~8{If-I)vpZD^Y1-tD&w@Y8)u0j5Jl=KH~ zo}nT7;3&Mdl;j`MGA7TD9A=_(a(T{V!b%Ni{@nvtydhE@YZFCFOMy`t-(##hlJ9J2Ny)r zXTJRPV3Kj^TQBU0j%cXO-5-JIxbEurnERHmJ2FFLb!hkqzdwwa{!+lc%irFvcJ$Zab>LtX z#WLJY2d?f4_2NrEHOGJuwTtq@t8ACP?LzajFMnBk4)K9Vd-PjRHg%QwHd!U7fUf?7 zG%1m%1rUc%eSY;lDk#ro4av)WD-c!);@u`6{_^7;=?EKP_*eYk59;I}8ba&gFFpXc zKu5pc){BOmbR;bxn=gLj>wnzo?3 z2LmC}cX!lOUBN2e?sZLN2R-Yo)7#x%L9TC40Ib@Azem6EWK+(k)T_8IZgd8AI)>k~ z&p2JqUjhgBzo&cf(Dp~N(iPtaJl5ooZoHt84QTaRD4Xy9P!?P_K#=)wjMiA)cKAT} zdcVFrF4C=IC%;g*a*|gRc*XtVKV6LzcOE*>l7g-5(A@~4Lmfds@qqd{*|a%)azKO2 z-tuO10op}ZQyxfjA4>C7qhFZ7)BuWdVE<}mFX%4D1?T^(wywFcpAwvVU-Rm=k9xU} zw(a@-C{6A7c@X8R{pRb*FGOKlT}8V#!{#B6F2{A)Nf`Ph)a^=#zi+#V%8J%{%FRJ@F^bjO#(la226>s0qLp)odffP-W^m#dVF*G@{%D z?}in3CB^WpE-G_-Dru%A>0(P+I-qbpz;lUQ4@t;$Vi3#Tox&y+>j4?}9Tvh`6=(3r zCx+_XtcNPWsS6BNB|;Rw{f9UIc&s6RRue31{&REg2|xfGA896oj2e$-3-B?^i@HQ4 zOp3< z-)&^-9)Rl#+7nh7!gs@7eDSAAPYfp0uq!e5I2As8SFVUiN&sF!zNZNCpZV?ylLO=t%Hq*}NAozpG6~x4<^%kuEl>_$7RVcr zDCMNvszHFv#r38w50EG~9}I0*XdiM`TJ{v4Xhmj(pR840#FP5 zJ;k;^Fa+R#cSg7>88*%Va_su1&F;AIKos7W2Z+;qj(^%5$&4%QrY_fA)8yrS?0qi} zh$sz!{5yPLwMmSIh=OnV;y4d~`RhpnaDPL3-Ert3Yv14+^dIVT`)6&!Q! zgfMx}Xi=ZP-1Mn@eI7pWE9T1I`mlNKN6Y$QoG}QuuOa$8fP@9yck7|Hj5O`NXTI7T z{m6}rPlWvThgBpXGqFDj$$Z8j2my<`GnU@Ubk&wpaj^3v;^nkIay*&)SO2)qMV4K#|&h&2fryd)|1$e57$^T@se z+zu&S&DwN0lZWGESXqb@1MAb2WtG9@ZZN#2;}l!!u+E5fP27<;)A<8Z%P?U*muRz1 z%qAJPfn&q-cj}?9Y@dHe+mIc=7Z-wWu~ov5zF$MH9e0s~MSsRbgI1q%Ty@1I!TE^? 
zKOYE%O+WC)AK)wkiAKB17|2$G6>c~aSF9D}TSWc|Mh(cnqqqG|_B(Gtc14q`ipomK z6j*gsN*>X!1}m_FG~W2SWIPX85d-8GNAO_4or9XL>I)A?H_o9_vC3u+Z3VikDIvt|dM=TF6!0BQ zObY7|HVCY;#L$(;wJV%4iV1QX9boYhv3b2;i6#sHK!{>;0CuZ|l|k#6F!f1tET1Vg zu8oxjyuu5F;2jE^dGmo^4p!D7Ass&T2e*pkp9CQ2{vW?^wRz=5?=cttkJp(g0ru^_ z-#etTZ?Liuh(fjwMgS1jU;R)2K_vg;@Go1gs5~dWDJapoFK^=f$4^A$A1k{Fm#Zfr z_aK~RV;>+k;cqf%7i5fLhd9Qlj#MjL$EjiWR}(MtnK4{KfEa1R&{d#YO#pD2xgtt) zut~v9ax%ds*wv5J+>WG*=n;|lld8zFQrC#yZCKa_1o;(!PB`v?Y;+q2s@Wu?5rx8J zMht6K;TqUfIDqYnA zpxuqWehH5Md9OUDAr{js0PeWyK=At1Bj0?=yztUX?fJXlbs$9eK{%Lz9lG;=Q67*c zlq7__!7INWbY&FOHMGOA9wb^Vz-ph1-?$=tVAb0tzj}yWr9^jb27;z^V~dth{li;= z3EJZIwI8&rir^UT(KY~b4PJNUR=81O5&(7oSVKg;@X{iGU@K_-1F?9j(ZQ31fCFL{ zrww^}sG0nKXK7oYPrbIu56JU61bK#Xz3CSZno>a7eYw3X$Y<`=;(df{e(W3Z>LDOa zC{YMH>NSOs_TE8#bQeFkAg=EdZ*A;%v6LhC2Mw@jgb!>Zhy8qx<7MEFx9 zQ*HzUEB`0~1$k~iPqAF-2CMgJ9RLZq2;=jbm?R9qiH1z2L>i3k&_3R-LH=-^LwH!t z2dnioIDFl&1mpFi|K(I`+o7#4di}o%(r;Jy?t7oOY7dAI|%(M>TR%&&d$?x+soJ7iQYkP#;puLX_;$Ybbn1zS`%jSf@78R`XO{W&6LAs*Dvf;=c2g}L&yHl-_1Jn{f}fhY0s zDx~}xAF+1+X{bfVLKJu{67<-o1!NGcz-%8zAVUg-Kh6&j<*ILsjp(p&bKDeu4ZW@m zW%=8`UVYvHbmh>;j+(ytD7oH8K9%KPS8n^+2k&aX35aU|%J+4W%|GhgPwzxdN{S!@o z_nyCcwHv+zdHd00C{%rq=Q9n_y7z`v``q~P`@)BN%X)=x_uX;B>J{epmF3sJt}R~< z-FSWS^XA@9eLCLRo2=4<)riL){yhI6>$+aH38pi10G#5uycBc$eT)m>kxtuo2WE8y?gthswCgiYs z5jz4(oYz9pMRNOujVy^Z!K$vPu7vuv+{HrFGDRulsEJ$DKDd6QgbNErpLjnRO_&$C z1SgQb1`My8jcvxAaPIZSrYey(a$*$o&QV9m6F~W|{PkO7QqQi4L5%&%R}f2a7Wojp zU!7F~+^zgEb@H{Zfx-&5+ZwWunK1k#&^c|3KPg53^U`Q}7Ow&0UK@tL>@8v6iZ>*p-fzNjNeoOtG{d(qF}h7lmg5qvD7zlL zKG~ApUoIrON5F(GZxXKXdJZ;_;JSYl?YB06^OohxKNsDD@7`ft`qudSeR$&xK<>5f zLmxjH#h)5bws-!!-)TwlLwDWRl3)vj1``PC>pm2|zJJ+UE;3`|*h8N;ztH3lNI&?t zpR}_7_1Hssy{utQAJaZVJXGrha zH8v;({ENGbL+6&cN2c@)WS?cN=so$ahwL{1>w%P(D~pqF2|=qtE6*w-IUh3dbi+wx z3e|9&4z^z_ITkZF9l&*ax9@7 zi;V|(B8-yK(l;eBUeYV}BQ^dGv5h&@ zmhI4N-Y%tcX#8L}kH?!SQ9jNJVXQ`mBiE$tTU2q{3F#~$ zP3#5WiX7A6_kwbUXqjUh2cb=$n)1+;ISNlOPb9{s5`G@zmO*?4*k#fz%aWBZsRf14 
z7&UMTSz3r@mm}YtMdAEo4--@1{+F}wNbchMf5rscgr@T8jJk-CiL$a>g*nruQ3-=& zIbQqIu$y}@&{@$bfxK zeeb)XY-Ql_2fq-k1p0>`{Zl~NYQdPb;5wb)XjDnF!2n%V#Z6Y|HxwUbHe#S?zrWS! zqbJq+ZXQt+K>onWPfupT_X!_eehZSWa4g-qIRYj9jNu(YW0tS_t-!sC1$`S6Nbo&I zSg{4)Uxe@Wi7WG%K0{wA2NLaX|K^xE*61?i#ec7F4B+q60i@S}IdYGNyMXA_BZ=Kb zS8#1OzxuGx% z`I9Ts4>p9k`DpF zI%9kSjR#d`I{kG_c(Wu)SGs-n8E35R6B8{^F({F)t=oG~{8ctgx2y0HuyF{@$ZN?8 z?}xvl>)W7zShsqCvrj{vaTUh21}j8U9dc|-BBsu}I^*?!bCB-G0qDGcbe-vg9`#SF zz|$Qz&l{wMe?e@JBt4#`{`8udPE!t+0Q5_BJ2I(7tgYH|IeJ1{&Sm0^+*5j0b*cjg z<+E&=J-3vT?^1(}bgZZ@u}ykG>ZWBVX@{aVBRb~jfs?{Jp=iU(Y7Yj z96v&ZVQ5%!*#gdc`M(G@(#Wky;y_m=y+~GSU63KZh`O-gRx7@snCxg)Y-uJ< zl#qfC)nD|6{Hn3D(Zt7>VV#pSNE=>_#5=$O@psW1o(Wbs0&!Zjb2Bv-=)B$Rqaje? z1OG5d?e2THc*PC}Sj~sO>jr?=s{O0ge!~MG1W)0WX#!zRi8zpZCnE4c7Lbl!Uai-| zfIPlg@y$cp5AiGI9v6^_E_@dZHh9V5g7g1Xa1O7-=G&V$K(HbUZw3OZ>^MEAum3K- zKS|wsAz4R~&W$6>IM8{!&%1+V$?YY_A@ zhgFXiuNKQ?l-<%3iQIIVOmN!H&7{e3@48VMBgaNeQ!m;)RVV+X!O58LHO^GyF9}>{ zr3A`=lk3~nu{!iYHePN}Cx&gcIyH`-!liG0Q*aKB=kY(f)s*7EHSaY?n#txUrgNyk zz$hewNkslojNoKYq9pY_CC8j|S>jgD^?p5739K)4W0Q*uiCz!7;&L5;)qe2j%K*Q) zetUfjjdAGDI&1NYF%WcvUQb&p1=JgSgRk9*nVJ0iZwGqfZ1MW{gYD)I9DsC#$s^vx z0t3Di2!EbnggR-7(Sbx@ODZ;mqR*$a8!%moOTV`Vp!HBn-h1d*yu>}0ZU(aW-sL;u zZ(v>#?(Q0rG%lT_HN3G%gNA$ukYzlGjiFj3WyJPw9hknJWN%djnOaZa>(L)D?*XY*|*mMIn;h5re;~3lo z2j7!1>Tb?b(j)j*>#!2^xxPArF>!WNK5W;#j$GI@%g9ZNmy%7_`{d5ZgDN1 z(t^B(%vg7d=W*-l3+Y{eg;pD!&BSV$xEgHczAQ@%S9qO+#YYK47|f-ti8orIoYaq{1V?lfU@~9oEK@@3v4*I?q{`mq zZp<9%AAhakUCC_!e5}>OTO}ke~bHU3Ff2M*0rA z`KDhy*phMw8XZf?hl&0Dx82aLrh)Hca)8Y}fY>{@|2^&N#i@xcqzMEYkdQ+sl=QXP zaQEDJCGHayS0MI>+#?<(eI8z!#{}TJqRl@tujJA3mX60xko37P6`;zb@wDnqp{KM&K z2}etF$P~Z3?gY8ZI_g$dVIf@onXt{AF=_%*1Kd$dTxG=pCfH*TfSBle_P_qXy!m|xdEzgMXjno}s?%T< zrBvscIu+v}z&GNg9YnQwhJ1(dtag>vdzP>KWwQ_!aY2TJDgu5{f4%)yAH1$k$zXVu z?9Lm>_cIgv40#0N4s-+J!fXCO^udZfI)Mh=gVkn|Uc-vKLm&I&_76xmkkM{F)X1g; zBcul_5P^gof|Fl)Ja}yucFzSO%Gb+fO2Ukif>u>XCHofvszsbzg$ z{QB2L!A7*M!fg-_JS6Wz+=L1GdjnpF=+Z7H(`Z 
zHktU-OjddzL+dyp7c?q2{Kxk1kt2nfI^JIv%mpJmI-!$P>pjpHIPw*$Z^xVt%PX!Refv2)j!bn*ox=fkB$ z*-*H`>lwffz4+BgfV5K1X!3*t}IqHKE3<~hRIepX!DR3*yaMUV&;E974w_JR&*hv!p| zST}N04+%*%D9uu0@*ol|!i2j;s$qGT|oZbeDLZ(Hmv@VUQw-UzgD2HI0A7t8UZ-b_z&&y zZ-4P%@DcUmSAPLS?_fnT|JJ7L@yHD-`iT2C~?>7J2Umpv0`RxNw z62YWUvvY3=s7?WC`9#w{Ag{RoQt3SMttZVP0H7`bOmc}wkiYnh9P?v%_j|mK#tsh+wVkjfmFcZrU1M$i^&4mID>A|z~QpDU6}l)6A(Rj+;pA! z+@Iam$Xk+N6Si&p&BunE^M3(K`>-?|Q>NFLpp3Ai@_wl|bgoC@ERqdH>6Ft$79o6toE45;c3@G0K)^Q)D0#~T9Z_M^wmwI8~x zea#ns4}JV-(QDw(5}UIB@=KES3-&5*=eY1xMs5|_(OYjEMSoi0S$N9d699JQra|8d z9Bz*uX^7rnyne&$%)$M?n2ZC&@J-kKieawrFxeD@wb_V{R-og5`RGDzyr@=XglO067WYm*db9 zF*hABfBuKJ#NW1C!hcWh`iAHP@&`7oI80VMvRH@lOc`x5X3^D9F!`dSFUW#Af(bGu zc_lzS!fH?;U%&PjN0Sp^c%2rn`g@k@VRPTh_-gZe_~12RAqeQV62DVv2Wt9lzrzQv z4qlgqhZw8+GU9*^LMGJObU{5vB& zLN<+Pcb#Q69fA5j`g=EsJ5SdDCRl+;0>XSj_R1)kY)QkJF}#N23pN#D1YC9b>cK+s ztDOw|DpxG?jvL|?e$dW$G$gNf(+v75KfH3w^-X9;*eK+(w^sc2pTy^)-~S7JyB5kU zl?(J9zWvvqoUEe|i&ct$6I_FKl1M;!1^cW2@Bfg_H}@c`2zUlGFC!+YK-NTS9?Epm zW@xz8=#hVJ0l-=?a@{l3M(!D1t}0~qU;~gPzvoZ-;e4Lv=)JapHi}Y~1QUZXu+GHS z!VA`Dc?S6&j%_nxbF#uPu@fY_TI{$$42aVth!eKD;U?=_TRyxhFu9}Em2#cx2`N*K z3Dec9U;S5qFs~u4uZ*ZPoJIG6$izSBzs9V-f5&zN8JCjP6Y;CRe4XQL!7;pIcLd$< zw;OQAYGA_8nC`oOL3B#?Ve(W8mrT?Y)u&3{2!M&wb^reEXw@G$*+BBaAB1@Uy;hZS zz;8Qp+&ub?r;^n)VsXr|2fx@(6fQ=qQ-S1z@3p}SL0Co7e#?z)M&ZAQcj(Uh19=zx z+^ZsD49LK4vlNY8stKB&)W12P`_QJaD$Nr=P!F)uu6;Fkxk1IxHWOLD;tc>qac)Q7 z{b*&JU%lsLKHreSFzJKU!xz8tb^#cdTsW4LJWo9sN<;@ASLLAF0kV*tiJQ*0l;c7cpVpi76C}W_oLVH zhXUllDnS3bH0{sFPonbl(c_7Tqf+>9I_^h=Q`ZLH`+<3(xes-L?E8n&I0SZ;m4YHY~xf3Pbp>{-^)Y z%7pwvo1NR*%#|FlL&Zc}itJ0odF;PD8R#mM|DvXh&;RJh?f4D)blJ~e)Rh_X2G`^V z424c%55OzG``-P|ppJk9Jh1P*ZJ+cTgFIKQU_jaH!* zR!JaXob}-&MacPt`X;EWVwO2df)aQxzNJc#IAMLi!`2UUbdcwiH8DsoNA%%y=Ga9M zxd=MsR6hr5e%j3EJ_7kKNyr ze}I%uk3HN1iT62y&`g1wbe?^BTyCffAZprOhf{^=Yb;Wb>|ib}8_R)JX&3+LUBP7_ z<+yM0fsWjM-|@CyB|sP;sy_8cw*-?lNhSsmAIDCHn?1lv(zDO_nLsB&uHW<{ghCxU8^#>i|K5mP1o9E1v>Ji<6XT#E(n{*8^c#Y( zxoa5!frfX=g=5XmxyK&*yt(&N4>EnjwPgFgcV7~;tJKcWhmV~2bYr8^JL0yxlsAOc 
zLA2RM*MFJ(wws%n9^a8n&BWp!LN#m&Gy0HhdypT_4^;mjP2u&)WLn#BtYdTHfWIvu)8T)~hA^A~s zBR!j&7(EPRDQtYoX?u>wv6<^OxH`+>UY@fB%`k?eUK{EB?QuR#G^5u<&m@KAYMlcV z&=C=HgDVNc^(Jzt?Fd$oGYxFP)fs>?_a<@PMM!zM{m_^E!*eKlu7b8$?ySh@?0D;=zvmTQ^`r2Rqu*?;A=0(&L@KdWhX^6jpx~kuOSpEJkXw zt^pl@?V{Yj_QAWe?G3AR#T|kPfXD+9@T9mIPz=Zi#Nlwm$Cf(zL!C}STS1u~{bpTV zq~xT>>eXJUfx=1$lAQluU<^;F(%0H+z>}I3u-Ys_sq)8D)`t52;?{^qUK ziB}J(miPbGEqr|k@FOSR2wYW5%~j-Y;_)yQMsqC#sDnQBeDLb`na}>o-G<8q5KCA7 z)`u1C{In{ZN4v9bW}^xy3xvD(#HY<5X#8AM-))(HTUq8N91z_31jYyCHTXQf?Xs&3Pf+Z=2n4y#rDDi_yL-}r;T^*1R)^hq)P zO+`&ve*|#Ejy$lMls=zy++2{Ne?@8WySV=tGg)8k0Qk?{IS`CE%~O8Wjq|csvLHX+ z7w~ygZ5~6(8)y$tcYwk!SMCd#KUbMZMp$H&*uK+O*@b zO-of7jBh{)-?Lnm_}cSd-L4RAcRp_Z;FWpbH@sFszges-Ta2UB4*K+9$J}o0GvWAz zGxWeipYL?9?GH?bp$+|NHUPYWtHEcQdOVfU;e-3%+rFxbU&WPl1o{cQ9*o-yUY*|e z?su4nzY-Iv&;}R&kJq-FfLzst!7Iv6{oViG#_{b8vcodUuOI`tcf9F4u!0kI2KL`A z{0_Vd+_cBZuY9f3!&86%bRaiq2b9l_EObKJ-28yC|DdlIzW>^Ghu=$KrRy1|x8;WG zAosCvJsH>)Ak)q|^JiLNi%nKyZG~N#@dhlK@p@6Fzv|AP{oeO8;UV9@*5&)$m!H$= zCESC0gx9*;dbOtgcA&3!ogK%6<`7RlpMnPI5yB)%8%u;(P9Hjw!!nXL8HMC+g}HP; zOjOSaRhDv)nlegM#OiqFdL-G$?m8y_=8E#X**E+T?_BNI;kT9LNCS29HoizIG*8$q zj)#+Mdg8AnoMYr?dnhAG&>@TKzmg%#tb*oCMqEGK~vT#|>La3uBK z@~q)E?Vwhg>8qT+PKA(+m4+7%smC}>xVCzs#>+DJJK+UuJP zczyWjZ8x;XFbM}zaB`GXc!9m_;Fx$h+p;T=Dm-UwJ?7mL7T;)Gy($f)B>e`a3cF4p z>LQ$2#TtY`yEHeNw&#c(=stmgg>ruE#C;q#V8P$%gWZ1fDTW^*TO$oA27+ zvOh}Df6*H_WNl%Nh8G4X)Q)vLPukKXOR45l$FI?0~#;J+xel_(`&8SGU z0CE-0K%b<7aRVfKZxi}N&pbBe+*6DEOC{1##EuX%0$dgHuPl@{q=x(xVRJG|($7aM zv0c82(%{Lzk~G$aoS_da#qgZgD64CQqO)SL8#-@QYcB#q&;6`-h&oJjEBX1s{JK(~KO%|y(&y6*^6jptI^>GZ%+y-l6 zG=icC)-6w67&a2*&#CZ{0pv3#|KNLm8uAZt@A1D1&f}efM|5cVBKNS7hOvoj#gGPp zoZVX^@Oaa!+>%Y6C#2yOlYcIyCmnX0F)nDwl&*|w)1-;R_~530__|@vIWLCBUt!Jq z#66(G_oO+oAN)3=KMcs{vJUoe&VQ-MRW@uoK^(#fIa*D{D}XbN<4O~or1xNyNM9jy z4D&Mg=}JVBraWRfCPI1b)u``UX2@dRi+cam7^a*A%jnyNi{;3Sp{bpu`88 z7&ddR#(W(v%lu0H-;zi+s~a2UXeLoOYm$7T(ZKlxg?a7)I{%a0%GC4)>l$_aY#?Dx z`joPb_`W!3&e+C9Q0C31qwncL*?gRY9Qg+M<-><9HN4taEv{8ZO_-E}7QHT!NWjIH 
z3{Qw!HhpdT)U|D4s?_I5VQnTsnvBkT5UiOKR@VTjNtuuV(D5n0mn+!_XYktKoaWI2 z#UtP1>Y`V5q#url6qns?+s5IH!Wm`D$6YKj#4|unjY{p;CIepmJ$Utdx^wuZW635d z$x2E&^C>-|gf$Jr4RG|DO#;{R9t}5^GdQ1drW8Q_!8a{2?mhO99Dy5S@PL$Z zsCFS7IX8KTyp@)TC{%4u?u$D<^jX0aE+B?APNF8F4VOTl%**^()>xEZ< z*XW)019iNs52$0>z7e6Fj6|U2ows+wHMe7~J8_OPE%7uRdJumSyEPgnNu-6v4BhfU z4kZCg$BXp+HR=)PyVCBQ@_mBRRIgS{t)8nQ? zNq%9Ilfw<+2OlJd4KPlA>2XtpoO%yIs(ToNyBu%qIh)}!@<#_XUVUYhVAC|X3>UrO zb=_(|*p!75EiI}2Lt*tF$M$nh#0J^D@mf3XFb@6Chgjvwm>$*0^;vpv;_+XDM;g-Z zK6DP7d3^Oh{c*c#2yFPU+GxVHEB?+p{d9BbTiAURxylVC@D{qeb$&+9U!>_`X z4pdGR#`YQj`>JkHyp*1)ER^5)fxlORS|(+%95DKv)%L=3#Uf#*GL?1Q6rF^ydeG({ zCeeqi;Rz5W9CxzTQFI)nR4<3p=|~^DGzIpa|EhKu;-Zy*V?%NVTp}!ulOlH3j*?C_ zrq;|=EA`laIc#9VZ(0L8M#E0DSya%AP3`|W(45Pq(Q0%uoVNeZcub<3d6WXkzbk+1 z!!mh2SIAS39*{h(8TKZLf#`*Kl#Lr&jp2t{d@m==EeW3%l*Q>T?gaE8{kVEDW<*;j z^|3lg&QX{!G$)$gA5U{;pg$Z#sm3Ph|A4&44aefNzP8^L-{26^k@d(wT~V~f4do3d z|4Pw^bSo)a9yU0t)lRkIKqR(R6o$c}|6@!`{tb#xEWP8byd~G@bt-RSIP`0y%xG&z zVk=yfmJQN$T?|7)2T#yw=1R!F0dY44E^|HQkQn8;DZIZfJoDWjn8_e@5qe!ZPRZav z9Wcp`Opd;`qBiELmBRs>+1z&D@pPbyy0EGJKM!tfic8}|Xu4Ro+O_?EHvKuqy~jUo zo^Qyzo38ui_SIf3Ui`^V&9R3+Z;qY(e0#hx;0Db>aO9V}HzRP2#2_K-1J~QJA5`Qb zl7MYo%Sg66Z7eDz|N7h;gtR>$jHME}gfmTjYw5WyBQ`i*#F?S~6uWMl(g%9xQyL>O zI(}L-==VyQ$q14wiyfm;775pO8XbF9Zb3>3H_k;&$;KU56J873BhJ+5*QT1<&3nVt zaQ1-}WU7D~-$_J(CefK-vEo$P=P(^J?gjFsD!gW6;%`YVvtxQm-BC|(ruE(zoa4%I zL^RG8m5^dyC2tTT)j%mPl6$ByD@dJfJl|ud;Dwj|xh1jTHTD-8!sLm+d)hqq&8OO5 z5t@ffI~JM(=dB>^t|dMo3mRGK|J?CR0h`ajAH3Ro(d+lNn}fhB>Ug6MSm6gmA#4=# z?DxK(*4sIvI`L=BM%pKJ*}48VukzA9jgN>VvADc+Gi0tE4YIi)r8R#$Rw$jwUT6!ibr1Qk*Co z>T1w9L*u^wn+v0rm4Yn{pPy`V;q|b^ar&tBkgm|h1HkQu5EH(TY0fdRX{O zR_r{elH#GxG3ktL9J1KZ3I0w2?$n#sv6UEB1eZCFLAxyV1$NZjr#Cp`!ztqiRR(r< zs@PR$;zf%lAVLMV&P&|nmf)VPR8hpad zIVn83M0~n@Xq?!)SH182A*{KRTz9*37bm@tZ{BhS)UQwJy52;eMECGi{ zW=n&rU{xX9(unulVP`CRS0c9$aXF;DZ1w_|=5RydgyX)TzMo1aQA@|oIR<(lW0_QG zlH%hHf2NALpFMo_P^Tl+l9TvnyA^#6G1o!xv6_vj z4bI}0W@4xJ!U?eLv;?l_JwCMbC~J3XE;deF4dKye!)vMv`B!p%jj+P;hJ*4IRf^WV 
z=jqQ4J=kD4qxSh@7AyACX}D2P(biCf;hbr7U7C*IC|uM~Eay+n5Q%x!3Y*XeD(egW z?TMBE)MP1EHX8@ea1v(@j^SSercXAdJJiDU_eSa5wn|mcs^6A^4#fT% zzDKXm2pTuDTANgz1x*9rw7u(KSbysawI3;5FG^s!ASCof-41br3fqn{TmnHhZrxPS zH`@0lk;}4J6rNTA%^8;r64iyQFh!gO)cglG=5F&U=dtowzNaKxs<4~}2Fq%=2)7Mt4Pb^km^uRVN-bGI|jr zwk-G{luOj=M8CY+qPTs!Xb_gKRl+?wm$N=ZHDG8Xasw7$@B0WH?2b>c}yY z(qj{jitaNYY&fV9b%}fazSKFnXDf!0LDT7jSsDXt;%_A3WADw@)3KZ#?F~9c&DQuz z3h6TEGXzIc7*nb4v&`ANATecJ{Qw-b;gani7fx|4WhTp0Lw~UPkF(z!2}xgt_AIT; zQR`-a-dRDEr1vC_HDyyd>IJQ4JT3}5ONf5QCfGVhek)<5MmtRtsHiBeZqf6y3dlqa zRUW13dgQ53cuYi;we~n&qKy3Oi@x%cT&-ArsTpuGqL}uO`WaleOAGI?Z?xekH=ojM z*Rdg;7(a{!#GmtZOK)Of(mci65@%xnb)%iQJa7wG=DNvT+oA_tVHnyOawd^-YNG=8 zWy!V0;52TB7{orHCXm`4TBR;=Tw=RsVAp7?57Po#8Mp#m6lw@YtJaMedtA?Z8-%KL zzhQNPyN@xvV-QU8myvAVr&~Abbti{}F;;6Mea9XX6RU_eSshf;w-j4PE|-wa()~sC zxy@1+uJDmf*kzcY@vZ6$ zF*2-gpB#6w;h>XlijDGHcLQ=>CAKA)`xvbu=?sxGXf%v5GPAXdokof1^Ptr$g|ou% zj4`~`sU@R@UsPW0s;H)|8_|BLFqXL*aK z9Z9(0XYu42-P#!Y>xAPTA~=U~lJckL62;KifJ9Ww9CP!TRUEf@U(%GUo`~c!i^B$FnLJsV`;n z&$(PFF*u&T?!W2BB{yqKFh?IBmGe7s!xa*MBzK(eoK-Px6=o2V+8#idm4O;9+S#P=+UsR# z>;JYx-FaL8U#>#{UiQ`t&EW&T+}OO+%@cq3w7KoR)$Udi zhLL&J8L^G8fl^XdGUtnF)8bB8ttq{f$A69(G=d~c?qu)GbZomJSnH(z@nZH*Rk`pr3=ve_& z;Vn8*jyXe6v!?J+*$)iK=dX`k5B1T<^CQu@V{8CNuan{0F~paZdUtLNaY#aL+BX}B zgdacF&KD!(vyA$wfJGTj%Lk!XWHPJ)yzxL=*4lXgZ{FIHs3~wm zT&O_~*&>JH`Q6|{T8<6{8c%}SI@~Oh47Go;hB2lL?1elR1blvl3UxX~vd{{V%%09#Skw7Zkr zF6o>j)(^4wT%Y_~gj{uQ+cUM4^wN5;-Jd20srpaFsR6snsvHkS4@7}~ zWf=b8T!S|JE5j987rk+>84GkGo0`z(&WGk>3q3%e4j;T`ae_V1!iqe6;A+z!bn-yi zDjX)KEt}yq0%<{4Ozu7Z)utCxb1sg2$W^7wT;ueEv32V}N^P-S!eJNJTp%OVzEud0 zoXbx5Ncy>JY8S>Z!e(Oto$xE89qV3kPWPw91+R(w4St<@5^yGqHln^NjNAz~!@jkU z9Dweo|E*W1Db$}fF)gOf45GW3^s|T*>}?391V>SE*9);0;;*6}T46}E`CQOrI=th1 zeiplNw!tH+k4-8|kwMRcjvJ}@c~!*qT|fX@@vKnRJ8h)ihO3I4*|G2y~hICuzRaZZ;icGA5}Y$@8DU%-q=a*ousny?c%S;4W9F&PFa8c*{T`#i3opg5c9y@6 znMX=TsN-T2_iE;6*MY4}eNF7Y{O!$zd9SF0Ie?DtzoK+i7q*_b!&q`1_fGt4NZ8H7 zGvAB9uU7OLtF}X>co@mSusbFW7DrISUq zR;LnNDGf(}mC%Qi^iTB)(;(qhQ-#=|eL#n^%m8dY04IJ6joKO7SGs!SODB}k{ 
zi;Y*(4=-MP=_QlPp0wH@50{eq(|b0V4$l!e;g_bN=7&=4lWNz|F>N1(SHF*){CxWz zK!5(|x1KbkLF!NRUQu1AI^zZ*P-sv5Egh(U^jlB4`i}p(QOc@fDl&(EXN$^hm+DWW zVOSY*?!RH~bKyBXO8Lf%`z|;)BtAW!4cH8AihUa?pD9OTvT8MVt!k0ItfYL;q_9@TRp!d3(!{h`rQ( zF6_jkMK0SM4qWqIbNrz%FdxaMLu-f|733ctj6cO z(4><5>?iMDtVmq~>IgQa&?43lm(>_69H+u9B*)dsbv?uUretTEhf6qz5!y3W!1dtv zqsPp>CqCUCSKm-8)t5N?tM)-tTDPG#4nHaFh(EI|-ulZ_WBjKkcyTTKSd&`%D1CA) zjNm;@Qhu(*^x$IV&bzo)o-S}!Q!*=l#=3~dmqj~TqMahvvf(*Qm5vIM0dSe{&g6|p!YiZ` z<(m&)qyEOE$6?YZnU44MFk= zpCGR$-gt-n3as4PD1Zl^rR6GXDYTPaQeTq~6g+a=f^^?8$GZ|K_CVsq{BKUTa>oh4 zcOs8H^o8ymR`MO(ulvR%0LVYQ`VU@deSuw}o64L-yJ)ZPKxl|xW72GrV`MqzdP+Bq zsXN&9M2Q*nJNeRy)9SuHom}_R#FxTI`AVi)lKHCOlwgPU#S-!z&*Tj{?(R7uV#>a4 z09b)(FgeE^BP|=4~W>=luE6^`CP9)4*9e&8T7JJTS*D6-S7kOW#$IlL~JYVd}v3 z&BdxanKVUph98N{DsR~|dO}-9c$$Cv zea8bL@1}!So2$P4bXLl~E8ZS#{Lzw$%y*=us%qnVa-#`!}?mCpMCkux^D?WnI8S-ljh`?9yiPC>56q`6Xqa^19^k* ze)%|3opaYAe!EFUL;OPh`uYHw&wTF(O?Zcd9FufA(Z2R zCf>fw-yvj4DDf_T-TyWj3f`PJ_Z8=~{m#dL_C57?PcQ1KsmFbnU(&V}lnT=RYy0*u>UOa!E$+WRZtFqwS~BeR+>QxLJl3T5%y$$+C0{XD(Q{Oiaf8}fK-$tP|04_X_ zCiqiYh&(uRm-0bE7nwG`3&N`s9pEpo2P3xMWaOWmJRJgarHyBTB(7a`3;)_Lj>iqb zPRN%_up~X*ylK$c1&fG3a({LhSEJI1=SP|e`MJ!u%B1NHkOmjMaYdfB1YpKn03zTp z`sQFumNgUS$T!a5aN@g#K|ltptCj?b2ap#~_CNo_TcTG& zVUh=bmo<}GnDpJ&5VRh!6YQNgUKhoIa|qAH!Ju^nB;x(Q$L|0Q*C5aE2a{+Z7TZa) zk_d!2ciwbeQZBp_`+nFMqYxnb=7y-|^6l#nwD853 zerg_Q2&J2sdIEXA^Tq?_P-epzAgOWrVD%qQ8m2WLCg4YJyMg_Vpl^$YMrlb(AigBs zp@50@9XA~a^qR``Z2)rdO2`}_KIkP7cyYR~TI`D4sE(~^027;a? z02_Te@0BY83W$Y^8#00GpSWy5g#FqF?{3>T03OzsdIa@#-)+Cs{&MB*`ow{28shB! 
zmKd29l7g=$OE7T)Jvw|~W&7_v{^{ny&yUS#KlzD3$DmASop}Z;5jIYO4JFe3_jPSL zhdy?+B^V{V7RCgWA85aSX!3LCp@S@04)qUzpj$UIvVla4W|8O@^x(eVJCx`@uUin+ zO?iOGy7$C`1w^1c;9b^`W=B7AW5PBs(!JxxgKZmdgdA-v3LtAaJpARaMFikgEor?X z3v&b@kdOC$vD+}`9s--gM`DE@J~G0DYm z_&4TLpZJK-PZxCrXHc1mk-7E|h_XBpArgM?pSUxLM zo&k(r?*93u+^m+D2*TO$z9;_#`Xh>O$<^H4Xx3`vzxU}UJh)FKcE~-Nn^>Tx57l)fdwln?fh9$`GO)JOix1X|OulnO5 zZH!Z}xj+oOowZyXJn~4#oe)Ou2*CA~NS?(JvODH|x8IQbRvCON5U>7&-Gg()P-&jh zQshhgCLj|GMg`d0Ojev3_XiHjZ0b~})*5n)HLm7eSjF3~r?rAIf<&Yr%JQk~s zR{f)0x!4kCj{Q6D75^e2uG$ri?P{npJqD7!Z8PO2A`2b6AsY@R?jV;V|0p1BAUxqu z3y{r`e-uCxas>ypBQ)^nlk=?&R!= z&4T0!6_?)fzC349t#G^u=M4R#pEVnzGHpg3I$mC<&9a-wKdk$ixpo~;|GXb)(yw!9YbWFlF>=YT9x^{^ zWII7NM?ivXcr_35gk{6zjmrxMs0$z>F+l$o`ChEL;pzj3qJR5$2jlw4jp@__?Z`eH z#;5=*{-EwW4mV|f;y3t}O-FA#6va(s(r|@2-ikx}R9SZVm0wwl8{Z7XW$z-$cCkn7l)68?GU+JyC}93jn#4bWGJ_AHZw5 zpF*ER!1~3N3rHd$7}^zkj4reU=%E8aeL=Y%`PNh2N-@9U4a7V18oBY< z1>K9h{`Ep*gT(yPqsXx5cyy=&&Q?1~FaVylHrxz>B z8o+}FEE{Y;$D#0B5FiU)_x%p6I$y?z*G@563vCGTuKiu;>zW!2vAxl!LUs z^WVPTjxS*M&wcqhQ9HwmNBE4$6MxV3=TK+QKJyH7;`e_qmJj3TYhZl&@8y0v8%*gH z)&X64mGkLn53nfK;#y43s57z=aKiBbF@UP&Pt6qKJmXNQk;DnoZ7Qx!%Q8&vB&MU> z`pyi}!{yyX_^mCea?Q!-q>6-akeD|7Yge~&;)WnKC~MOE z=#k^?M8-Srup!8C=9^hJlGkUCJn*2RY{ZVK`=HAz!^(Ut13D|_z>B%^*H^Ed0-^BP zKU)zq?Q2|QMPzNj1PJQ&+7I5{k}bi#X43N2zdYKt1N_1{0OG)FkMK$>E>F9Xj>Lgg zX7rV1AC?ks&oo2>JZ*6s}0~dgmf9Xjp zpEgYXLAEC+;1%3^j`Ob(1KEUk^9GVtA^?G$#O(#71Q3*59zjp=EAkvEMB&LBcp&k6 zM~_GOZC@=%`q;ka*N~V`e7nxXe(~z+=}Y}O+SK7wfewOheX1ew_nyC6%?Gb*Kk=J< z`mllx*B5-3?Ai~0A|m!6%*PNx2ILaRh1Y?Hfg7A`GCb$kl40}39r_A0I`T)3I6E)$Ep)z@Y*l64>x}NzA&Aof_btK*YkZ13HZ+c>9?D9`{!gj zpcC-derxisBMm_s+R?UtKJk&Der~$Y6A%7*2WldZoNxDZNMZA+>bfP z=_&4Id-4R9&8On{%frdPDX_|;-t;WPI!!2D+EZas#(=7*E`Jmm+$g_1UzDk&<@f|> z5`D|U=~17S4&+E44b3!XoFti*QA0L3(#IcMO|Za;zv*B|_JO00i2`_9|Cuj6o=rch z5;m|PK~u10y<6#3Oq&q1rok1yeFx+p%@<6V?h#f=L7bNGQpAy7&pb@NodB1MHuN*Kqf+7;rEgc99obwI1|fXNRJXH zgI=k`15Wo;w%m%}e&j@O4n!6uKM#{{YxX6hp+Cd->7ah$4+vXn1r-pf_+Gmj?Um<5 z;TGT23iYbhV}V2f!qI=b@Ygs|~)9W_+=nKXMy#F_EUFnw()BM5e 
z;A?;P?jS6X3>=9~SM@FW!zb8PeNa{)Ji}MT-x0_yGgVkOb0e%XcoZp}a|7x13m>>4 zOD4hnD}VFD%imkX@SYsRSnW=q2at8}?Z9?*8=2c4d+75KxqX3O0gehxb?W%TGWiJk zedoXXotCJB)r70|EgzL3?mPe8|I&VkPa^+(xa$ZZ3FPWJaWS)c^UB};a73>9V_2Da z`wthbR@!*=&Pp84M9Bfw{%QLJ4I^V|buRiCY zjZsIT8JIOHdHGmJXys>FG!1B5lnb(qH0~Zgw?=mZdsnHPIdo*rT;HwJ0+rUH<5`*p zXL|MLQqVayJ{|@A%43CNER|wUdis!<42!9y8Hif-Rz%ck!fSdu30_thi{@{2l(mo9 zqz1D#%BQ0WfpnCQe*4xy2rRaC|1Qa4=Wj@)9}bve8%aT`wTmDqmt8Gj#UFmv6;=V_ ziP!Oa!VMd)z4`90sMJ@%(3PJ)M|6oW;gyzzZgkN}@1f6Z5QPmjV5eWK_c)8NSpgRZ zh%bZUm_(clevbd%7@eR2xtDvj4$`J875z@27?>Oe+^hIDB#?mF)mx~r?PU8`53Pt2 z!A3!TgNdc|?Lv)i1Y_><#FZ-*5G8msjCRH3{`Yq0uv!iiMi6%BE1*Ea!Cww12`RGo zU;nEp%s$fQ5GR*ENc-RZ(t(gnOQ8LvK!)_BD_bu(U--73)(DcOgl{%NTLm)_l1EHl z!Ahi`Wxf+f>nqrKmI2;@*aKFg3E2z~KTaJsTcqA(%D=eogLfM(pzeVD%jpf&FYdqE z?}UQ<+m9}ZW^7nd0VrX_$)>NyUi$O&JEATMe|x*)@Fg=8AnVY_kBa1<2h<_eLs}^yfk6^OJ&mPyE%Qp9LeDaC;QEO)LF!dEb?3l78>#tilp;4zva9al67s{}1@Y%a zaiboa{%pYY4_DH`Cd7cgTmp$f8{HQ4FIOsimI((?tN>g^Vx_U!=zBK~JG7Z)8q!8- zl{Bul-thTq+i<-Gxq?C-Xux@ukGsy$KS>GI6*M*SFsM1x(tVl^=~Bf^Y?Ue@o`yk2IOeu`l^Ew`hAN0knXF{FRwX51D~t?|K;3~ zdx?V1DOa+?Z_@g*P#4i=@;DoG4+1Pd;La+;Baqv<MN&qI6Y#PX0<3t-?PRTOhW@^Oiitx#P#f|&#$O-LJ@E&ah=niT^~+3 zE{!XK5)WK4YoS0U)vbQ|u!!~8H=b-a1@SBY;1~XGBCjUG1|f6+Rwf|?cB)G(x{UNF zxwhRZwl5~?q-KV1xSc1&p>RSR3c3pJ+5f7aJU;the-ND06^3+Tg*RF;G@}HRAc}lL zroX}K#_*b6OG;60!|{3Qgy#^+TMG1>%~;dhtU*~HXh^P`m#^0Y`PY8a&V~7HS5I~X zNH}to0@8TbTi$G5+YlFLpUJO)(M*i#Dz|3{304CyD9A$mnk?X~GYePsB&xIB$mEAV z&dwrUcqyMxOccSYt~<%55-Z5YUehj)I)&`4B8(x+O6Q2i>`VRuvDx)s0a#bhIzvYQ zwz|R13eh#HKOE>cJAI!0IrL*#eFg*^Y`#Ii(Odgf-l zX+dR-T-(@(8J+ydgN`23NRstXEaNI=<~U9`+Za^|fEuOr#pMiR~e=5CJLSrc$7Ymq>d|ywP*0 zH;PoN+|)u(Q8DQ`6z{!QR0N?|Ac9FtNe~04O#}pjyny(s<-ptJZ$HnTz1OT+Yt5S3 zGkec|zTeOP|NT9C&t=WbnsuA?Ty_t%+AtF*9I|-g?(rZC;cN4?baZHoC`T;T*`qgW zmIT_j{kqSL|0Gt0`^fR++43ac@BEK`t@tU-MHga1@YBES7d`Z0`ScTMA~4^!HZLpf4JVnGcflGCIQ_M zA#2qOwX~4@C7Mbn21+NXjudy@A8-{VXdQp~1mN-%&Io?+|K7(}J29KfvGpGG2HS75 zg*r43+s}LP9P=2z=n3BvZ4h2{n4jEo*$oDs8U2W_{uy5pe%F8T8`_e8gic9!)%hTH 
zQ*8cV6$HsEwa85cfjKkZ*OMw3Q*pDSEpWEWda8eZT!T z_DlYOQI3vw33%4#nUq={>halM|GDA=PL?$-PuTt8|M5EyJ!oP{r2W8u_1or7{w;9Z zQpu;>QsE5X^Kfs;%>TT>5t#bd*~Nc}&y|<-~>;JeOT+%dg|te1Q<) zvL+w7o>x*7dJmBiWQFE$La2|2hvEdA%-4JBno;=k0St<?dE&^NohFLM%3!N zaP$}ruj%nSm%Y6YF$wU|cz~6E&FOnrUY}s{q-n0P0H_sahotoeFmJat(ss3MzH4Mb z0-f?;b-^_4x(A`=gbkEhZ;zA8(w ze9d_I%KDEzzHXL(A}#x*AP>0AoG6CtWU{rT>X zw14!^{DlX<_jCdeoj`TqkSW&Ot${r&ep^Y!-c!PT5ftFb%%@A+T<;Nx$R z_b>Y?KRJBrAN~g(zwfwweVHdSm;2BCyPple^56dTp#|UfpZ})(4Y%L-2g6_f__}mi9GC1RITPSE5OsV<~-Im7BtZ-(=|GD$ntWuiBu`H zhNM+3)mEtexy4@(;147Iyet^7vK8>TDnD9%shMPlkbdhAje#jYR)=>5dYbd)VE={h z`d`AY{+IToz+ZlR`fvGU;B$fYQB#4kS$i@;4>>>er&;N)kj#D?b{^t- zlpiqN75sPne%hCRnbA5Ge%^L_x?{ZmC>zBDQKj#q%CtOSgdVn+2Q_@=TRxX3=^kUO zNlED|(c2tof{(XRl7BoRIEdwGw?FN__y@AG9Ca4%@j#KEpFU9J7yrw@*8ED{-~D&~ zZJ~s}^pVb&{e-_~yI+zarB6m(3Fxp#N3ifc%IuQhSRSO)o-oxy(cN;OLq@a~URJfT z^q2m;pP3}KmaoD8mM_>}mp%0RZA}wJ<#nCU$GwY*`-tUDlyCgHU!El4mM1s2B*&LG z1E`IAJn{Tr_}5K3OEU5jBsTPERG0YRClPt~evNl&%M-m{{4@Vgm=7!^0-hMzk>C@=N;xDJ;A)H zR+!l|$vz>b9XZ4$>ps3VDf!1?aWY%FK;VNlxHYqf!eN0rq2*8h9p9~9iNxdZTUJDY zPi&Ih=y{O$L04NZs&)(V@zm@zhTK7|<@O4E-d-Zzf*vj!JOl8C5}$%nKM2$?LD0AC zD3;&)`_wP`^yBX+RW|yEoQ0;+CX~Sa|x=zT9(wgz)LF{e?-%_y5r!-snbrkvyGsIRGcWd1mOvx3Ew4 zCGFP^Kj)`^MN<8R@A}<=Jl;KQ61W~rA8X@JDo%cM*$UW<$#&fOC6Vxt{*#|h{^SF5 zon&ws{{4UCj};H%pK^TV@P}BkZ+SBK%)oPk4Hf*8oXb~W%h;n4VD}V|T%qcLLy+B< zR_}=!TfpV(4-}q={M27TCQFMs8H-+h=GTAj@%w)iU-nb#AI`y(dH>$$zN45#Jo4$k zpY(Se`6_mr{99gs{~!IKq{Z^-L=S%MSA8nE|H1$BPldTaBAthuVQ&fzLNC&#L`wOz zRACirCVjDe-hs;XL1;m7myP8Ikn+4+wuUF}c$>$AY#|M&cxc7X2Yd1_dM?Yq*=5&b zw}10vly?uoe*heXwzYYtEK#O_3L`)ZbGlF@_9^x2(on*aR#`ZdIe&wcYZ7{6KdoYkkl_7`NywC7WP(^-H2ANk>= z&8L6)FWesvqOj~6KmE()-}H+gHC7UJKel~EELXr~J>U7Cd{f>Dq@$1o3}*I7)9nVa zh)=WhKGo6#&!_n)mOc1oKfiof=li=nyrlG7WlJLD2mae{cP7@$fWr;W!=5C6;$Qlh zM3?!1fm-k7jgIG+p-_1Xjf$El^0L9->pFvl)^CO``ulyq{Wr;#hfHsshg?3j_`m(v zzs)7PBOZ!Vdh+qNeCE?tzo*DQQF-il26H3$2mX&V0r-ty_bcqLaEIrc6~FppJBsi9 zeSauSg_!h%Reo9n&Pt338zuctZAs&(Jj3HTEKQLx7!;hxAt<`7w=L+XJ-bRLxr`=Z 
zAw37sv&lX_hM4g4yb(1oxa?rQw>;~483xPvg3@LV0frtrESYjH?>y7t3I9vW|Bw$^ z0I)$G>pr%z-rjwVyB(7>8Gya=Yn}F%urb+ z^e_7byZ=Mn5{Ans8Xpe|`6-ILWMkA> ze*13>zw!KAN^gw<@W}E$uIbs;Ya`P=YtBCg8{AQ?RVnA=3n8Bk|m+_jpP9c%cr51zc2pT zzwo$U9%!-r{p^4CGwY|cdT~5{bq_L;>W?SYAOCA#dOT%z`Q%^3!skE!KY#wQ-sO{? z%cpaG;>UyM@~J<509_8Y-}?FQN@zwb{l4`J-<3S9!$W&XCgw@Pl)uZT&*;~Xmroad z_0RaqMhY4mT$02z@%P1#b@~&7TP@K1_>qtNpufksye@mN@bE8vl;7nC`j>rK zc(v^T`Mo*|%hQXOrwdcs-~LgSmIsLZ$ohbhMOV1&EVO^;fBb90&-&*+6NZ5XU^PIU z+)?TH%zyZiU;p*DC%=vP$N#4SjZ+HVWHemp)s z`(OR1kHp=g)5|=5(nmf~Iq~XkoELxa(DEt5Z~VG{Cj6zp@|^Ea{9BfXcVPI6fBIKG z8&nF#XMe-Dg&+7Y|I^(*eB|YlNL|MGk&iy$$G`NYNBO7z;eSw^_t*Suzb0IYO+?rc zkst0xD@;e|Q6p8m`Md}NOwzh zcz!(+`#7kSD4$}Jg2NTR?F!8d=~=QEya zUvFO$QOmb$xfAQb?O<@>7xvuR?a7;R$y_ZDLFaB4)>G8=v znj~71FxFQv7k(|t8q32f) z^j%yun>;XDVGh6uxmE~=9ZM%TLEc;|yDU*~k#FZiUy%5dwu;t8idGavbKvm{HjgsW zF!`Ocz_|4?4^Uk9H$*8!gJi*=?>yZDgr%?Dmxg$|FmgJ93Q z&j|Hvt3964O+6$s0z+zfpxb>k+t<=>LDsjlxYhUvWOfiVs-O%2#g-mOF-9vb_duGl zV~)_+_^x(L$}`fTN7`ddW|3z{!8%1t7B(yo75HNK?6-V-@rTZrPw0K-TYg(G8GL}x z%lKdOF{#0Sv_AvTfwTc@EK>gDvoXl|_N^ZiKL6ss^&8i(@|Lme!O!|4H6IhE;uC_KCT}@sYaV~~;~xC$;kW#b|K8na+2^1A zFZ`-Qa{ljq?(>hto`pqMmv+lvEf)SS$<3lH9uSm;4@(lz`&HiMp&vEv`vb%FTf&du z`cs-+raXT5q&@m=46Zmu3f_T{qJOmhG)&~r4+rDUePKg#3K~_qRa(1vu89x$h{?X} zfggcJRs7I&W5nP49uA>Bv76p46+5_vRBX2Oqpn z3~Yzz;nhwUv;DhoFDsAIk~a4|CRsdZ+v{h?D@%)zy0yah~=>LV_*8x<7>%#vg|qF!aq8n z=a~71fA8=8gO9ZYfq!`_HTL;Kk59bSAbv^`*%aY5W z$Jc1-r#2RTFJC?Tj*khPJwYTIgbYg(aQUR+&;99N5q{3k_{xOtvY+4oNB`Jk{O2^0 ze65(tW%Rlq+bo}uS-yh0e4TK4D)90H^YMrBm$@!-^!q>3jdNWd!jK({+&_lU-I++@yA|>vhzLvivR=z&$=~{P<+o{)wok{Qjx%-M{Y- zJ?22ESiXqAG$~P?Z^Jw zm%SrE>;0aOiL$6u z%h;5zCgT*#JeRNdE_<-}y5(!X4`26jy~`Us(%k-sk7NGOpY%VS_xW_>Z~M58-yMF* zM|uCLfA3FbeEaC%|F1t)1FLR`?Q9Y( zcRm65p>!aiaDNzm_0=*{oNF{Gp55<9kMx1}lt9%icECUMM zdJhc^;AkV$bh@XlJs;CUKj~{I__6tLK-R7w?Sp-}W}uJ&tVWv*XsF!_i%t;&Ke9To z^|m+qn#h*1!K_U*YfIWFUHB=L*t)&y`KiJ!!7A-R2xTX2KAoioiV$pdmUL?ATiDi_ zmX6$S#`LUCf;1yfbkutlh9G#R^NxGM64%mxcbAD?o=66dPezIJ<*H(to~nsm=^5_| 
zVl&?&XlCQ4i9ZhA{Z;CRH;lQ&fJi~#1p4ph!wHE>f|{t!N)pQ9Gba6B!<67n7IL!8 z593Myb$Bd-Wj%m2*ks>j>+06>!R%%QLkD=qvz=NS51o;LX?;6i@-M!kvumw;spZLN zHO|(`!A~3O^|5(DkT`KfUV0`GMdEPXq}U(u&`rKze|0(p?2G8@yb z_)PZgw|PhbLXI)Hm*jZs+$;G9N_jGv@Jc7gH8Q7re72~(YIF3|QHDc-Mr`lZT1F$= zFHoCqf>D~zJMIn7fixUwV-6y!Oh;u3iQ{$ zNBgNE;FIQG_kG8XAv_slU_rU&ZRn{>I?co{*n&Hhs9(zmvv!?T4Oy(H$3_~hZAZ`p zdNsfg)QYIxUO(iQWb25kxa5{krlZ2!sSZD+^Mbn*>nAF;c8-wp9&8fOkzW>i=xqDA zd%%TCtK?S+l%y%UAXPx#h=118-yK29drL+=Il3xP?UO7hofOy9U06^!>82F0o33cb z0eKx7YG*)c*D`!h{yuL}tQYI7t$D}2!6UR*g>cIQ6l!z}e8dH`n5^kVT#BmswRG=> zVI!*S+#A4SFO!jP=qB@4&) z<3l8o(Oh4*uc2p+K%B-ru)}2uZO2W>3|T17%0st3@#i(x-OsCFk7u?glSd3`BB#nI zrmhBS=A+VSxX4=YwPsR4;TF` znb{kRdkAIk{*HS=QZ0UMmCes! zjp_-7G2W0cefbA6!Aj%3TbqY${{(JL=UPtmcg?cy!LfCKvkgC)@)?MoxIrxr)@M&X zSoG;9Uecl3b|jzfu_PiCYD5=Y7yv8UKUwq9AURUC82sz2<*sO0&6b1I*}FI?8Dr7A z&>&hlpJ4;>yp}eJi#_34!|!Z}hu|keU^`r}mh?Au5~Wz{wfJ=XN>Uta>$vjTRCDd? zOj3|r0+bYj5|@%@G=k>JMy+G4*?;Fyw3$wQK}vMyv@sF-PGXbgqsw;xN69W*mqcA4nn6jAac^;3|jTcd+Ao9&*4cH&`u3W4a0ZQ%v|xEv?x zfwcEsdC+GamKyR?7H!K=dF_`y`GFLIPe%2vP)mG^Rh z=G$YP5ldOgN=?bc7a|)!5pi{$r36{^tnSk)>Snt z9&k^HU(N-K7Ws*gu9vU zl-WBrbK!F>+5$Cwr;DC)oB5P|9r{W}K- zUF;m_zJT-h;lAMVjL*NC;Oq7R$YjMjGx6(5)~?0>L#HR%+Dp(Y((efOG>+LSPsHe@ z3<^D@4MHfa?Zs>^e6WZ4Uj&d#h=ZSLE`biD4fzR2K56O7(QcBJo zKu)GT?DQu8YN&&|aGygBeRR}Qj?}QcMnv#L1>ba;Ylzf{)ML!EG))!KvDxbpw1x&* zKTf1^l29x-2577sjU-W50(HLh<9+m#fyt)?*+@wuaKA=vto6q$_4-iG9*A{P0xbkt zp~_J;f7A#46mz{eN@>G=&fpYyB>(2Lo_}MkSIH=mVxADXQW4(K2Mzm(QUQtnWc^0`n3v3N22Y^Qmb?yzz z#7kYLN&eB-3joTiisN%_p#YT)Knh9KgvXsh-vn6S2%+y9PNefsoXm4B2g3Qk4ODlJ ztp|;A$)uyuMicQa_Ovth1fZ456dU>B3BA1V&Y}APT4##AA?=r~!Z5CA27d;l+Q*iI`tJvSwE&%G)Q{Y0`uRmBg2+~(S zw6ufAHlooMJp&}V!FBq)y-B|U4E4#dwl@2*uy(0>dwS^fNm;uZ&?oX)t;#rZ45-ni zzPxtyle9@v2xR@Vw!Trjpr`twRbg2Xu)CZAwmnJtRY;!ip~)7|#>)U-K%l<`OLc8_ z7SoUlo_|}9?Kl5up#b*vKG5o8GBLh}_1exVD)&5*kPN7lMau|;5@fyn`^%Gl+Pt(d z62v*~*Q25Am)(5Fm5T6={?L*ZB_~bnNeG7iZ%L>$FuG{G~>QCdk`(rZwzc5LL4fWDCf)=Kl zM9)6ajDRC4JH-+Ua~uVl?6Qsp)i}NuRPL=XkIFt@QWl1=V`o|!8(b?K`BQ+^s4(3v 
zZ}JaBI&C{|C;yiEx(9N!%3tNN(M7M7Dn(HRTZXlD!mJNiP^-C#$8$oh*kqEU+4#fVWO zlNlC2dT9j@VsC-cMA6HgD?RDWkR<$K8S$Z(KHc6{Y6p8memrjQ0AG4aZkzr$2IrdH zJRObmAB7@cB>6jkDoOkwbN{7IDljBpjlC_{{LT0vT!i!FCxdntJB&W-fx^8ssuU^~ z?Pw+w7w(CGanR z{lZ~7pl?+p4n?39^7TuHP-)sH2?-run%WuTM~L67B~b~AokG~Y@{5AmCjDV+8`hr% zggwB)&Z~v(A8%v!O2aX!tyGtm?$n;UOrMn=HAr8zB^e>eNlV@q6q+YnK#8p(__oJ6 z)yrbP7e~vvLnJ^WJ0$nCjnqFASIx1N_3|ARc<^1YL7)W`7ulBE1Bq_neoq~)#_n4i zJv7R(&UH_iY0u818F)~7P=DSNe<`?Jh&XS);22RnrqTDb+rZb21-!|@&>Ao&nGV*D%-o4fQ~w^-SBO_pSdkQsb<~iY;z=5su!g}o|uru zLcMq|^{bj{`j#^!CI09OmFs52cgiGT=bJAH0x=+&ff)?Z;gh-{Q($tGuuI6{(R-ks z_yL|qC-k8oY)l|e3dV#uM_b8@Ygv)wXtBGcnN0`75AOG=mzQy} zZFwoIe9d$LQ~9T_LAk#zl!aiSIfSWrAWa2@tMtB;?o5;Xn*dhGl<@srct?zH!t>DB zzs0X&i(}T46isFGF6MJre=u$>mO7f@KjdK|Xhai=bISiG<$2|+YEJ?}HI3K{&05-O zv^M(aV}SxS`4Dbw9!m?QuROmH9m;_%Lv||1tj0&Yi`0;f6cc|EKPC$pQ*O#~Iz(ov zJOpA~SW*wo>~G;L%CiR$9V4?gL46B=OTkRjbQxG}-Sf>l$^*8c5nUt@x&fDJ=^@ z*2V3K%32cR9d!`+h>YocpQL@e4XAODjK34z1EG3oQ{d4w#8ELutGb>U5ZI~-OLMv-<=_aguw1YP(nWx`Tz+>63j3E&rbkC zzo~kTcs24-rnmp1RUdj*`#xHJl-KR^FSl())|V&!1jB&RP1PiTrIbra2pflW0FxK9 zO(n?;BmIpO79I$Z{Sb(X5BylmQOiALv>+t{u$3;K{1ftngbBU074xEP7W7r4A-O24 zM8>|ixZX>2OJ4z|)TF2o3ZoB%HWj{1ZGmHB@^@SX!T%(pi+j=5Hi;CuagkDPA@ojk zOB#d0Fm*vb@SmW0#~?YK4aA1hAA!Bc%5DHnma%NwZ_R#s=v@D5*5h&ACqRzV(zruD zoc6O*3E9gqU_GtSOcP4Ms&s!GtrYn)ezvlzk?$BZSqF-c#ssp`U4V;t4N6%xFRV!B zIJEdln@KVJU)RXP4*;{*wD@2$BoIqd$>^Jq(%xcerPq#D`n3tYdW^MDYRq2B z!|_v!)iwicBxIuXYd*}S(rFDtjkuakM#(ZiJn}I|`#SAw1t{d5fG2gRgH{bPZeQ`m z^%@EN{UMzgGlaqfNVdAy?@9={TZosiC4m>1>hCnZRDBTUL_?WCr4k_&CgQA`D@dtA2? 
zy~L2dwH}Q=Pu#qt9_)0VJt&Jb(`@A}A*IhDy|rNZ4%X}n#7BEYKb@c82I-8GFJmx) z9e_~g0sje_36LF0ASj<^TI+*{;$5J{sRZ2;T_bQJuX$X|{X!Ez6rOj4Mp>G=roNj; zbOmAeib+1i7^^hrV@bPuhM=>}-mWZxSL^K&(WveB9Lu-jVe^s>(d(3wgI1Gp@vr!T z*3d<^i`pVH6x!?B)=5&uNLj|h{jjl+od-3eE1V5CVjvOTHvzUEG$A!0j2E6 zNiM3`1}u3`E|ucKlGgm*FC*I5ev#2E9H4}CLBMWFi)>3vEVSxsg#1d!@eUc5fTMW% zgcj7(D)C8$nwLnEe<8HlOUpf|X3gk3(qHH6j0C;u$X85ovdWOC5*s`!AtrQ^8PsjC^hH_g3e_I7y zZ)$->>eBT%U}-ph646dD-syrg03W0ds?xUtDu|+9>@kr&b?J&RBdmjM@)cmtS0F`} zjFGQh;PymAd?o^8(=BngUh_}>iSz=S946z-TVE^eH7Pv%KJKq}V zuJ}mqhFUg+2PzUfOALF*{J@4b+*V{&TB3;>7maA%S_2hH6w0J{L5m>-(>Rwn^M%pt zr_p_uWxp~nHhxD9w#1&~0Z2U+!s!I41>UHJb>8`4z^;etsvMNX?H{0T8n`@%p4CbaX&hcf*=dOO`_X^T>V z1gteAX+A+&pLMd=Vfyb1Y$~;HLs}^f4COhU4Nsn=7fND73ouqlXF!S9KnjRu2l7=X zq;)jmmpmyLLiU{q&)Y#@$P<*^dShSN{#9QLC+US(M!L%pwR{smau1t~NOL`J-ry)t z&QGl3A?_@OramR5e+PSv!7V>r*Rwd`DqPIx!@hyXSI1FnhSXyM-(S)Yr7$GJ-C~l| zE`8#eey=vYkdEb(%-Z>-IZ^PpEwrmS%Gb>q);{U+zYZvj-hT5?6jG zn@(K$BC=P)zj&X#`NG<~qYTd1t&mBUCV02TL0PG`A?IU{>UU>$y565Q&}6z#Lm@Gv zaQ%EWuf_dJ`~{rk8#X|$gA|e8%@#Z7TMA__)Bgz)e}_OLlrd`oVU%2M^XrU=M4Rcu zjyvjsyaylB+cEil>G-I%3y!umayiphX-8?ReGO$iF_J?*6o%9}D$_W!AIOF}0y*u` zfgXf>{o2V*C^J|$1n9UPF{XO%z#zZlBJq{hNrQwyzQIeM7QT)wJsm*$(ZTo?U;60b z>P_O4m$+@ZDHpI<2HU?V)PUH#lzO0LISw6AjJ;J4Yt$A=eaFQk?>HN(EFjxw47y-w zC%QwDn^6nGs3GVHywd1aQg90J$s{&>-5-!v(8I&OHI(=k{TA-G{lxo1SkAzSzN+3G zY4V0TuoHBI$6Z5)l{+$O4u0lR$n)M4fN;#hgQLWQnna;@jG7gB+wy#ux4oBk2(|N; zfBsi*-$9gl9wi!%0%4&2E!w62p=}NP;MFCW$cbs2$OVQup0h&SOQCFXcKE^?>o*$%|0bE6%7Ej_L3k`os*0;eXSQ7uFvcPn07!IV_4fcu;i_|Rg$;&tu{aeM6wjW1H5 z)|FqBXch@x2T4aZ9?^NOe<#Y2G?eDy`1ZxbU;LGu=q3ST9EWSdZ(rLkZxif`iH12& zWe2G(JDJzrb$T4EEfdw1o~v;`zWE>Ok);!F0-rtokhFiG-o>QDv}sjsa?CSE?RLrA zERVy3`JRyC>0r(R*LnDO-zDKi@1=8+V9zrCT2lpiO!RgrpqI_V?1EKF2cZ(J6T!n7 zS{^aVM`l4Tkl=&_E6Kry3xdJ4=bCWSjwcBu*CC*~aUrBXLQf}7%D1;u`2@30p$u&Y z+L5wJv+Xg^d+r8gp<{%!0m0&6+BMcmald3tEfySM&nSU@M-$BL@b0r@BZ@kSL6=@h z?tPQdv`1^x0;_)?%h3Q$cax{2(|c=g-eE(KNBurJY#Us;$PWoV?=z{fIv*dBU 
ztQ9nR=?rgXDb{!69Tx5{0zcGc(;7cdE)#rIehmr@STD9v6+-sgevt}hlfGK{Fr5twY)}exFP|o*hHTe#%|{O0R@F>O+1uD`8ClLPDZu=~yV~$;O@5 zfh1Segg_-x!>1M-32U;?REssgh_`W19WrcI}^+dD;r9X0N&c3TQ2jm-eb5x%n}mHQ zA5cS>VlQTN!p`EA_hu?+=h*Ur*hi}qEwJzdb07mKa59UA>xFsa_X!v|3uKx+j-|2Y#Y zbnI*Cv_srWL1P)N|M0^F@DANQ@QcIAuij4kG;lAb1)2_kwO1Q%$du#Chx;BQ2@&IziL`@$P3qt zq{wIi?ahDFZ!f=> zQLdbG(NCQP&i9MKlYVafA0sbNsFSA2!}~41vfQICmc5N3(=O;@W!S0*!){`|A2h!4kT9!?QWlXxEq??d8KNDPz zPmn5ce2(oMOX{Sgj#5&PT+^=-?@1^~`XT)xy6h=#xF`7Nr-?R#aWc4F@j3Xx6{yXQ zmPU|rW^`1`_SDWu&CgiGxs9Qd3Q9kzZwMn;_>MC`d`2;=mefhcfR;;j{UGViONu1d zU$83IvQ3B2_A++{HGrCIv!+Iqc_Ha%DR>IDPoQK~d(juQQcb6I%9d#XK=*J11;P{Q zycGPz0=Q&lMmt{);W!9VDE5oUX95Ytmt!uH|VMbM_avj6-ZJArJ$kr84Du2Z?83Y|F~9x|dpd>kw{ zc_!J=Wjv%~4kUa4> zakr84oSWkDoAaIpefCJwTz>XUihME1~m zFAJ>fFf`&5>-`B2^$>>*>26C~MBB!4&D%XMa7!w$LG6tl(7^=eSqB&u6DIyRPl~)5 zJAWvYTV7&Y!lC^|_BT7|JjcxU10Bi(xn13^c-fB?<5VaAf+kFPWKkLNy@lG@0Td=->4+4##7S5YF;Dh}cbpC@!lQe{4M28U zhBO)If@b0J7Iaz`N&%}j#snt60#?{9wcYJthIgfNKw=Ww_dK%%O1J62QysLtfef3Pp!)|_Wh{E?S=y#QT4NBa-|NJsbYD=e4&jv+ z+!>sIk|8rBiDwFKm2j10r(v=Zt5iEuk6!w(uaHIx18kTU2cgiW%V5_1Oh8%v27+z_ zI?8z+F9U(jegS)&dRrWnpJU;2B_@@1!#$T}e(aj8bEb9_plxFvu!>_DfZ1`roPF>P zX|vFu{2OuLbZ>3+3%K1H)Sd>m#_ge_jnt=JGzSSY4X!{bvkvO3UlRA&#T1_W!viZt z_3n`Tfp9NnQVPMx*{Uv0dxS1pK;cz0_y7h0ehot6kjUiT9EHFs8Ux&D<=%$Y9p#vG zTsR)hfKe_vVhHjqx=jKdZQ{MuPl}@H-#g}m6wPURyy+WM5R?JOCAS%p9^DIITpP;f z(e*bCEBWW(D{XRIMFvAM&XRX8OqR!;k}n8G*+odr+;g2V^*7uuNHLKVzaJVv31Vk} z^oA1EA+b$=Xo)-?#?Y;?;;`)W^5{r(?6wZ)U;x)uwo8~i*p&3WDD&s636&A0@LwuV z8Wchidlr@xnF%M&kL7bd@${L4g}S!8=d}tFf@Ea##;u7wZ$BCu`p4nHq}8?pLg0m9 z5R^O?gyd(^MEVXO>4au0izrmKF9WxKg6`oR@zND~Bu!Jh?=_ysPfO#fUm*=jIi|Te zB<&y7lJ@~dB8kc{Nu*MJ>QpSbJ8ysH7d(8k?MqPlE=+oq^ zRMv>2`v2^UT6l3BH33_}!Sm^gp^aU{ZNs zAPE+&Q!!@-#|$H^gNuoY_b zTCon=p-noR({l3KE(frYfZ8ttR)~H%iK4jd1|dnF@uGwOP2&_TZ}r>8zL}T|+LQmQGQ6 z@8}9BRZ>CXzJOnEWw^f@Qn&vsh31-G)ajKSiub;!E} z(t1#-cS2iZ$(D3RYw~Zj4_5}w<8&*yt43T;H-ZBZZPocnGHnZJ9V%_5*NpCiAYIoT 
zgPlhGkb>7!P_WlmfeK5tKR$O&+Iu%V{0FP^+CcpS#=9`Q8`ML#t|R6AxzTJmwThzEOGupk&nNS6ZbnGq>J^>;pdAmf60Iee`83Bb5{P#nRB zw#1m8QE6c(7s}FHJnd=@4k5_S!r#vkxVHes%XGLxZ18AuXj=SpA3U^@;Fk{+>G}bY z2xyhTFEp&%C}7B^M3dEL!Wy$GF(+lW)>n*)Yw9@G`&r$>QIy1yzEW`K?6q}8coX1K zq=1YD`02i55FqGAgXx%Vra``zB(y{D4-I6>u})bW=MYgzvpGjFh)=|n?CQJT1>KXz z*Gk797#Va7eL5uB=}ewpgHd0JQ6fvf>vWm^Q+pgjr$H|hvUYGjoH(oop3|4Y8FTBl zrxk}Ag8Lk&JZa21O&lxdI}%(t4jtCUtjS8`VUV{EJaqWE&XxGPnaw&2k{l@z6M(2( z=c9~9zYVs<1+um+^vCz%E~NtmQpr<1r6JiF4i8NWRx;90xziw&mwPE62YMPvULDKb zMnVcU&NiFE5nn|1PEV9S5dLUAA^wP+NI5`oyo2=m;J$#fH3Xl}b;*t8`FSwI)mu{Y zcOQQ#wk_eO8h+YI8WYg3{su}6Q#ukF_g*@9acHs*jy7|FZ-}PV?4W}|b`aD+ym?qj zFt2AL(2T5gLh=~q?of2K6*cqcj;R5?T*e-mKL7TzG2;~{!~N%B+{nQ`7?0B>{j4^8 zs#a(uz9Dl9P#f7`3(=?_j!AgV-E?5rZeB1S?;N@Xnp{gVUP>X@;A^^(&1%?>Lujy# zA2d1^aC415v@ajJ(6i4dlIGRZ_VT`Ev_16DEoPIbN#yu2g$bi{#%M&|9gIoxp7~wR zbBudgsCVc&AI}t|va{l+W>KKBBzYO(yh|TeL-P#M9K7eRJHlFP)FO?IL);WMygWK6 z3FBG9`i>_AUSqK>GNoe)znFmsB7xjMYX+-g@3q{qrW2CiF3Ep){Wt`A@Cw0ZBr7j1 z`c3PcWTZH+D^S`VX;|9tG-w*riL?g?*6i)7`+q*kCoepq?a-~_y4r29uEfS*Gtevd zFPJVr3TZMRxinq|kqGS-KlOK8XlFE|c?8yVJ&vK#t`DHR(t52<1hUtVVXhwwN!9#3Au1-`ZR~Wp@LWY`*W@83(0Zca+ptYuhl)E9~LrkPI=ePlOD2<&6rQ zB>ZAql!XwCu^MwNlI%-5pd|;Gp>`}ZO^R4DDOl&E6qpvyJWth9JurZ zkb!DuHjIm=XP;bOpSE-Zg~?TH(g-{J4g;3ZWG5o# zD59_=p8$-B!7v2Wo?4>C#gE6M5cI&7-Va)a6D8$wI2;J?%CZ54zlcVIRRZPzTib^L z$-h4O2muVQc-c1997KxfEXSh&>@RmZr%WNRQNhtBmIJv-;@Rz!e#k2ChK7l2am6W^ zNcJzBMvM8CwoRj>=}Pj!qGJ4U6FZgQq&b7|*8!CYz~`vFmfModtA!NY_Zp{W51_J- z9+Jpyn?aH&2|-}W8?ULRHxHajviydK#3_5gl5wg96s?=pIj!IJay=9GhGZ9NW$q}b zh-i)grScGDc@i^*K?Dek?Pw>b23N}v^kjw>%t}Em_AowNAi>wvL;4*9pzRMbj}()B zH1oba?H5lqK3w*#p)C=lu>!ENYh;r~03eh*A_U8ay-5Wlhwx-)tu%21q^vYe{!QVS zE)sxFYj2-D-H7Kkl?KaS4nc*FtjApve_T;I=wJhoz4B}6o?DK+oF@HfW1K|)Ye3iG z@HteDHHHntb@C(P@S!ZXS4Tm^0V~GQMk|`Es>A3+d*#LibM7$whZ=g`@w}|b6F&Vc zL)ir6UP@%dIt~nFk*)?O$S0_Dc}HJ> zrvWvXnIB-%!N1!3yx8mToLww?MY#{X)|PDPibQJ+G>t@kG7xm%)1urjqmbMy@tx<= z)Ch^^Ja!o9|97BK{+gcA&(d8MF3V4F3<=ckvd28JG=9!rR+mCh>O*MR6GoEnR;pLA zCsHQ#RArh>=J`Rii5J$1$~Ohi2- 
z-)JpKvAUi<+Sa`k@YVX@n%Q~|6y=d*3(`e}Kp6^@Hqd9c;&XpxT3W(SKK7xopCQ!G zGZ__GAq1VUQFo@fFt%*Wc(9Mb7n(hVH@3x){#015j&*a0G}k%Y$)$_{b=q88G>0C7 ztXbW7DXVQ4jFRoU!=+XalZq@z#YfrxsoIqiPw+t?O1|3WgPY;)u6af23{umd&*d6h zl7!PTz_XC8l zo*#j^Nr&}@f4BowYIm?v5>A*mgh9R`$A>tc4RB|Yz3tL#d!;qv;+sP*h_!qkl`ND; z7~>`5*)QI2uNZQ>ipk3X;QH_LznEIaAWbBYNt84K*a&Ti%gIH?B5zP&+Ewn}!?(z@mW>1$B#8wZfI|BY|43Usv~3j8RF z_wey@Raly_w`g>Lml6#|(Wtljd&fCPzEF5&n#*+y3M8a9!C7}}(3nB=7RTW9|JQ;C za2LbLmtOHD)g2J1o^ZAb=M6g9UQ5{4j_LIPNC`fv%(~}g`~(u;xQ1;Vc-DvC2ZGB` z#exQzS{bJeR1eKJSK;`&O7cwawU}RvwLu_&#r00SXC_Xxt#JO3+gE_Kv7-C*%*7il zk4&D`>m6+4)QmN1Z}xfkj&IeV2^j9-XdC{=`X2=ZAo3nRUn-fTa@2&O&$Kcb(AKSG zQG~WqQmhA1DFa8D3*cDfy zTtzDRmcDNlsfUx4EXeo#_x!f&Cyg_CF6q3_JMG(zcz#Uo8T_P&U4O0NN?@Qu)e*hAep#PKKn;?ny|(rrGkzKzzj$)PL+nr%gqH z7V}6k(-!aY-^9fr{tos0IIsb9y*2*l5WE5AMQEN@AAOcW1WKKkNdQJ8IR?|DAE^X$ zMibj86(`PHN_U*(sGMH`_OT=eKg~*wX4PPq#{(b^>s(BfWM2w{B*DVebS;H zgNJK@OsEWK>XpyQER+seLV_@P8tp*)p{-Gp=cZ|-+oMiE`u#}$Ghs-}wJ3`kzhIQH zZgGr&_jEI(A^DTi`BiG;9*hoJi6i^>zV{q^K#58{JifaWZ*f6xuYRM3(b>_(7PLjnhl;t_76q zAxO2K|2UDR8mB-?#5cpR3sYmAZ}RDJVg3U*NHT$*Q-vru>-=GTN-;>m(yeSQ_d5nS zBe@IX*TXQe{Flx!X2)at4Hfm4*g~Pt4*cwgbZ_dexYj%^)JHDNo9* zL&O>R1WaZUOQ6u!0TPg)hZrbz9ULASnZ$?#eE+w!^&7shsgPJ*} z;)9kq-~*7%(N;y!H3#99*S{>Vv980AlASvh-MqXT=rJkU zeOKf`1FYq|=2W*X4H|-TU`bhT=%tb+AXyfc?bTpV-$19JwT-PFQ{2u? 
zjxML5v`dqrTIUz^Tbt&ZXgxXpXKht?wl;e7ciXbNV({Gg2JpH|uutZb&;jUdlM5=C z?R$-F!HT!f!k&;UN0`+nk~HKGaZHA#f(opAM?pnbt+Xpz^zfWGtgb!}3AJ&a?L}UC z4_71@NDM3n-=`)!@Kv)bUE~=AqEUuWktfoA+eJ;ZJ;4;_PAJj?P}b>j%$FJ8c6da} zOM?dlmY;GFr1$SQ2_3y8f4c8vP)yOps8j^*e=Q>ptdGZz-X0elmWz@Y`{0v6i4(_Q z_fol^LQ(85QGr8)-rZqhP($vTIPW0niGjy9{=Tq1)Fb|Wbrvk$3I}ED5~q8^*|4duae&!4-#doU#w zIZynlGi@G}%kxdW9Pq$zEk9{Fl5fC5Ti%D_kxX7=%jCgz7!L0DRcPD@EB=?xL5DCX zh8YF-+UHn{WMQQId=j>^2K+n+ci5*Q2CYFlY~8?ieBcEml7BfL$_cL8dHCBy)x52H ztB==S=E<=B60`BO#3&d$6;ubT*OWBx@J#7KM$<<_oKk81EV96m_EVF89qo^l&t*ge#pvBgyfG-N&3}Av8Uq{9`HL9 z%fak92za0@iB|3yq^wynu9s4*cft~RtPAYV?z7Jy*99c8(%>`DI2^Dc+L5Wm)FxnI z&FxJOIEMnAGL5YZ&oG4`2RihX(kyLZ=KzUwuPew} zv?u-{OXwv(l8mGcov5@$V2>Yy9&g_&PCEf5UUaoE12(1SUUs&o7eO~FShGUOSah~W zy&#$1>vSB2KM?Areg;YDmDZQ+;#bf{LsN}V4-MSGz1v*;H!+G+ho1L6$E%$9EpSk-xXpXFdruU3L8Wf|RQSje zq#UjqnSf`jZ)md;4B8{>EISp;?8%S>;Qkq^@yN$H5?{3NyXKJWo!5cXQ*pFbnZlTq zDKVI~p;m+T8=h&>%vGr$#eqpmYc7y{G}U5OWUmf-E#Pw3L|zMK*FSNIq%J1^_>+Et zI#Ef!N^|1-fO%Doc|Xg-%?5tN3PBO^76xH{PB?>9L9dflDdTG#R)FI9E0- z{vVtPUswpKVmkfLeKDJshm`L_c|Jg|dqR@KuKeIBn-KJ1NmiB9z{67awpDkS5E%9! 
z!&7kb#_)4DEw}TLtE>U0KhHyB(px6}I?2~?pEJ45qcCNG+pY1Cj!Px8TzsFRXxDGF zUm`l|2EVuL#uRqMxBd^bc`f<2;h~x4@jl>EUw%mTjDgCvXHGS=Yp{hNEGbiRcIa&9 z8J{Po(n$odR-^TN)l4D3)^`MjvSeD$pqD4ZK;u_ugP_Upp^&pg5QJr{v0)6Rwq$b% zo8oXDCD(k5EX7>MioYi)Hg)j1RhMQ4R+OuD4UA425ps{Q?*Un#!f&q7EJ+)M77UQo z6KsMtH3EnJC-uLwY@U6Ra@Wves$*`z(3(!8E?%_V;$%)M!IBrVdxC8!%YY_Xj*# zgK^+tHO)V1Avdi@+HtbvW`A^=zjOegHe|TYw4!ePYXY!tH9f^6PWMcHdd5Qb+7mPk zN=E>rZR;4$$~9SoofeM5{j)4HODimmi={h-{4p+I&~qLo(YP-O4KhRP>C*shwOycj z67m`oe)P9e;twc`kQ7{6M2TmhcYUiU+0M*gtdeE80FNh*SB}CRmc_ zJyo9|%@;Acl$^FLeJ`M+zSF^qL$j-wXFWzkN~}(Xp*o=jTNA+>>lNzJsCoXd5QwC# zIGnc~3Msho#M4P<3@6Tb0OUm3c7GCR!a}nea>%hKmiK?xHq@?aanJPVU_&T#hk)C> z$(`9`i9anC(!^Ctl#53wkn#>F(!3W2`dDuNrejne8U0ZEyMLwD=*oP3kfX z_aq@5Ac>)3l5f41l7S*E9*9Y=A&48J?fktRSRXw2LiXdK&$N8qFVGTsB0=Lmb|cXV zs1vg>)*_z)EiCi&J8|_?dFY=2OQ%424TU}um*{J3hhcEfr>Q{c#9fWMc{3sAKGFVY?5{cFAxi?p7$^Bw z%B_=rliE-bT~8$ApxXayc`eySQ0k@gA3t5wa{)k5M!6eK(g`VUyBz)3Ui}o{($F>h zsyzZpd*bU4#SvSRk;mi%&6=idW)s5HKZ;g_C;yJDsT}^&OpvrfDRCFyMNWgtoMzrL zeU)oB5|RZ*`4$P0Z6#bl2N(}(^gSG?0`(s8o?Iq+$9R?eurj3;7%eF&NJYY!ns$Aw zn647&s0Y$834lpPW&382l-y&|kTx_wBemgTn)k(|k0x^&v0I>{{gfEm4g6p^sUGlihKTFu zF6oEHRUWo)*D>);C1iTB5ilVfGc;hm1Ly5&v)dZu8v0kM4zsNeS5v*(cpi;u%Ss0hVbPNz`lBt0*M39qhXb~Nt zuQWf$hwW}6ARSjImJ5+Po{;laN)j4Mt*x2Xd*HPhGXb)T9b&qPCi+N&Bxy&IgCH;x zd0a>5fI7cvB9hN@tG_*@BXFtx0At{RWl{;Kex(O`xg96XuS1YkoVVp=W935=>H$Mo z;0$^Rc%z^Tpb2I-AbY*>#-hJ^kAY&+54Unm>Qh+yKO9mF>>5~#TPKtAB3P6t8SlZc z;s(xDRQv8LSxw6X~;Xe2LS1T z4OEHY86;3zq3*WIQAc8+UHhL!CgBh)m)l*gn7nLX_YLw`58j|7;F7kAo*0ycNLBnW zqT#lFk3mZ8CqXl$*8s9uQ2y@{<}B2Z_7gR zeCGo{=y=ipppybi0~-_QH8074m0Rq)kOqeFQ-4t49VoQ5b`^U;lPa}+6mV`X`3yD7jJPWeqE0dY-8$}w$+L}0c((xMMK5Bf^= z0_5%?$Vnin{CY?sRAF9zwn%$);#SmF^=w-WQd2zNKWAh}~OK zco7cT|GCiJ>)Z?;X}$ndzRu&srUCBExI8B-g@ zVa3UkC6%~=#&uApXz6Wv#-&A#G!6 zTI@en6)c%BCvwMVL9n{@hPCT#=xZFl)++C-+Iwf)d29RHLDq)byqo9)6z;dOG3(LJ zAN1NfyJ$1Z>QFh|##sE8`fx{^+Y{HWzxWkk&-7}R&~yl@8V^E#NO)U9hFTCv?t5~i z9A6>(DH~B|j z>G+fZmf@6aSKBPAJ4vn#gmw+!vQhGyoKs+wYobcWt+%a~c3-IRT#xD&brVQCIRQ@L 
z`DAP7vL-2>bnnLQxVgqjmNJ%40qz%F9qtq@6l|X+3xlBzCCQKFXHe|5_BG;_xq(3fL3inpG%Ze+vFYNnIrK|S0QQ-ln6Ive zuMcGH(a?@F*e{ECeiS;Q3SvL)lopix4&VpLyq3a_(Bfxr$~&kZ&>@+XVJEo-Hk$Cb zdNjCizruzk9LBUvd&epH{nT#47 z_ngZ95;$+%#%n^kHlfL(u!ajZ$jeD7`cFMGhUABTxcj~pI?5QAajC8ebdERDhoVf- z*zWjHrIAYK6e%Vu#>7edzfz$|z+34=zx^m;YNs`ij+tKp_DoZ60~Kv+X=|-Dt9WeZ zdxU>--_8=*)hW~gt7yS(D8m0V*wr95iT}asdeOy!`AG8hUIU|Xjwj^X=PbrY@(zS)L`=L>1 z%v6o!9+>RE{g?g*V9T1Dzy(-K2sV-qj9dt1?(nH`F|zu!LEy)_!}>*tPyIZw10%Y*XuvJA*rg z(GKAhpmKc?Mi>+qCi5bt9OS_%qCud+WuJpXlelAKUap_76N7I#7A^!g3Tt~cJX#DtUtMD6^}X*;Ho?<|`SYyT1;yb{-hYZJnp?RWY> z;S26{F)6F@OR~9qORYYoeK>ZYLKYDNSQZ%H5MR6m{!DibUfj{w|#-a zeF`o;k~F3JcRHlpF)zOzs4j)z)8Z2#9eQhnwkmKwoC{#SHMzEI!r??%dA(DLcatn! zY(J{uh;q1moCGNm!acEu-Pb(#1DGd z#MAP(J@|tYD2!D?DGaVlG|UO47we5CqG^Xv&XCeQiF>7WCki8-JCDYsrM)Y)FNb1s zd(6t{S9|SQKxMfL%m$&vY%AN!uPcYvhA44!42WPB8888Oka=5hzF!O}!CDf9!-9IQ zq-#J=upwn#=&Ns}pvO(V-cz7TWZTz!K5A17VYupAj*i0r(9v_AN;}GGV$_2zz_F z7mrJn2SGv{g^7TY2-n~V{<*5r;HsV?rRo({tsQ3blT3s}u^OHr7Yh9w1vhr}1hjoU zxCY69@YzkGrIaz1c5NnVZOr}w+eb#VIw&

    p<;%()mX{D z!>CnSZyvM3AfQ3+Jbgf;Onbc;mXVp-kXYcB^&;pw`TbQhR844!N(X@1ViAWNK8D)0 zPASSf@$0sG6YQ}I=qwAe-8pfIE#erzi$j0fCkYV`$@rP*$mr9O5);e6*M~x>fzqW= zk49T5pVTrgYWEKthD|ZtJeVLx_p86H+tmB~mQz26 zUvr46q@mtaER6u^b=b2K3$f;d0^ubA-NZR zR-{r9g!XrbbcD+SXNf7XJcOKqLes$LQxZo0FdJUqVsMmVMz0O&Q@<%~iL$0whpe*3 z9Br>c@G#L=UueQqII*Tdm`*=XZS^kpLU@x66E9(hWJNBD2tc^Dc-@V^7p<2|omp&rP}!}ew}@bG;~bheY@ootw1_Dl9( zl?8N@tqG;H^g9<#6#7d2cwKs}Me}#U#rBoUOb931kqhIdfp{bUy=j;l3{9TlGt`Ow zYWj|7TR#T~p{HXxNhbRo-F5?L8_UTz7?s3kup(t0t)~`hsE6=ISszDZ-!)5c+a#i%+YEwe~sb3K3 z(s~_0sJ3aXWMvL@ri!aYhcqw7(P(_on4)AoCDG`wL;Lk)ty z+g5zaYxLGpL*h-OKEL>S&>Cn5OJTeoIC9w6sKLsKy@WzrRL*T6|C)2-hi)OlSwyRL zPkR8J^;PrXMw6DWuekcA_!E ziKE(S=tA%*Df%K}qGiZ9**fx=L>m4S5x~mw2~Z<<4`7j8$rjT#Q;4#g2a?g88Ql6R zyCVhw*TC;>^R&m~3ewW~F+h$y{=XS_kb4G|569P%#*#WD7lgr=Ff6r+RAbx@A>1HF z;Fw6RN=|GzZm46GD1HUFB=|cp*`)GW)@~n$n)UAUsp<1s?S9<#Bw!9{7hen+Frf=6cX3S+W{C;d+vY^%aU=jE zE7e6ZR+8{;*w(WyAX`7C_n8cEhFFeLp~1H`2-hKad2H+8u21Q2{7L(YDus)o=+AbK zMm;T)PTbN)9E4=Qq#(I3XGH&N2k@0t5kut~ za{5U!4iuXF{OXa>jg%VuZX{bn z_;@qj>IgFdnCY&878cg-Wy==ToZCj>X=KvX9$I_N#)NQKY5Ij;{F$G5!XY)$G}8$r zk${AsJmbYdEVJf}K<<(99CLb|Kvx;U!F+(Gi1gqao3bTgNMED-X1e>;;gHk1cmDUJ8KdD;VYW5SWbNLDc3cAuYHR8KQ7)#k)vIc4h+mnkN8NvVKnO1fcOp+brnon~Jc8LT9_`fo`*LI}`&7W`_`tt}*i=O8qI2LZ{>& zmDjsK3%*zx=Ytq<_D9Jzg`mwo{>S>#b#S5{-+3wq$rShhyzUSrJAVU+!G;wG)x)7> zS(z5nNwF^oWxI&udK%ro?yC(v=aa6r7-9lQjOTUTa6{edj8C+R(QK)uPapbGZhH=D z#=*k9)$d9h!q2q5Xoip*R~dUNAm$n~D)HKgL=c#jPs3std8H1jWOeHxz{8KrK2o4 z97%(-d(`TLuBBksY8$6|+=@xoI|$d({Y8+Xe5veG)N9-mT2}KUNJ80cM7V@X_cwud z+GIedR@bnx##WpLRmZfMqEJ60D&{ANwiH(lLY@3za?j@$-w)p&Kt}DV(Ed7D{;3g~ z|L=ku**K0&zK_QP+;){HNZx|GB~Ax~Yj(^@dPRNM+D0AFCNd3fCR?CfcfpPItOp+a z#PV+ddMMSfvVVwrji%Kk@Q%ze3*MkK)^Yw~$oHwoo}p3(;;D?!X0LaEHdQf z`RHSVQ7qKCVnUCRjD(c>$5Vbon!L!mI0CyCDfh_O814^5qq@_;LJ&{Z@QD_lNbze3 zwzQAs{dqRLdL39OV^c@rU;xZijb1Godc8jCg*at?9mt52fV&h;=p~$LXsD-c8F&)w zs3lqHYXYzQNYTjTpfYFp-JD_i8f#;0I*m}8#%M@k-2*(rxzQebWRl9il>7_96<3V_ z=lot8KkLV05EMGc#K)A|Hqd5p|Nox+V?5{^P+1g!(_bsGCN`DN1jl&0}d!i__ro**? 
zt+ijk{?hs$)?3@7BNm_oECy%-YzcNA{U3xy&>CbStLL2xc-wMA#_KG3n;0c$o*;gs zLE}Rmc}eW0cBdhm3>IGR0z)?Wnvr$kfmdhL?mLoyi#}Kiu_rQ7I|kbh?r#p`#`bOj zPyC73?eLyIN@0K}u1zg*nhfV1?veCcFtKd$gGe`>(gwS+p0ywm^qBZdktF~}CjV-E z-@aw^>!122w)oeP>pe=j1q#B!r4fzBuoFd&mbeRhG>GZY98_os3B(qsey`*dQ*n)Vk z*+S8c%Iwc8v$lz{K(;*~AbA@%IJvtQJ%!AKy*KkmxAmbCLj@ zl}*ophnj(0TBPvOOS=X&hgB3)8RZdGk(CZHQ0P;+z(}&EENI3yy|=;|+^&{HFscz2 zrAp{gMTpT1rXw8mVO7wA`)=nfDhwc3TkGXc6AQz6!D(^F4xu>xF7p@)DZ=(TI@s;@ z14L}24UeA$l*rWBrugbPBxq$JOJ`xBy*!VpLG}#HKt+icq;6V(_m9zc8JMdFHDK+JE1 z;C(7&k^rpP6RhlUWIJm6*0lGEO56PF1OBXKG(v{bpxChv*N1CmtB}vl2+Sz&qcwjF zu`)C!PEnu)-0Jt^2zOFldCjv6!T?fn>ErA@ZMBs2xH!Gg7m^q!k+lxs7i1rK5UvG2 zCfyJkEfFYs*Iz$0K&Lj!X4`-qK5dT7MrfIChb&hhb?R=4?8_Qji1Fd(@F490Q zv{yi=NDSWue;*~Vl54#EnfE>P(J?`@PUzWp_auhTjEX1@sOj2c5OdDsMKT7!!$H| zCr!A;BO9DK?FM+033b$J>7h}Bfj_lZ?Stev4lL8hX9|{I1464mET<*{>g$sb5c8%; z>NE#xkS_v-YwnckTzYZ(bbqZmI!j!&_!_Xu+ykd*2os;N&hl1U-(V~0=CQc^49K}U9%1E4KiUDwR*HnD z!b$$@GQTkyHlaD!pOs=(kNc1P!0fXoPXap&-e&J7iZe8w(VSct+$_dzf#6o3<;=%6 z*sKqct{0fi%Gpg)#y{F>^X zq!Kx_(ph=@u;Q>QgX3{cxb@FU_4ze0i_TsO_KPGvg382>=05>KmItN5AQs5B$_JHx z$+vnOd&`(ZVP$>zHJy+oPE1+0ulXu8wEeL7YM1lfp;jK&SKi*wVX0+{&+4+L6-2hV z5tbnFxghM3W%sD;x9bz%Bd$HeCBH9tgbqSItZe2;-7}&uw{9%|B2~T`!$M+G3{^bT&&Cvx9<-vU@^|K`PPRFQ-DR zHFBD3W_0_0gG_XH2*N49H2eV!GhLExAc+c^ut=nlCN@GYR;}~p^lk+xTw^FF^rZeM zG!qgb$lJo+l%GXNH}Q{8Bi+}SLmL5&*so6xve9efhHae%x>$N-zc0BcC?{KUrGsvij%ms-?eEoRW>#+@wg6 z;&gaS4D;(-qhLErzI?E_b8|<_Y059koDfD7a~!~Y+guual5Z>Ny9>zvZU~xa7`%of z?w{7z%o6xPhYv6{E|e=Ml%2xAo9k99HsQzo0LNb|QS}MHmSkHEu!0gc2!V8*>ID?8 zp5SIq_B7AA3D!uH`2h>*tyWlB&OrlZu`)6y31BefkF?FBn>65toz9~`a!tn|1!LVt z;Y1nZKsix>aP#0O9-k8`ul#yyzbl<>tqsHwvdk8QT!-ntguPAjw7n>fL~NljO<>dD zX2a(@P`PWfw^odRK@R9j(1O9-2oTM7xjx=XHMha&GoAV32$)- zuRqi$|IXH!{CJ9D$aF1qLxik0HAJWp<6U_a*?IXr)vKA7koQ!VNk7&We}#Wz?OL}b z1hu3Fz2{8kd!|k#lwjqDdhkjlVw@;qVuQY!pU?u~wsN8@x3yhMNg%E6Qb6H;ftCrP zi_+A8ZG)sOP&%LoJ#?tRh9cdwcG|%J*Ajh;+pPm1voxNj*OrD~ zipRu}`81q#vR+ik=_KpS!+%V!N_}`pvNf0%C)g5~eJg?5{~P-O%?ETM-HQf~xm>h2 
z;YT~5oEGe`a0Lo}aU3vAvHml_#gSTX@gMm{E9^%YBp+;qja0> zL}1z?Z42q8mBa~TejSvvr+zon!juM(yx#sDR@$C*pzR)6u3ej?r~Q@>-|-cU$;Qyk zmKMo@alNm?0p+4!2hPS7B1WX;l|Y?rB?nLTLFg#cHqUd?BYTjPflNlar*eekRUi7v zWfW;hRO+1%6s~#5?2&&AdLUTpD}tDaZC*qnG3eW-kZOkBUmE!Z1ESI1S^xBet^FKP5N+)QKu)7a`I5`UG+zh>uB2XK@{ zncMxe+}@N$I2Ol^#ff&cYvS*wAhFI9W7g!QSmV;Aw%J*jt_#raJRNw1d=cou;#9m? zd|QHcU^tcyVp!A_bLiR6hsU-n@;Fhk`E-39e9m#@fZQY1E zKnPHMkmfTMio%L*UI?0g(Ry_DZw40bwX&NdI|NFQ43=i0ZUAk~C|nDC!K`s^U%BhR zAiI-kzz-e#z4(as7CmLI6AMPc4eJPQwe>y#?ah z*ELjTnN9*L@YtH(k%cC*>w}3Afs|V2qo# zd>&9+SyO+c_0hCz(Co3z38g}zoWf?F8@+f^3S??%(ANln_$fY=J5r{BPM^L$#}G#g z^2|yz&tbhQdkB_5LrA_M9VbCQ92v_Ywhw(E9emC_tCVF>Z+%YYvRCYKt9fR&=L9WQLeo( z4ErTscO4XzI?VyU6=)iGa~L+jlD9oZNXbaA+OQ#%BZx~t1JK)Vv*uZxm|%0bkLKl? z|IB1;N@0sQ(WaD?)5HteJ$}u9`)81>Xggi2rD{mM?JqLdeYd@vV7`ZRr3c2t3SPxlMLZTRvxx;pWQxcM`>Ec(mXmJs@j2-so?5r*vvxp0AvDT z)W0DNWxi%C`L)#c#bELo(@VH9FZenu3@42fxRmgyz)SWV+fsqYRMgvGlH?zKT~OgK z+wjKXTz$K3J7Kp0w*}m&=(Py?Kle?{y6GzAPXDB~Zg`xWLKHKDjv1+*`XU9V)B0x0S2}T2 z%B8+RX`c{7fys3{pbdwG_OR{*X7(puwOD(3JZ+AFm0S9t=?onwX!V>Bl>3j;gOn;N zWn)IX1r$Gb44kiE4E6?iuNq*(J56h>4byiS=)bCG6kQe$G6V6k0)t}+dgsd}iu_QbiX?l>oP8XMI&vS=d`}}fZOe9~Xtc=7Pi@iALxqKp_6+Z!p z2WN=Q_0$ApYF|Bvnvk|eJW*|r?3E{@_5>6L<@XGwKo(P~t&l<~kCFD$JPeH(2KE); zphrh4={*TzwX*9a_mn$7Z4@yNM=NCe&C-D@R9J z2qFDpnbaq8+Ze)Vn=uM^l7Gowc1ZkPv~l^c&-F+NKiVP$=c>6Myc{RnTe|*&|4;D( zexbPT<7<=7HndIxm20aVr70bS;`ddOr>)=008b)cpu7&p8;!jlKqAQV@*qn9;`Xm! 
zN@|u@ZkL*wT3JPvlHR`dFe;;*y}|82kSg^FPipjfigkQKZIT#U6IdJQS5|oR6x(7x z$LFiHbJ7VF7=^vHT%MPrQTMo)JgY@sa`+pen9|w%!}hKN2LedN=-J$mHzqY~iwm!nLMyYuR4BCt|j* zlQ9w{|2;PZ8#7D?k7AHLoFM6!t_YR#yr1MTl1hD$3viJ?1LNM@`x_g=VgS>3FFdiH zr=2IJ-0d$p|L@tr$!wMc!RKBH(};}1MC-oNz%gA%ueDftrCRi~$8-++Cv_m`pIVT>lQU`R!{|k>)GS-5h%ok6 zkiPQrZ769-I+P$E*;k>Foc&-cBAp6z4eg)~+?DUDpgSCBT+;((ObTx842Lfec-qmk4riS2V?MG%A0dRTb{pTW-zN$9s09FPSop2I>L+LoT6{AU5&l7B#YAbR^> z0VDpu64C}V>x~z0IUZv&K$T!}kOW|Ufb^iBpUvOSN!|&lOkjBCQzNcr188a5dIVqJ z&cG~aC033oF8$yUAEgDF%WhHK%>NLy%yFZ~6L=Y~)sk5}2awbCXDW}PV zD4~3FwKYoDbOE$Q{8g(w1d|RPwffIzU;&K&na+@c_~06ZP|lr#V-C2RiI(-|;6lT> z$s_g`hGZ&_8W*$*dswa;LUS@ShTv^qMf9k7)WR*$I;OgcMi+@)^GloIz| z3Qk%hIPM|w<2g!lmTlXm`=`fZr=h6l8eYivW`^rn;;-hMY}rids8Tv}kfZJv>rz>N z)PYUwJaDbQJ+|z?T&#T4FbF`&-IuAW+=yyPVxp@jyXi1MW@IX^!zFM98oDc3jlNhQI%lHko#)_nTJEKF| z9K!bLmk{PuH&1Z$09g~yn`}Ofu6H|EW5pnWJfQVSs4kh!T^FCUlpzW%@s+wjo)mf1 z#&wDd(C#r5-v--;-sK8}dC;cP`Kd)OIBg%Ty!P2g+lSbK_XtXzG#<#sS~H1pk9)|c zQJQLTZqju>xv;|GDRMnCF9Vip5q`C1wEM;k}m;$$Ae*7@O8+*0|A0Sfnr zfra}w0Pco1-~fJ40?AmpZLI8HI4NuB;AnRlV&ZqfZPY+fOueo(b)zaHMM7G2o~+*(2SVgG6L6spD7^Ri}njiXa*^A@58-of9S@r zgl6K5Q0ZGSAgm|+=a9V^SW3?9zgwZU1Hyu)X{`j}6H+!pxK(cn<0x~Ovc8o33!&Zl z*U{pdDp8S6dpRB0+IkeGlo{gWF~5%4D*9w#Q0R#UX{>jUOi9ElB4&cFX8*xiui54J zY!w>Y#yJMu<#%gC=_zdI$-}J!y=~GzO=jO9yrpS{`prHuZGX1K-OnD*`n1RLP&n(kC4EQ!qkh8{M{oe^X}06)S@Fj)c;bIF`p`(6_T#3Tr}-c&A$_K^SvQa+Y2&){@r}h`A`BjA zhwO<^5{btr0JV3ldO@ZQMq8gFs-3gjq(b?OlQd>yJYKOd69hFX?QIl~?k(k8=}4SF z3g&~dG^iX@>F!&r*4n?)m|_FwIzea$Yhuog%!88oZkZ8U4#|^~w@ci$NscXjzf)71 zrWQOXni0}4*fX-CC}JwA)*9oU^y9;9q+qX)^QVbezO=x~Pk=TQ6xxF0n$z=$a%5=; zd)^c%5!C5zU-d;+hkJY+^Xd>nA_nwlF{Otn*)!%F=;`wYz}2*)Z^}_4$u9n!sVwUsW!t@AtPw&k3)3E=2iVA37k>bWN^Y<5#FR>-)E9yrrML$^ z1&Xqr2_Ag7rcvv!Cq;5j8oQoSJgD{I?w?2RpuAeA|M_h4SbjZ z^l4$CKQJ1ci##AivNb3T1nF(I`>cLTBEnh+q}@6=Kt7KYe3fXnev{y~m)j-_UNb6o zl=Pq0zbH75V_CHGZw(;)vFcLyAa|V%mX%g8y*vy`CTT%vXXw5W1C9L@(M)JSfo4KT zBQgx!_gw(Kb{U6kc5ZF_mOE3WxSxxP*_hsdo;%70*ltnf|eqL zq*f0f!7sUjx-FwHp8nfFrMgAhdH0WVh}=v?JKP*Z`Tr0~ 
zPMb6uC}*Rwtq)V*dgAa}tjFCucj=P*-`f`UMfG!G0!$=JQZUW^;t+Xx@i?mrj29B$ zZyby9o&Hdlo~s`O#ae#w_lIlTXL#pRE8PRabs^;I7yg69`ZtPy+@%=>J!1^axG}& zeZ}|$!}c{?Kve~b2hd0k}_P*%D( z0~D@PW@u-jax!N&IOBx3miW_R$N}KK50V-*`)CnIR>;?JNt~cK-!q^kHfX)M&jV!a z3LvrX0Z-Fw8V_+U@>nkj*U~-RNUPr_>!m>Q5=udX|X!qJo!l3dJSZc>0@hC13=ky89;cII9GBG+|Ekoj5bDX zENU|#><>874n{$d(kSu*f1hJ>$)siDEyXQSFjHF_iY}~t1N))*F`gl(2I#GdDLoXY z73(V|C6>A6aXZI#EauT}VJ6lhrojM*M-v7PHuc2*{9Ycz$a1##+h0LyR3CPA#K}yw zz?HM;eZM0PG z0p|yL8nH5@onfuwTTWktd@m)Rj>~{H`MoX=@aPqvRV6rc@`|v34V5OJKp=x;HCP(3 z09ffk;4{oa0N#(zsE$AKp6;VPe(Ml5i_CgPzHOA*ARzSR2hr@Q%b0ah<@aQ8`|DII zu@9|_<-iMt@q^TcitnD;2%Kx#z!8#Wi) zkD>Y=|9T9i2EXdjuYJHbxO!bs zQEw>?PyUffA?;tmBHkR8HZ}!*k4l@Jdl2Z@u-3#n`WZJ2?GgmZ%SP@o#mLN^_L2l6 zcia3hn{O*}4U@Q~gL1|{6U^??c_95^&7uJyrjg5Mn6aA?+03NED_MR$a&!^%?*@KS&``}zu0(GP+&==j*y?asq8szc zXVtku%^#vwLyy`QH0rN|VqrI<8GH_UO0gI?DNwFElc#Lo>sbdaYk#$l@iB)(<+1lAP7aUy{~fW#UOLq=6zPug5L8Hg*VG zh9#B)WPvQ*f<9&+o5D#TGxGI8;Z?KDgvZE3bRi-ImiEH1C;p`ElA@=Q&;>T_K~AIw zJW!NgW4n?)<%oKnJ9$=x2%gb?ze-luA(mNYF0LewPHI%7aLNIB3 zOQ^F?^qSaUm0Ut*DvoGz&Y?C=3_^7;z*~n{~e6ev!siQYd1OJOLLB zEX74qjGr)4(viD^*>=9>j*w0y+|uy)zr^v9_@=so`2@`PZ5CXnm?@yKt_rWBS0g}U zu&`gBAiGgF`S22?vs(zVS(V};I#x;liwQt4Z@le%Ej;LQ!are{8-Z% zEkI6U`*HlxS7o^mZt1`EttGN{rYu2Heniny9vxX-iAECYDfP?0N4*~l8kYOyiLp4RgPUBulhX%3VYl(G z&ZkXYs2@XCw}XmZW`_c^JJW(&Qi92Rf)5?#v=l5zuN{!f4gWC{*)Q@)gOcQxbL0ZQ0%_7tXntmH6O#BV8 z4~GAFtJwIg__7Opvb0N{X@Il8B<-3lh-GF1utt5nZNaPomA2hi@8ba+sC~4Ej*TX2 zP$%>F0Fp*4v;&pvLj*13#l1#6^8m|zHZ00uU%)P_!@_+W)pncJbJ+q0Mt5Txxi=Dj zlnWpX5nHtZlKyn2QO`9^jZaCAJ#GF-RwMS8_J;`@^18ih6TEovrb$)Z)TF~vaM_pI zObEVrv5eNyuzX_|Ym3>9PM81eB5=b*>w!|>kX)VO(BH9xvkH_p**xm`0;zIu?DXe=hU%_V+`&K#{9H1V8CeOAjuRT+u+J;dBSEtD^k*p1! 
z3Xj6y@dP>Zoo=Ibi03K>ugj$2y%ox%F^=mYAQq$~K~2BCcWs~J?dqt}sm-%&M|TP(3@^zOmJbSU0BST z5V_z_L5LCO_VFrYLtu6V!i=Tz5PbaOvJeX6>~}Qm_CyJq5beLNs7o_bto^k4FmISHt^i*SxKD>{`lPzX520PQu$dom6?3YFtT+T5yA_ zD`^^HJpQDpB5k^#4cJ+s2e7ejK|H8Sd#Fy48!-$S1Cy^xfH0;-Te!t=rn#ewq7s5EW#?)B)Zm*HUp)rEj09@+_N^qg zq(ZI*EIb2RcMmj~yC{pwM|(B1%eYk>Vh%`aR1V6P+nUm^oFJ=`qv*(s)D)(l^3x7a znv2@nXd&R~dw8)eX6e<1l123+SN3&sRaXx6Dmxd9trVl7V~vsR=;OB5TEve+VDR zzjnVm)b2fS%PKBzXBzVI(gBQ6sFo%L=o`{SNJ__`L|l?o<}9|aH!GgWoFmEOpxFK@ z&x7R1C%Iw0pQcHyB|m| zcI*EHJ$?lih~5WW*Ug6X^$|4aJ-PT<_Gn(9a!nxl8s2=$!}0-r=9?!Sa{2Wz9?(a6 zU^bi*g62ER@;e!OPT#|e?QPL-Q1Il-EBK0-PzdY#*?Kz21C>=vPK8r~d2d76Bkik_ zT6{4TCYogCCwt)y#$O)rkZj;EwJ7-=$t_BuLSsf|^(>~T3CE^AoA0*u! zhs1$2JlvCh^tbJLKufuVpsmwVe2H3FyEqlA2H03B^~fjttI=r^G2dHDB4aOB76Y{2 zqVZzLY#yu6b_~*f4;!=UIFbtPsP^_zN;D~jVDI@m9zf-Kfy6>u7J{&`TtE47`F$^f zdGv~ojvYLDx`vbB+i4{1fc3B#Uu}OwCF(xxu@p{d$`?KDLObcT=15BBqf^o(9%E=( z^fuBqxMCp`Wb~4x-^KP9lxqA$z(@=Rx;&FURR zc;tTDcr66b!jHGU7n(m{GRuDq^X0&7a#Z+4@c5OUfSI3FjdE`p*l93eQd|tG5=U)_ zzrM^Phf)M5)rr%-OkjEZH#6>`&2?ZrKS7tJh=>ladF{4Q4Af^9CcuO`!ku6jKEB7r z8#4bgKW{XvuIaZ1za7A0U#b2R+STiVTG+2I+McH6=qX)^$(}itky1meTF8;`hr1p% z5Yl?J>3C?-K4xDiyuyGFIO!%d#FD)U!x@4&MqJ~L1w!w|fBTs9$6kc?ThkW)=R zFR6`fNu89oishJ3HYTm0quh{f%gX4ulY*yq`0LgNg)-2WM|WV`v6v6pq+{R9oemf? 
zeMdDkM`uO~T(2Y{f2%-$`Jyzh%(91w5L+DItcnK1OCsoEITso>(+ZX@S*&ES4+^Pu zYSH}SAjG(0_bP#1m8ZvLF~+K59{)0dz0k0j0izJl-X4-|Csou%c7PSp@)H3DU2e;8 z?hRk77W7Sp8_3;z;;$c6xxAJ?rm1A@LXcIgzp9lp655!L1RxLEO7Jj&FT93+ja`!F zf(A{Mh}Mx6CgX{RE=cZC8><7nYnLDeMQ9CB@J8sIpVpVRJy*@M)CL(pmBhTIfb2&l zX&EZx3S@#j%Mv7h=C$Si@tT+HU-hMZAn6~(JLy`JU{?hR!jaboke1z=v%!Esc8v>~%Wa zNahan@I5ZeknnU4{&>#+T!;IIjdi@R#{MKu&U4PF8CJ?2)ta=Pa{{ZqgNT_NgTOCb zg3d9Ol-6kD#~^hiM=S;Tdw_c;(+{1&XAhA zDbPW`wID38vY|+s?2Hb~Xlkyln!pab;wKN+?*zv>4kp92RHqhZ)gzg7B+RS$^k@`+t3aj?N(s4A2ku{!KhV#)JWz#X0Nnm?N~=8~tQ-_S zMM$RwF*xG8`i>_kf0U4o+GNhUS7hbmO{n-6RJlvU6!w?AmrxudN` z<$AbDNn-#yoDv;mWjrnlgogBGjPv)e^Io-r1^^-v&`7nkbAH0*G2gp)Oo}751tTEh( zmQsnHb{w@jo0udp2(B418gTSXT1iK}Ryv$kHccKV@Xf@K{#1}ONF|E0)y;|V@KSE^ z{FwT)}?qG)rhFvw=>BPhhGFo2c|qk@T>m8V z>;^+w8^Ta)l=w>7TRveNg)?@Juj;;E2_CoH0Q@Xc4IZ1kab$-M*&oaT?QG0bB@)`j zW~{t?s3Glbn=`|g__MCP{Y&jy)|<>;ib^)gz~nal;q2oZ3Ee0iv}4Q=qL2^UD}xK? zseER2SS>RCB1vu_>p@Hc|3zu*i3(z7D`=E6w;>suBfbd=_gatrw*Wm7>D*x+xM=ln z7#Ul%!wnG%iLKm$$%m}?RjhrVAv+O|by9B*Zgz?u>W!fkG(y@m`ea~#Hxu9DbT@@3 zi6d#Ppm!C?Ioe)}&bA1Y>*2t{eP?JJ?C7#U5-C*c^|feyB3&yy$b8t5py&`L@P~Xq z#aQ&!e6}+dE!aN}D3FmxEZI;ln!cSVtW6>u!o=ahjS_!Nc#n_$D8+GeK-1Z{!=>Y^ z4^BIF6tI2;I25Mhg*@$YlEc?J9itXUSc%z1kevts^Ru2KwtxJi;y{m!Vmwe9aAiKF zBjy4qpggBz+&KcBqtC=gOz6^dAnTKSmqFwOKFKk=^f4BNhw-xI-fVD02@4>(aD=v6y?WLi%G&$ zGxBWh5?AMzFe&ovu%VYG{Is$GXfz?{D9_(8MWH;8aY}S0e}xjk)8Mg1CqqpBab0-@ z-(f_*Qe0Un8^_PUay#j|nWA+Dn+9I)TnOu(uBK_ff_`;C!dE0Q?HGCR_>&f(L0>wb zw~sIg^lX9aQ-DkDJ&1PIoBWgdd35ZO;|`r5EmNR9B%ubJAAN)?rBS3M7(w-G&5as2 zliw;iWmpot{6`-7bwhyhq^)!jCH;CpZOeBzO8D$jR4NkH;FY$>_@5BnjA&Jb~myju^k@oA3uyLgwTllOHK~h~<(ovCeYsy1LUtfnJpBCc{QQfnr5I64A+G>nR{@ zdMwX2KXtNbkfA{*Y;vj%_Kq&lP7e=g+AKMxR|PcymDcS|+78AeRkrdX&9=`gWpu61 zS!xEbJ!(Gy8eI<@v;%2Nj7ps6-x9D6Yu3HB47eh2Oi(F@BVcAlk4Aa6oI|%am#0_f8-;rT^@Yv&ql!q?R$Z%D$+`rU5zJk&( z_C22%ma zT2apGJJ(BX|0Ve_H#+G+0AWC$zg%PmkNONM9Tx`bbR%gf+|x2?75ouuJZ75JApZz4Zn8uz}zU0nutjQIR`$nqKR&^_cb|Go!l&NWbSg(`no$wapu^O!WsWRrr 
zpdB~0qbAiu-k$p`P_8K*A5QciQZPp*+a&>*CjQ#lBGI)11_=;yH(iaGn6gsH#<*$e zsS&0KD$KO(0lr4V%OUn#3Ex{yaSsFRs$?FLvc&5(8A$IFA|5DukA>?Kj|(a?8v=Wh zG^vP?giflL2BB@F^0$bL&s>;ltzLdWlPSM5zdKMWxI(M11QzNVl75`D6#<@v-rAGK zB=B@v+Rsur!E<5rYJ8_nykwE+EE|aI4I{lN8r@NNO?#qx;IgJhU!^g{4pOJ?n81)o zPHujtZP5gB3*_#8A0f>L$ba4H% zroeMWR=ZLgzeHT%^9_i<0a!mp8V<*oG<`f;MVI$GE8O#dVwyF zvyHPQOP2CeK~M;4+I!c?kLx`0*iCzx`j=WTwSUWX_7g+<1?JQ=nWX#bARW z`5LaPR*0b0ai7G~B(cnolV!*;xlgRVp6RBH*ELHss;D&xdQf1^qqd(4=`7tn$dEBY zOr&Qqqz!@?u_b#3d07k4S=J4L0{~XGJ>WyzcS=W-0WrzNq!H|{B|}Bq_k?6XL&%L^ zCMK5RNr^yh5fWdf4t`F86zs-(U^2BOlr%$Ck{bC~_r47cP*1*|P~%l$82|^Q2oDAF zCA)GvQ28JWK1y-TX^@T_-bi{7#F!*UX_ivLZZAVMap(0ou5Hp> z8ibDfJh=^iFcqJ{_+#B~!DkRO?6dd%0qwv~XJUo^cHRS7d8KWe%3$9}<_@1LUR}Rx zA@D;w91et}m%$hslzxHr)51qo+G%v;ngQq#TBu(E_B-zA{%tQ@oae!X@W7;nK=iG( zXB7X`(bZyv{$$BN$}svG>(+Yu1Is<;b%K4+2M*7{AI_RO)<+p8AJ!B3!4I6AzGf8^ z&?F=$uaPRXsezcEQ0h^6t%|NVaUxq)lYB`K&QhMxubpofk}`r$&lFr+iZ0RBJA>|B zrW?l(GxtWNW3@=|(-yVS9=mc2&|@Wc*zHv2W=qLlIdSSjEw6lE2WG_}@lHzq(Y6*H zE{|9{8LPY+9b|44vbYJ>#NVq2M`#y0b=(vANZ~sQZKdOqE$8|^sZ$5xZwlN*=DP6f zE$EjQhEJBx%7ejm71*n!j~Mchk@lDBcpKi1Lr(e+lp!sr!>l9Q%$^#$ zQvSu;Pr9xlfKZqSh~0!`*F-C6=U)0l1FTIUfXr9hHoEThHTF9q%5**vE&E)JgaK(e z9~=3F#ur`}VnC*h&j*3@Ha4R}S;;wMudU$x$3lH_&-p6xSRyK&XD!RxOCe?lFt-z&Va_%kj{a)EXN zj3*w+e$eEqhsQm6rWr|T>2Z!3q>})UTRMQX7t>hN6Y*e>Vk;6}Dy}Et-Pu<24MUSv z>7XX)|6I|-XmEu;2+dDPb*}{QHPM3BImPXj5`PzAz`FHA5SneJy1j2&W`tseiX)H= z@8m|~rF}cf*OE|ya_vMZwzg!IUTgN0CdNJUNX}$$Im;^uDUp=kdk#; zeu1ZQtm$j*q(1i22g_2&GsNR7z*-JS0w(jIKV*t(doMe#R6(GUBBhF@KB0CGkp1oD zDPP+`DPsuo=EjLjPIetUK3tIgEZ$Mn$XQnqE3xqeNo=*|?~rHWyuxcKBN%0n8yZjd zUa+Qyo@VB{HOJZI8CGL-KG;sPmVSAS6i&X=0%yG^LXZ zAMr`}IYi1;AI-dFwk}QTrH<=R$=8KD;#5=11&XKm!(O&E0hVhCL07oI+IhEWiR<{B z6q2l4H%LQcC>5;G<_^XHoddeLpxHrz!$6FPbSB|^+DI3y;!E;%GH5a!-$}6K|IMPp zYDw;q*1Z%@rBD@Ro5{c}!Q8BgGvfyB0ICDUqc}Bcu_47In)^8f6t4DRq;Tqx`v+qh zRqK+1w;)_kglww#;&D;|THNmFVwGv;f~%BYolvA>f@do(-RDX5L4qU;hj@3RwGV~D zhl_&>Yj-5~&!_91y5yJImW}MS10(}RhwI?~KUB6?j3@?e;Wcd0)|yQ~>JXgCKXU#C 
zKDgvT1PN)^2@Lwl9}U{|LWS&RO9awa-l1cRF5z@r!W=DXhXy1~`FSOT?h^+{U-?lm zPEpsY4s!_U*t9GNbx9+x)Y9vvfz3#077Yq@u?o{b9}W7CkKc}#gmuF|eVoq`~}L49zpBfF<+R8wb_?f5#`4?S-YvY00B3K}S9CX*jctJYW&M z1?xl0`)qg0D=8t93|dKbd)4@&WRJ7or-Obr;1XLx6Sp+^8`p($+fks0H>r+-Ze4F% z%;!;Dx-OpAjn_!LwO1wK6=}|LkG>k(>;7VeUV(p zeWLB&EbqBKbPS4aFVm8f!SvPv?_u{kc#qf>xMcq)O(Zqj7>R*A30NLlbN`&8>HhhC zc8L`x+9MC7z^>vAfcbGCIo3E54?$@eA3`f1a>!@q{(4`A>~%tj`Vl(dW_Ds26h26b zFtJVI{>H1*9$y#f53varIZ&<#oiRS~gzD(3%@4w@Kxbu=z{A3|MVs%mwDYdypSJ!y zbg~`~--?)1NPf3Zie-(Y7Yq=xD}3G-5X5s9NpRBFv*FRV2vn{|K?K~3!OX0hB8y-L%3M?18w&Dvm^?|O5P6urW_Sxq(Bza6k#R^c44 z@S}co^iZW(Y#1&I3M?GrML8iad$5!Of0f+7HgZBZcp7>!;a*i?UHD7=#$RLNZvYFp z+_syLj&Eeq*Hdvb8Ox8$1Bkvw+=mC6S?HP%cFf@v(6b#cp^r%HGptQkmI4W_ zk6NHyZ(}Ho6@wKrF$`Yyp{_^j6_W%|I5v@!WsLw}@{;z45V8ZSFh!JT9KfkQkFtym z{8;8Hn8{agIX?!Ybha%`r?sxw7KFL*$ykO8lxvDx`C#c-BqA@RDi|8A^)VkZ`!4>LMH0;Zjkc>?iy|@t%hQWwi55!vDE-l-r9Wm)EZ#M#E z&v_8s7oIGt)xQBKBUdtHuUaqT`0&J$j0dEbYT^(`9Y2ICdSegj*+neGb|_wEL$M~V z7?I^IGCBhSlGEgxvJHixH0*1{LjO_FQWDe_c>31VY3(-|M7drppq7|V_=vt<0#wTA z;A8JAGHIwbvjyETy*o6a$z`lz`ubF zMn~DU?z{Vwiv5t@$v6<}a4AP+R+7j`*D^XD@K41}VinE*OLfh7kJ-co20j1N@^;Ey z+Huh7##gTEK9AQ9IWN;;M9mkDUJ!%%8-Pk4OhIhrm!{gaVzHoHkCxO((+6)L4vtpO zfjM9E={R|a$;)=V(c{@=rGJ3@c7zNLp{o%E#)m@lq`?W0=24alKdlnj)+b+)-sqt* z*g@wp9(0nFn_N$!(1*7r<)|4E2Fa7r@^=h?6k!U6fd3`{J)E&z?%UfhMeeJtlR;Vv z8E(sCg%@BnS8S*xxfZE&p?R$io?SA%Aq-mhhzO?Hn7_29#Tqg~dw(S>gv;*ysE`kG7}Z)wNR zSa;45w4*F}{rri0q9y=qgHEhy?P4j5X|lwYgHUE;fFw4o!(Fvvl#R}lB#Pl0R?yL! zCJ*k=f-q%WsO3ElqU5N%4HAMFfFj6oX>Ee72jY08P#u&qt9{EFvUrRTL8+mpE7O5; zIzn<4Rw6LoO9~W1HOHkXy<-40qt3KWG61E$c~g6*MJcFU8<2t?zBRXlW9_8_FUVam zS@{$pDFI;8_@LBBkJ6pP4!#aHmL}NmP_$4U{wmLvABs z59Si*34K(6=M=kx@0}3Yg3&yLb6ka%pA4kja$h5of5D))$wdh1Po7QcEOOLNRNDdP zA>Xq(x8dDgAsq#n##xCdDeDqKk7#02SVt#dWt|R2*8MG@1qyrl_U0QXO%;IS`h0Zq zmXaN`^y{r8{}xnFC+!g5P}acD+B&7Q4{eieon%KPp*^&ZlkJlREwcnSnZFB#wQ!%w z>lz?=Bu$R_@uoCnT7O`ltbl$vUPQj$;u{3Rpzz|x=a3fFs$1eo@-s2tv)=%$4f!7- z2+&7bc=|j6n$z$E5muy(2L0fpxdieXFFF}23>JT_Y=Yc6nQt(L=KNe=<0m5doO;P? 
ztt?d&k03Jh*f;VgH9ymBNWl!PtDOsvIeA$P=-zL#uofWC2cU0G^T9g$PzjBS!cR06 zqY$JmVD03C;#F-&w5r`?vfwBElRu~QXPXGe%`Sqo$eD#3TK0n zaGag5Mv?%mJpw=5!}Eu(8=(G#7Pam{;U5h|P$457(50wJ3fiHU(3!(jw^*==Fd991 zrbytE7C_#@v`Su|BsQ08n&2eW8JQ7~%nYB4HZQ()obJ*6;=%o6E~GGf`Y-zE%>z24 z$Z2Vo5+@Hj2Y%pvM`!T8H8L)|i)0hM&hGX=XMF=RtR(7f;oyfMKbrZZp$!^j&xhr$ zD#7JIl8@75<)A(9HFPNvsN5p?_j!e89AmN5s>@;m$ml zE6py{OTh`UBk}t{NPQNPuK@r5?OlnMOb`KaP0aK+rzeRkBAe7E zyNhot23nNvOx((t$Q z;!U6oMqjfKbYIINo!fEIlya z041J`Jn;B!G@hhsq(c)H+JW%Qr;Rc_cgt?try8gUzc<0L9&p^xn?0FIl4Uy|Jz&2k zhmZQCn_`&Ff19qgpn?aBd@WUlw!Be!TbuuM4kYJX$gaU>s8Om|4pi>rvvex$;35xz!KnMgKsfiTWB@F~{2DB^%$vf0 z@Qt=H#u!rMi^LC+t`AK7c|4H=0uM}kI2}X0W8d_@P!0t?%JArdYON9mk=`k|xs&2X z8b6zZ4Z+R(-TzQmy(Z=6i-bRPfbOHA#g;(woUdW?-v>z#1_DVJFOCG{vL%xxmNzSg@ZoEmy8j;x^NBf)NGRghd z?w}Qs)h%gIeCute`TJ)Eej?6y0RF;9H(VQ57e5E}s{+w+AMEltw))OJO~T0GP)r&& zLh>PQxe&Zum28#0ngq5uV%+2nX@Eb%5%&O;BMS2FV6KpK)$BIxu)Jzy)k6TbYP*cjK;m< zil8aUbZjVV18v|tb*&MfcQFb4ko@aSuX#zT;dLKY&aBM^4U`>i7|D2TFRqyX2#BZC zF9*B{4sP5(fO4NwkSz@vlaktZo23m2aYvsKAN!>!A2|wK6xNTl+@g1~aH3H*eCo(1 zV_<7eo|l>J(=A#`iM}a3lw>EQ@=m`yk#-xb5(7awC0dz6pH4tneCT~6_#SF-7y#a! 
zUSE$%hgB`%wGB8=3j85*bAE-$(Ix&m`iB*mj`lp(PYNr`Opx(bcw#W4 z+ZW&UeE-D~JZpSo(2?XeR&ua5Hm}En_RNKyXsty>;*UQMkPFG%d#1C`+YR#NR)!VW zO+O9{i47iR(;orC3oTRjfHQ{`U6gdY6q)$*P?Kc`DxbH)mv29HU1;{;8wh)ofOdgU zMWx8O&p_MLd!F=5I)vNN%$~*+dn}aw@M$od{!%;JtCxO%nYEMGg+yUH; z8qeWO-25|`81(!C*i_r`w~;@X-hS*O@x3R67fYT4EId2GodA!ehAy5*CR_j?2AU`i zIg6|=7Yk8r{MW&`Fin6az|s@Tw1C*UVM58K(Y7HhFz&Av zI4Az~^;58ERvfWc|R8^|NirJqN##o7!2aCgVJeg zw(1m!I2EmM^o_|fw2XTob^H;Hu^WKORfUq!OWuVL|AWG$#uT;tQkq6G0>hAt-8C=D z&H260sPsWQFZ1$}O`mb}$;LQ%3ZwxLSwq1WNd(U3sX_rEqz0Xw@`t{V1nE#QCV95%cdWfMBz=yUlKewq@&|Dpf@p6nOP2cb z@4xs3I=%iM3=;WR)v;$69=2~H@SFI9H#>C4K5&`tl!Ka=YH4T>tSq*X$84|-R>T~hK6l{#-?SDz_#o9RjbBH12rFvquUW51W#Q|9cu$?d*W(|2W+LW1)?d z2uvzn0frX~WU(6ueNvVwgctdfio_p=VOwX}Nf4%%>O#=s^hP3cWxUDH?VukH{3ZSd zL*dDTSf!cB8jmfhn`SRiq+mgu^IFlg7gk6BUe@y5a4m8cQ1-x*w#!>9d6N+W1XHKXrYEl3MOXLZ>a|@D;Yk1 zH0mNI9%wOS-;WvJ_4k<%W&TlOEqUsTsjyJDSn~J@3Y7Xk7#MW2h*v3Mcr{A=8<cu&{b>i=Uqz6Dljtc{^! z$btUJh9f5Z4jLhr1@inRD0{&LC^m&q7Aj*n`fSC5C5@-CE+nO$^2Orc_51yF{cxb$ z3BG^_Lvh%n&)-obDb&rjiHF@%ou=2Ed`$wlDU$Vn0wjC*0G^M9kAKDQ07SMee4FST z=Nr_59~gw8TlQVcs}%y**6D&WoehbPNFMP@?!`o4P|hg635UtFbGmU*tb-(tA=vx4 z!u@Fc;m01$cm|_yYl9C>2V+k92$7#EBuPNFfG%B5sR(6Q+`9oz`|$?}1NW`RCv1mB zi`3jX3iY>w+m11U0Fc{5G-iDU2Fiw)jopX85M&SiFlDBsv9)sQwN+qthr*NJ|8unj zgPuUjObpq_9U##9I^QixMbHIRN-}iJT&hE3Dk%qVZN3VIiV2qRN|3bNWW}w%JI{qo|0KB%v6;zO2#8O*&?LnL~=Fci3 zcw~PgP`@O~j7SM_fiWo~b#)3Zb;iBx^cszBC`OV73zBiU!`*0emJDMPtqnQ@$Allg z9n9mO9tWJ z1GEM4m(@}X?IgU}hTn;(oHVKkfxj{GyrF_d;)KTi zV!vDsLL)!yQEcza8}_0@uO;_u#k}zBDC>0R(f=uI;qL4;exP6HNCL35{j3O6a?S*ehruKgxNYt#q0LYVKydNQA7Xt#RHOlQZ5Y&?UnXy=9MrMyDHIw3F)1iN%=jFp0? 
z6}T6u9ZR~W&(rk31SmXjh4DQ5NeBP^pJok`Dp5M(_x`1YAqR`7&=^x3LrC^v2*nMR zgt|q@J#)l+y%ixKAoG+l68(KenZ2g0Ie84~`i-h;)Qr{4TOP2H^#P{?9{b*ko%Dr43&pE= zy7BslLE*s$c1Y6aSYFoS+NGgbvWz|3`-6GNzXR}08bF>FRt&|b%1P(>OQcfd4MC5{ zqtCD0Oe@eD0qP7*JIW(E#@Fiu*AP$$HbnrcG>uxP=Il%DtKbaeo@7^1xYy_s#W~6V z8hyEyjJjcM3I^Gt<5ze1fg%gyT+9Vy6D-%l7JV1Ejp1b7Oy2#__GrT1X{YJOLlYKM z$`(LMph`C6cKNpCs}WqML2tHpcO=1HYzwL59G?7>u6qE^|1RheRr=KJ{|jwJ$)Nk! zl?XxQ{-HS6$hhKBpKW|eMF6sJO(i+?w836GHzd(L0t;>SOh}nq*OFec{vp_t2c_hm zcqR-zKo=cD(3hor2-e1ic>?hGLcNdC))U@PZ`N*$hb7!(p3hT2a z@L-Kr9%UFGe%bwh-hbaw3161PdK6|i7B}LIN6DP3M^b_-=!djrbSconG%WNv!46}f ztUvHR(xm*+(hb5_nuF%eQ65=#YpM zExhIyXP^a##Z8lVyU3E$~J=-wKi(mb{kiVo@m5S6$=( z1FJ`&4gEfCpdEDQt)N8J!x7gT8Nn5HTR&Z~4A&*i2tE zoO%>0`1q2w4@2P)b~C#)&s;;6l^pbsun?f!2S@oMaKDcITx8Hpooy+qj)z!iSBRK^ zdmHD5&SdS7Bh9%cNEjNEem>rU^AJ?81ye;+JD^}l5`R*Vp$)gv2Di+W1yfc`E+whF z1jXzkKTUw#1!;*(miP#s9qNF^C#&3Q%g;qYc&-YTc7U}YiOC&%DT#)MjK}d@b3BV9 zzLJVrS&6qL{|c0Gw^A6i*q0-gCQyVPmJFL0izoee~oSYwMOmb5b{0 z+M1+vRgz#9N<3=hUDSWP?mhYCx$!x&KW=4r24C01*TjPZN;-J*DVVtgK9uDD>GU4| zpUod>V=?d9SQ@Y@`S(yrz7~!Opp^vM`ebE25-2ZmClq4=HOl;yf}C-reJV5G(uZBTfbg_nr3f9z09We!k?eF!^))$xK}> zy)hu$5SmML^$`Lg*BHW(vsmp)eoC6fI^y|@zk~tmXJE;o@gosHr%Qi5xM%&#M3nR9 zAlmvN3Owo^>>Y|yFhy-qktU{YVlpZO`S8zwcAF-zVnaB-!}H|jAs3jAc})d*C36u` z#-^afd-cpCZRU24#0yU*Zea3H5+UvRao|cQ>BVA5Nr4BDm1B}6PJCQQ)|^_Xq`Awa z_JM9duVLx{2;_7Don;RJ_kWTdQi_oLpWfx}*#h_f!MK+Q0(KP#EK;x$q(BLH!5dJy zo=&@78}oBlg2H%mue5&`9ubSZMDDmts|L^c;hA0q!~@_R>O_xt*F6g7a}<|#f=66jShag4Ku&%XC;rTD_$Gsa ze0w;Y|8a8C6=@c>#PcpWDP>o&+_q}|EuDxLnovpmJW!LQp}34cmfspLkX;{y$MCXV znOrTMhZk>7-#MHU4@me~8K));wg;n8+Du5Bm0Zf#M`BTK(mp};mb@MBbB+yH9OHDo z3MSa_*`VhG%^$qRCpRSj=ym>#MKXkFQ*2u`_<(ixV?*7cP;6h>AE5B}uS4O5=4Tf} zo4)4Ar1b-%e{Ib)yv83q6F+Iha(HEG{k?l09r|%S->F5*B`}GMd5$v{0>7zRA)X0c zd_o%3fpY!509!VYZ_t2XcP4G{qzIBbyc#uwZ@;yL)q~P!flTd9kQDAj>;O%rKJmwF)kZ}OjmXx5hqS+dw|Q>odM4TOnxmT~ zLFuVtFqZo<%PMRH>3r$J+jOiK1%6c93Sk#OU!g!Fi;`aj~r8=zO@A23} z=l=Vqm`R?QZ@}^AS`N)o@dTM9jnq@cJO`3oN5y6@Ndl3?9qJKget$e8;WZ6I&+pOr 
z6nYn(j*ylLjI9_Il=5L<23GpWclxn!6zQp7T^&ls_X_OMpb7OsfZYQbg}-e!*2Q*5 zRjwyUp7Ep)sho*`q)(tEjYP7q!JdFR1kSI{NQY-S{d`a! z-AJ^xJ)qp*0t{2EE{`oR2|IXGF#)rNHCT9_0G@^7-~a3FlWkMh;e$5oPKZHEyx(yR8YqsV55jY)of9*3IbjDHTg%rX@a6N7U&K?nFbPfr35@uFC{74z?~p1i3UxM? zi7n4k_D3m`QoLTgq3W}GJmEb0b3cZvM*~agoa(xK2py)RE`Qs&*CvHyl z2LEhOc6y0uO&2d_bXuzDUR0(W}Qj<^KOF=M59L+_VMM&TqPh zzc`t;@fs3f$ZbRc6KpwD)M;)dmGN{vLr9jCWjfv>XF-mC>1Jkl(b~IF&`~KHwjk$) zg?h>DarFVEFC8NUd5L8#igRkpx4>R{_jAP+gzM6QkJr(1!N7l-Ne16=0`%j;HHQHj z9)&Pw3$(!RLld8nYpcM!Pto!D5B|CusuXCn!7(K|+G}B~Z?-A+&=bV}CQ2!f9B_t!(GHY*ZLCIeh}eo`Vc3+n55gmPC&^-&hv;Yxf?rQm-XTwOS%%KC z$s`O}^o{cpJ++hb6EG(&A(nu<*PCa09>4g37(nVwypeXd#f) zmiN0z!f_@?B4~*m?5HCJXmQtx7p{2-D)_<=JQHPA@=BJ=)6QVpIJBL0c9oa7P?rAu z!Xxlwc?$Z@zw;xR+IcgCBsmrnco_=GzmH%$g~KAH7pE;aH0?}ANgme9oIpr^xmNRy zn9nRsMBCS_7^+$f<0W6LdtdRCApoAJ`iA8aA6J6UtJ0GQw6NnK43Y^pn$9W87b`tG z@Z%7bABlg`I>4xF23;?mJP6Mo&sm!5(LiS#57R2(Q6#Rrl3+X*cUT7pDsAFgptH{rJIHQU11rl1fg4v^){KYBy#~pwg&-!2ujVn~7eY+_$pM-?6)&@%-RK@@vwh1z z0{PdWVQ+@%vkpt*q0kRV&tb_w>3N#3+g)G&FcuAozj(g@n%^m%w|F1v?5VY_r)m(S zWA7`muMPV7UNDKY65)cpEI~<$q-ftb{9wi5Qkr_?R^a(xNb)tPO~}@CrmG-#(!AR> zIcjE^4NVoQlY zDHqJjz~n1r!(G$TG+<7c%5Zb21|UN5e_aAE9~>VkV37M z*1~|K^S5Gtg6lMcB%Xlq6xHR1b+MdCdf3dEhsby(e%&H+I|Ip0pliFVAx&PO>QZVB zSk2)3SA{+I?F+W(SN*U@<625*p+V4+0rr$sXC@F64Lh$JhBwJq$azz=A*I0iUqmG@ zZj8aSa5^aEJS6bwJ3p7|m>@$_b^mGDTL=F;MJBx@@A%`tNARr(sS=U0$P*_Nyz$u#t>|ggyLwfp@ ze%s-CYr?;)5f0wsQ5)+}=$T1rECQvel>94g2=e3~Jq}5eU3*T3P*YL0dIg>>lAHt- z+N09{(Kc90-to=m_gcS!;G~q=a^A03!vc`~N%en7q1%UAB(?*{6-YhgZKdaRkk-FQ zPFT#8OfAGnHY~Y+7`W{?o-v6VF~k?jzHIp3TmC*upobQiP%%M=w(iKAT(cmOW#v`!f%5xmGkdDct zpdKGflQIXGG=pUM!aTlJq~G0U0i9))JS;V4`y<=Xwr6=8p!&!7=1HE^rX^cn70wkf<26425wf zp~OSUK=l~w;jjek!NTzf-bJ8t--pi*V8~u9`$+n+J$Nf0r0{!T(vO=5FxnRJXhWDn zGILq1B(En8H&Gp0v#NZDa}O_D>?)M}m)K!fAiKT=mFtn*`T*O;5B8|k`Nz4w8?&1{ zJ^(7^L-8Ux&~ zYokRffz_RpkK+%;`oTO8Z>eNsb)0$(jOEyXk zJcD4l-}$)6iR;kZv9(Zc0d87ADUt||@>}2$pob956)%K`$ZS0%@+DJ$+7})=Fq?J{ z3raeRc794xXwyJqV26YT11vDtrbQT?X<%iz+o4m$o2A 
z+ccVOA`vPH8ioz+oEJ2!Q*8K$3!VIM^L-4=7)iNW;OP`p>X&1CAK=CN^zr9>sXkeo zka>`z|09@3%)2EJl0_{FpbgiJV?ZQHKox8qY>2B#l|rDa#f*?qGbY8xl1zxaNuvjO z2qaU)cQ~f2_0cI2=0(1#*PQfY$dNu2`Vy2ji%-XsfshX;jt^RcyeV%+dFmmx3#d$7 zN<=ZjvtJgMWNVgZPtxErQrO9&~BQB5mOItccq~7ToTAj zyJg841q zuXMT_4i6WtW2~e&lOfshz{rr6Fcqa#QB4NrfdI#SH?+;8b^SANHKTRZr^m)zBbKP8 zh-ns12R98I+-!i=;GweZ0T)dp(Mg+VtdkPY(!KON9B${i>;4&Z->1xj(3niHp0%>U z&~#wPM|-Uam&H{t0#ufN@G>5Bp@SFC2tG1Mrv^?Q_x}7fh7k04Oa><7He-9c--7q+ z&i zF$%})TCrLriNBEa^lSv@&RFSy;~$=Db9cN@OOoUjly=IQDoDjOX@X`sD<%J=_Ey^^ ziIfu1D6cuJ-{ep%JN^jAPWe}3J8t5qg}Ughc?x+l?kz^lp;R3H9O%@gzDqEU2hG!S z8_4aJW{mZUeITv*e2zb0rYHVFDElPp<(KX@1bux8Zj`?+>`HeD9a1trq)dh`m2nGC zlYVhcT#trfsxQNEyRDvqYM#B22hAA7i-yDU3^C`ff`$gAKKwpDYYD^aS&pN-&U2fG zBz5?=^zpy*o4Nmg{pJGk`|(wtQz1fdepFTqa?ghB8uBr`n6)kExM|C9NI zrs^-=MpD42Lnleb<|n#H+Zsb~^_IF-$r&iunv)zqTs_L^aPCn^vN>8yCxG*N8Vo0p z@amt6G8%#8e_GZqbEOuk={`u>N#6mO4cG)J%O*?K^$WW8tZL%T${Cf~Kw(VUts-F4*3%-R=D;*+ zEwwMK@tN4zQ7q-JCIphR-I%ApSi;w+G-Cr6DWwfk1@EBw|K(ULPu9@jxIYKs`}qPS z?i`Q!;*2iNtL5S(|3raC-4I9-bCYfTslJov=Ci8qiv5r z9gO(-eit7cS#wC=nP+A`f}T0h@<&IV_j?1aDS8nTk@33-Y5wyB2*e09a`0S8GmT*m z=u*^63{3IW&8>pT_DPL9L_VOl_)dnB~i6h ztCUjh)-}pT&w96WECb|Z75FKEZ}c)Ef zygA^yXzx-AtW3H}R#pqkhCq^U7k1h|C#{y5e!Rc#H~`a_jh0UaHKRyMr|ZD7y?nXP zr!bR$fB0emFFdarO@enCDJ;M+BMcw#{hu1Kylh+i>7R!G0OH3SmBbu-K0hieo$ZwP z7HJz{ObrE6>H#S|N$@GHy-{0xlYSB_D@icN=fMSd6IGND>s14TdSGP`^7de6#87!I zafcF}hg{Yy$tYk?k?029%$Dd4D%z4R8nDo|N^UtTrE+OPo$WUR9rY;OKLk)Hzn%9l zbHc#>elc{3l0zB~C>2O2QL*DnEYv?JlvVKoS~@)w+^(F-ziV$i-WiNxm!fLI2ikuV z4q7Fd-e!rNgi1m>Q2_P0E52j4J!EuExJ0>j(-8;2_{(6OV!B04>ivTKT<`gAIo^)nljqZ+LEUy>;kg=QD=% z9i!TmP>7>>gf{`w6g)R+(kQ_bm7*}2r{Aq8Ek z@?2?X*%01(?Akz-bo4R0yI79mzDiDBYU={B>!x6O@O`|Je$qJ&jbZ4sHr+62a$STN z6g?b`i9hZ6aTu@{leW}4AEajlG~IQ#H?RgDj?L|w2SvN_5AdKGjfp?*f8$UOOZJt{ zQRBj=feAm0j}PN<9scyM2LEL`c5XfA^(Om4wCO1L@w_=7M$4@agJdhE;HNlgjc0ud40+qi z7hCAq3(22O=1U%xUO(nxt)z>zrAfaR*f$L&|9=*|mrOxm`;p z#1Ex&!gfC5MKRW5{G6Zrr3c6lA7d5+5`jS4hAJfrWfVu<(k)^EAL1BVc#^I+c%?C^ 
zbUzB_@!UNb$O(vYFLkO^oFGXA)`JRT3H19DFkHVxoR9WtflAVtGy*1Prz&03bS1|$ z6$7Ong|>V=_k25Qxhh&zjq52cs7++;e@gO;B zwr88`0);9I`Wks0llW0#_~GtCli{k^M;H%c_u3Lyo%OUNdAy5wGJ{Env4oxDzNfeaF$X-GDEDZ#>^I{Wxwj?FBxL&Ub$x$ z3J^=Q&>suuclB3+2Ss*TJQ9@Y=rg^lgYyG|1?@1=pj)#XbQw3y>g7!XAMe3^yVng% z=6O`tDBE9f!?2cLuGzFC$rMP8e1HsTd;BjX|M=QfSx2`NYso(POs_-G{i4)P`&U8h zbjHubpgl3pD=pAGAF6FIb1Tw#P2_cTuEnM{&I@kfve^CQ>u*8_Pb3@NVWYTS&e|>M zT*dF}$sb59_vayOWb;e-)4^W+CzitTAF|}1cHpOA`NP_r(XcILTA0|C#zYJ~1JY?~ zhvKgk@aI{mKEq@N1KFQ%m}`DegQc-Hsn+y=DUiO$h{G+@(#dBqyhECPw3RL?FzRsF zcf|Zv7^z;mSAjvdm1FqTA{y$ummB?IKkEI-5;P}xH`fD- zC@pr)r@7YQ)thRJ z52VS!DBQmYmXFU8e=l5rHwN*<$S2ecF?)w3UnU)e>M&TgBssn%&Kr$C9UKLg0K_yq zRyx>gkzG1smCSX^r^7>^NzKkFLTEEK61$>E&(zVXz38xLJWtGPG;yipV=tF1k1v~; z=_8BdZP zq@8P-7C(HOULzV_Tb?9g7Q84?#zW<%+3~J?32y>4KhJcA{<<%B6&j>Cpf(MzCw;U@vh`>iUyDB-9F3pzQ~*Sjk-Ssu^TlVM21gxj8&Zv>FaE6U}r-lVfXJ|gGbYDM15AawXy7%Dx{SyqgB>$wL zsk7q&sTG{k{#E-ddAJ#hQ3Wn((sAaFpohqLf=CiiHf4-Ofkr;3+RzXPntzqC`x;PbdUv*#8s1VPJ20`QF!(x1u Path: + hub_dir = Path(torch.hub.get_dir()).expanduser() + + hsh = cls._get_uri_hash(uri) + + filename = cls._get_filename(uri) + + return hub_dir.joinpath( + "seamless_communication", "assets", sub_dir, hsh, filename + ) + + +download_manager = SCAssetDownloadManager() diff --git a/src/seamless_communication/assets/store.py b/src/seamless_communication/assets/store.py new file mode 100644 index 00000000..ca4a9d08 --- /dev/null +++ b/src/seamless_communication/assets/store.py @@ -0,0 +1,22 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
def create_default_asset_store() -> AssetStore:
    """Build the default asset store backed by the bundled model cards.

    The YAML model cards shipped in the ``cards/`` directory next to this
    module are exposed through a :class:`LocalAssetCardStorage` and wrapped
    in a :class:`DefaultAssetStore` for lookup by card name.
    """
    cards_dir = Path(__file__).parent / "cards"
    return DefaultAssetStore(LocalAssetCardStorage(cards_dir))
@dataclass
class MultimodalSample:
    """A single utterance in one language: text plus optional audio data."""

    id: int  # dataset-level sample identifier, shared across languages
    lang: str  # language code of this sample
    text: str  # textual content (transcription or translation)
    audio_local_path: Optional[str] = None  # on-disk location of the audio file
    waveform: Optional[torch.Tensor] = None  # decoded audio; never serialized
    sampling_rate: Optional[int] = None  # sample rate of `waveform`, if present
    units: Optional[List[int]] = None  # discrete speech units, if tokenized

    @classmethod
    def from_json(cls, js: Dict[str, Any]) -> "MultimodalSample":
        """Rebuild a sample from its JSON dict representation.

        The required fields (``id``, ``lang``, ``text``) must be present;
        all other fields fall back to ``None`` when absent. The raw
        waveform is deliberately not restored, since it is not serialized.
        """
        sample = cls(id=js["id"], lang=js["lang"], text=js["text"])
        sample.audio_local_path = js.get("audio_local_path")
        sample.sampling_rate = js.get("sampling_rate")
        sample.units = js.get("units")
        return sample


@dataclass
class LangPairSample:
    """An aligned pair: a source-language sample and its target-language counterpart."""

    source: MultimodalSample
    target: MultimodalSample

    @classmethod
    def from_json(cls, js: Dict[str, Any]) -> "LangPairSample":
        """Rebuild a pair from a dict with ``source`` and ``target`` sub-dicts."""
        src, tgt = (MultimodalSample.from_json(js[key]) for key in ("source", "target"))
        return cls(source=src, target=tgt)
class Speech2SpeechFleursDatasetBuilder:
    """Assembles speech2speech dataset from google/fleurs on HuggingFace"""

    # Identifier of the FLEURS dataset on the HuggingFace hub.
    HF_FLEURS_DATASET_NAME = "google/fleurs"

    def __init__(
        self,
        source_lang: str,
        target_lang: str,
        split: str = "test",
        skip_source_audio: bool = True,
        skip_target_audio: bool = True,
        audio_dtype: torch.dtype = torch.float32,
        dataset_cache_dir: Optional[str] = None,
        speech_tokenizer: Optional[Any] = None,
    ):
        """Configure the builder.

        :param source_lang: FLEURS language code of the source side.
        :param target_lang: FLEURS language code of the target side.
        :param split: dataset split to load (e.g. "test").
        :param skip_source_audio: if True, do not decode source-side audio.
        :param skip_target_audio: if True, do not decode target-side audio.
        :param audio_dtype: dtype used for decoded waveforms.
        :param dataset_cache_dir: optional HuggingFace datasets cache dir.
        :param speech_tokenizer: optional tokenizer with an ``encode`` method
            producing discrete units from a batched waveform — TODO confirm
            exact interface against callers.
        """
        self.source_lang = source_lang
        self.target_lang = target_lang
        self.split = split
        self.dataset_cache_dir = dataset_cache_dir
        self.audio_dtype = audio_dtype
        self.skip_source_audio = skip_source_audio
        self.skip_target_audio = skip_target_audio
        self.speech_tokenizer = speech_tokenizer

    def _prepare_sample(
        self,
        sample_id: int,
        lang: str,
        text: str,
        audio_local_path: Optional[str] = None,
        waveform_npy: Optional[np.ndarray] = None,
        sampling_rate: Optional[int] = None,
    ) -> MultimodalSample:
        """Wrap one raw FLEURS record into a :class:`MultimodalSample`.

        Audio is decoded (and optionally tokenized into units) only when it
        is available and not skipped for this side of the pair.
        """
        skip_audio = (
            waveform_npy is None
            or (lang == self.target_lang and self.skip_target_audio)
            or (lang == self.source_lang and self.skip_source_audio)
        )
        waveform = (
            None if skip_audio else torch.from_numpy(waveform_npy).to(self.audio_dtype)
        )
        units = None
        if not skip_audio and self.speech_tokenizer is not None:
            assert waveform is not None
            # Tokenizer expects a batch dimension; take the first (only) row.
            units = self.speech_tokenizer.encode(waveform.unsqueeze(0))[0].tolist()
        return MultimodalSample(
            id=sample_id,
            lang=lang,
            text=text.strip(),
            audio_local_path=audio_local_path,
            waveform=waveform,
            sampling_rate=sampling_rate,
            units=units,
        )

    def iterate_lang_audio_samples(self, lang: str) -> Iterable[MultimodalSample]:
        """Stream all samples of one language from the configured split."""
        ds = load_dataset(
            self.HF_FLEURS_DATASET_NAME,
            lang,
            split=self.split,
            cache_dir=self.dataset_cache_dir,
            streaming=False,
        )
        for item in ds:
            audio = item["audio"]
            # The audio file sits next to the path reported by the dataset item.
            local_path = os.path.join(os.path.dirname(item["path"]), audio["path"])
            yield self._prepare_sample(
                sample_id=item["id"],
                audio_local_path=local_path,
                waveform_npy=audio["array"],
                sampling_rate=audio["sampling_rate"],
                text=item["transcription"],
                lang=lang,
            )

    def __iter__(self) -> Iterable[LangPairSample]:
        """Yield aligned (source, target) pairs keyed on the shared sample id.

        Target samples are indexed first; source samples without a matching
        target id are silently dropped.
        """
        logger.info(f"Loading {self.target_lang} samples")
        targets_by_id: Dict[int, MultimodalSample] = {}
        for num_loaded, tgt in enumerate(
            self.iterate_lang_audio_samples(lang=self.target_lang)
        ):
            if num_loaded and num_loaded % 100 == 0:
                logger.info(f"..loaded {num_loaded} target samples")
            targets_by_id[tgt.id] = tgt

        logger.info(f"Loading {self.source_lang} samples")
        for num_loaded, src in enumerate(
            self.iterate_lang_audio_samples(lang=self.source_lang)
        ):
            if num_loaded and num_loaded % 100 == 0:
                logger.info(f"..loaded {num_loaded} source samples")
            matched = targets_by_id.get(src.id)
            if matched is not None:
                yield LangPairSample(source=src, target=matched)
diff --git a/src/seamless_communication/models/inference/__init__.py b/src/seamless_communication/models/inference/__init__.py new file mode 100644 index 00000000..94b1a7ab --- /dev/null +++ b/src/seamless_communication/models/inference/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +from seamless_communication.models.inference.translator import Translator as Translator diff --git a/src/seamless_communication/models/inference/translator.py b/src/seamless_communication/models/inference/translator.py new file mode 100644 index 00000000..e3d77726 --- /dev/null +++ b/src/seamless_communication/models/inference/translator.py @@ -0,0 +1,209 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from fairseq2.assets.card import AssetCard
from fairseq2.data import Collater
from fairseq2.data.audio import AudioDecoder, WaveformToFbankConverter
from fairseq2.data.text.text_tokenizer import TextTokenizer
from fairseq2.data.typing import StringLike
from fairseq2.generation import SequenceToTextOutput, SequenceGeneratorOptions
from fairseq2.memory import MemoryBlock
from fairseq2.typing import Device
from torch import Tensor
from enum import Enum, auto

from seamless_communication.models.unity import (
    UnitTokenizer,
    UnitYGenerator,
    UnitYModel,
    load_unity_model,
    load_unity_text_tokenizer,
    load_unity_unit_tokenizer,
)
from seamless_communication.models.unity.generator import SequenceToUnitOutput
from seamless_communication.models.vocoder import load_vocoder_model, Vocoder


class Task(Enum):
    """Inference tasks supported by :class:`Translator`."""

    S2ST = auto()
    S2TT = auto()
    T2ST = auto()
    T2TT = auto()
    ASR = auto()


class Modality(Enum):
    """Input/output modality of a task; values match the generator's API."""

    SPEECH = "speech"
    TEXT = "text"


class Translator(nn.Module):
    """End-to-end inference wrapper around a UnitY model and a vocoder."""

    def __init__(
        self,
        model_name_or_card: Union[str, AssetCard],
        vocoder_name_or_card: Union[str, AssetCard],
        device: Device,
        sample_rate: int = 16000,
    ):
        """
        :param model_name_or_card: Name or asset card of the UnitY model.
        :param vocoder_name_or_card: Name or asset card of the vocoder.
        :param device: Device to run inference on.
        :param sample_rate: Sampling rate assumed for raw-tensor audio input
            and reported for synthesized output.
        """
        super().__init__()
        # Load the model (fp16 weights, eval mode).
        self.model: UnitYModel = load_unity_model(
            model_name_or_card, device=device, dtype=torch.float16
        )
        self.model.eval()
        self.text_tokenizer = load_unity_text_tokenizer(model_name_or_card)
        self.unit_tokenizer = load_unity_unit_tokenizer(model_name_or_card)
        self.device = device
        self.decode_audio = AudioDecoder(dtype=torch.float32, device=device)
        self.convert_to_fbank = WaveformToFbankConverter(
            num_mel_bins=80,
            waveform_scale=2**15,
            channel_last=True,
            standardize=True,
            device=device,
            dtype=torch.float16,
        )
        self.collate = Collater(
            pad_idx=self.text_tokenizer.vocab_info.pad_idx, pad_to_multiple=2
        )
        # Load the vocoder.
        self.vocoder: Vocoder = load_vocoder_model(vocoder_name_or_card, device=device)
        self.vocoder.eval()
        self.sr = sample_rate

    @classmethod
    def get_prediction(
        cls,
        model: UnitYModel,
        text_tokenizer: TextTokenizer,
        unit_tokenizer: UnitTokenizer,
        src: Dict[str, Tensor],
        input_modality: Modality,
        output_modality: Modality,
        tgt_lang: str,
    ) -> Tuple[SequenceToTextOutput, Optional[SequenceToUnitOutput]]:
        """Run the UnitY generator on a collated source batch.

        :param src: Collated batch holding ``"seqs"`` and ``"seq_lens"``.
        :returns: The text output and, when ``output_modality`` is speech,
            the unit output (otherwise the second element is ``None``).
        """
        # Text sources are much shorter than speech ones, so the unit length
        # budget (max_len_a factor) needs to be scaled up for text input.
        if input_modality == Modality.TEXT:
            max_len_a = 25
        else:
            max_len_a = 1

        generator = UnitYGenerator(
            model,
            text_tokenizer,
            tgt_lang,
            unit_tokenizer if output_modality == Modality.SPEECH else None,
            text_opts=SequenceGeneratorOptions(beam_size=5, soft_max_seq_len=(1, 200)),
            unit_opts=SequenceGeneratorOptions(
                beam_size=5, soft_max_seq_len=(max_len_a, 50)
            ),
        )
        return generator(
            src["seqs"], src["seq_lens"], input_modality.value, output_modality.value
        )

    def get_modalities_from_task(self, task: Task) -> Tuple[Modality, Modality]:
        """Map a task to its (input, output) modality pair."""
        if task == Task.S2ST:
            return Modality.SPEECH, Modality.SPEECH
        # ASR is treated as S2TT with src_lang == tgt_lang
        elif task == Task.S2TT or task == Task.ASR:
            return Modality.SPEECH, Modality.TEXT
        elif task == Task.T2TT:
            return Modality.TEXT, Modality.TEXT
        else:
            return Modality.TEXT, Modality.SPEECH

    @torch.no_grad()
    def synthesize_speech(
        self,
        code: List[int],
        lang: str,
        speaker: Optional[int] = None,
        dur_prediction: bool = True,  # was Optional[bool]; None was never meaningful
    ) -> Tuple[List[Tensor], int]:
        """Vocode a unit sequence into waveforms at ``self.sr``."""
        return self.vocoder(code, lang, speaker, dur_prediction), self.sr

    @torch.no_grad()
    def predict(
        self,
        input: Union[str, torch.Tensor],
        task_str: str,
        tgt_lang: str,
        src_lang: Optional[str] = None,
        spkr: Optional[int] = -1,
    ) -> Tuple[StringLike, Optional[List[Tensor]], Optional[int]]:
        """
        The main method used to perform inference on all tasks.

        :param input:
            Either text or path to audio or audio Tensor.
        :param task_str:
            String representing the task.
            Valid choices are "S2ST", "S2TT", "T2ST", "T2TT", "ASR"
        :param tgt_lang:
            Target language to decode into.
        :param src_lang:
            Source language of input, only required for T2ST, T2TT tasks.
        :param spkr:
            Speaker id for vocoder.

        :returns:
            - Translated text.
            - Audio waveform.
            - Sampling rate of audio waveform.

        :raises ValueError:
            If ``task_str`` is unknown, or ``src_lang`` is missing for a
            text-input task.
        """
        try:
            task = Task[task_str.upper()]
        except KeyError:
            raise ValueError(f"Unsupported task: {task_str}")

        input_modality, output_modality = self.get_modalities_from_task(task)

        if input_modality == Modality.SPEECH:
            audio = input
            if isinstance(audio, str):
                with Path(audio).open("rb") as fb:
                    block = MemoryBlock(fb.read())
                decoded_audio = self.decode_audio(block)
            else:
                # Raw tensor input: assume it is already at self.sr.
                decoded_audio = {
                    "waveform": audio,
                    "sample_rate": self.sr,
                    "format": -1,
                }
            src = self.collate(self.convert_to_fbank(decoded_audio))["fbank"]
        else:
            if src_lang is None:
                raise ValueError("src_lang must be specified for T2ST, T2TT tasks.")

            text = input
            self.token_encoder = self.text_tokenizer.create_encoder(
                task="translation", lang=src_lang, mode="source", device=self.device
            )
            src = self.collate(self.token_encoder(text))

        result = self.get_prediction(
            self.model,
            self.text_tokenizer,
            self.unit_tokenizer,
            src,
            input_modality,
            output_modality,
            tgt_lang=tgt_lang,
        )

        text_out = result[0]
        unit_out = result[1]
        if output_modality == Modality.TEXT:
            return text_out.sentences[0], None, None

        # Speech output requested: the generator must have produced units.
        # Previously a missing unit output crashed with AttributeError.
        if unit_out is None:
            raise RuntimeError(
                f"Expected unit output for task {task_str}, but none was generated."
            )
        # Drop the first unit of the sequence (presumably a control/lang
        # token prepended by the generator — confirm against UnitYGenerator).
        units = unit_out.units[:, 1:][0].cpu().numpy().tolist()
        wav_out, sr_out = self.synthesize_speech(units, tgt_lang, spkr)
        return text_out.sentences[0], wav_out, sr_out
# Re-exports defining the public API of seamless_communication.models.unity.
from seamless_communication.models.unity.builder import UnitYBuilder as UnitYBuilder
from seamless_communication.models.unity.builder import UnitYConfig as UnitYConfig
from seamless_communication.models.unity.builder import (
    UnitYT2UBuilder as UnitYT2UBuilder,
)
from seamless_communication.models.unity.builder import UnitYT2UConfig as UnitYT2UConfig
from seamless_communication.models.unity.builder import (
    create_unity_model as create_unity_model,
)
from seamless_communication.models.unity.builder import (
    create_unity_t2u_model as create_unity_t2u_model,
)
from seamless_communication.models.unity.builder import unity_arch as unity_arch
from seamless_communication.models.unity.builder import unity_archs as unity_archs
from seamless_communication.models.unity.builder import unity_t2u_arch as unity_t2u_arch
from seamless_communication.models.unity.builder import (
    unity_t2u_archs as unity_t2u_archs,
)
from seamless_communication.models.unity.loader import UnitYLoader as UnitYLoader
from seamless_communication.models.unity.loader import (
    load_unity_model as load_unity_model,
)
from seamless_communication.models.unity.loader import (
    load_unity_text_tokenizer as load_unity_text_tokenizer,
)
from seamless_communication.models.unity.loader import (
    load_unity_unit_tokenizer as load_unity_unit_tokenizer,
)
from seamless_communication.models.unity.model import UnitYModel as UnitYModel
from seamless_communication.models.unity.model import UnitYX2TModel as UnitYX2TModel
from seamless_communication.models.unity.model import UnitYOutput as UnitYOutput
from seamless_communication.models.unity.unit_tokenizer import (
    UnitTokenDecoder as UnitTokenDecoder,
)
from seamless_communication.models.unity.unit_tokenizer import (
    UnitTokenEncoder as UnitTokenEncoder,
)
from seamless_communication.models.unity.unit_tokenizer import (
    UnitTokenizer as UnitTokenizer,
)
from seamless_communication.models.unity.generator import (
    UnitYGenerator as UnitYGenerator,
)


# --- src/seamless_communication/models/unity/adaptor_block.py ---
# Adaptor blocks that bridge a (Conformer) speech encoder to the UnitY
# text decoder by length-pooling the encoder output with strided 1D convs.

from typing import Iterable, Optional, Tuple, final

import torch
from fairseq2.models.conformer import ConformerBlock
from fairseq2.nn.module_list import ModuleList
from fairseq2.nn.normalization import LayerNorm
from fairseq2.nn.projection import Linear
from fairseq2.nn.transformer import (
    EncoderLayerOutputHook,
    FeedForwardNetwork,
    LayerNormFactory,
    MultiheadAttention,
    TransformerEncoder,
    TransformerEncoderLayer,
    create_default_layer_norm,
)
from fairseq2.nn.utils.mask import to_padding_mask
from fairseq2.nn.utils.module import check_model_dim
from fairseq2.typing import DataType, Device
from overrides import final as finaloverride
from torch import Tensor
from torch.nn import GLU, Conv1d, Dropout, ReLU


@final
class UnitYEncoderAdaptor(TransformerEncoder):
    """Represents a Transformer encoder that wraps a speech encoder and adapts
    it to be used with the UnitY architecture."""

    inner: TransformerEncoder
    inner_layer_norm: Optional[LayerNorm]
    proj1: Linear
    activation: ReLU
    proj2: Linear
    adaptor_layers: ModuleList
    layer_norm: LayerNorm

    def __init__(
        self,
        inner: TransformerEncoder,
        adaptor_layers: Iterable[TransformerEncoderLayer],
        inner_layer_norm: bool = False,
        layer_norm_fn: Optional[LayerNormFactory] = None,
        device: Optional[Device] = None,
        dtype: Optional[DataType] = None,
    ) -> None:
        """
        :param inner:
            The speech encoder to wrap.
        :param adaptor_layers:
            The adaptor layers to stack on top of ``inner``.
        :param inner_layer_norm:
            If ``True``, applies Layer Normalization to outputs of ``inner``.
        :param layer_norm_fn:
            The factory to use to construct the Layer Normalization modules.
        """
        model_dim = inner.model_dim

        super().__init__(model_dim)

        if layer_norm_fn is None:
            layer_norm_fn = create_default_layer_norm

        self.inner = inner

        if inner_layer_norm:
            self.inner_layer_norm = layer_norm_fn(model_dim, device, dtype)
        else:
            # register_module(None) keeps the attribute present (and repr/state
            # handling consistent) when the norm is disabled.
            self.register_module("inner_layer_norm", None)

        self.proj1 = Linear(
            model_dim, model_dim * 4, bias=True, device=device, dtype=dtype
        )

        self.activation = ReLU()

        self.proj2 = Linear(
            model_dim * 4, model_dim, bias=True, device=device, dtype=dtype
        )

        layer_list = ModuleList(adaptor_layers)
        if not layer_list:
            raise ValueError("`adaptor_layers` must be non-empty.")

        self.adaptor_layers = layer_list

        self.layer_norm = layer_norm_fn(model_dim, device, dtype)

        check_model_dim(self)

    @finaloverride
    def forward(
        self,
        seqs: Tensor,
        padding_mask: Optional[Tensor],
        layer_output_hook: Optional[EncoderLayerOutputHook] = None,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Encode with ``inner``, add a scaled MLP residual, then run the
        adaptor layers and a final Layer Normalization."""
        seqs, padding_mask = self.inner(seqs, padding_mask, layer_output_hook)

        if self.inner_layer_norm is not None:
            seqs = self.inner_layer_norm(seqs)

        # Only difference compared to a vanilla Transformer encoder.
        seqs = seqs + 0.5 * self._expand_contract(seqs)

        for layer in self.adaptor_layers:
            seqs, padding_mask = layer(seqs, padding_mask)

        seqs = self.layer_norm(seqs)

        return seqs, padding_mask

    def _expand_contract(self, seqs: Tensor) -> Tensor:
        """Position-wise MLP: model_dim -> 4*model_dim -> ReLU -> model_dim."""
        seqs = self.proj1(seqs)

        seqs = self.activation(seqs)

        seqs = self.proj2(seqs)

        return seqs


@final
class UnitYTransformerAdaptorLayer(TransformerEncoderLayer):
    """Represents a variant of M-Adaptor layer described in
    :cite:t`https://doi.org/10.48550/arxiv.2207.00952`.

    The main difference from the paper is that pooling is applied to multi-head
    attention input rather than projected Q, K, V.
    """

    kernel_size: int
    stride: int
    residual_layer_norm: LayerNorm
    residual_conv: Conv1d
    residual_activation: GLU
    self_attn_layer_norm: LayerNorm
    self_attn_conv: Conv1d
    self_attn_activation: GLU
    self_attn: MultiheadAttention
    self_attn_dropout: Optional[Dropout]
    ffn_layer_norm: LayerNorm
    ffn: FeedForwardNetwork
    ffn_dropout: Optional[Dropout]

    def __init__(
        self,
        self_attn: MultiheadAttention,
        ffn: FeedForwardNetwork,
        kernel_size: int = 8,
        stride: int = 8,
        dropout_p: float = 0.1,
        layer_norm_fn: Optional[LayerNormFactory] = None,
        device: Optional[Device] = None,
        dtype: Optional[DataType] = None,
    ) -> None:
        """
        :param self_attn:
            The self attention layer.
        :param ffn:
            The feed-forward network.
        :param kernel_size:
            The kernel size for 1D pooling convolutions.
        :param stride:
            The stride for 1D pooling convolutions.
        :param dropout_p:
            The dropout probability on outputs of the self attention layer and
            the feed-forward network.
        :param layer_norm_fn:
            The factory to use to construct the Layer Normalization modules.
        """
        model_dim = self_attn.model_dim

        super().__init__(model_dim)

        if layer_norm_fn is None:
            layer_norm_fn = create_default_layer_norm

        self.kernel_size = kernel_size
        self.stride = stride

        self.residual_layer_norm = layer_norm_fn(model_dim, device, dtype)

        # Convs output 2 * model_dim channels; the GLU halves them back, so
        # the pooled sequence keeps model_dim features.
        self.residual_conv = Conv1d(
            model_dim,
            model_dim * 2,
            kernel_size,
            stride,
            padding=kernel_size // 2,
            device=device,
            dtype=dtype,
        )

        self.residual_activation = GLU(dim=1)

        self.self_attn_layer_norm = layer_norm_fn(model_dim, device, dtype)

        self.self_attn_conv = Conv1d(
            model_dim,
            model_dim * 2,
            kernel_size,
            stride,
            padding=kernel_size // 2,
            device=device,
            dtype=dtype,
        )

        self.self_attn_activation = GLU(dim=1)

        self.self_attn = self_attn

        if dropout_p > 0.0:
            self.self_attn_dropout = Dropout(dropout_p)
        else:
            self.register_module("self_attn_dropout", None)

        self.ffn_layer_norm = layer_norm_fn(model_dim, device, dtype)

        self.ffn = ffn

        if dropout_p > 0.0:
            self.ffn_dropout = Dropout(dropout_p)
        else:
            self.register_module("ffn_dropout", None)

        check_model_dim(self)

    @finaloverride
    def forward(
        self, seqs: Tensor, padding_mask: Optional[Tensor]
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Pooled self-attention followed by a pre-norm FFN sub-layer."""
        seqs, padding_mask = self._forward_self_attn(seqs, padding_mask)

        seqs = self._forward_ffn(seqs)

        return seqs, padding_mask

    def _forward_self_attn(
        self, seqs: Tensor, padding_mask: Optional[Tensor]
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Self-attention sub-layer; both the residual path and the attention
        input are length-pooled so they stay shape-compatible."""
        residual = self.residual_layer_norm(seqs)

        # Apply pooling to the residual to match the sequence length of the
        # multi-head attention output.
        # (N, S, M) -> (N, M, S)
        residual = residual.transpose(1, 2)

        residual = self.residual_conv(residual)

        residual = self.residual_activation(residual)

        # (N, M, S) -> (N, S, M)
        residual = residual.transpose(1, 2)

        seqs = self.self_attn_layer_norm(seqs)

        # Apply pooling before feeding to the multihead-attention layer.
        # (N, S, M) -> (N, M, S)
        seqs = seqs.transpose(1, 2)

        seqs = self.self_attn_conv(seqs)

        seqs = self.self_attn_activation(seqs)

        # (N, M, S) -> (N, S, M)
        seqs = seqs.transpose(1, 2)

        # The pooled sequence is shorter, so the padding mask must be
        # recomputed for the new lengths.
        padding_mask = _compute_new_padding_mask(
            seqs, padding_mask, self.kernel_size, self.stride
        )

        # The rest of the computation is identical to a vanilla Transformer
        # encoder layer.
        seqs = self.self_attn(
            seqs,
            padding_mask,
            keys=seqs,
            values=seqs,
            key_padding_mask=padding_mask,
        )

        if self.self_attn_dropout is not None:
            seqs = self.self_attn_dropout(seqs)

        seqs = seqs + residual

        return seqs, padding_mask

    def _forward_ffn(self, seqs: Tensor) -> Tensor:
        """Pre-norm feed-forward sub-layer with residual connection."""
        residual = seqs

        seqs = self.ffn_layer_norm(seqs)

        seqs = self.ffn(seqs)

        if self.ffn_dropout is not None:
            seqs = self.ffn_dropout(seqs)

        return seqs + residual

    def extra_repr(self) -> str:
        """:meta private:"""
        s = super().extra_repr()

        return s + f", kernel_size={self.kernel_size}, stride={self.stride}"


@final
class UnitYConformerAdaptorLayer(TransformerEncoderLayer):
    """Represents a variant of M-Adaptor layer described in
    :cite:t`https://doi.org/10.48550/arxiv.2207.00952`.

    The main difference from the paper is that this variant uses a Conformer
    block which empirically showed better performance when used with Conformer-
    based speech encoder architectures such as w2v-BERT.
    """

    kernel_size: int
    stride: int
    layer_norm: Optional[LayerNorm]
    conv: Conv1d
    activation: GLU
    block: ConformerBlock

    def __init__(
        self,
        block: ConformerBlock,
        kernel_size: int = 8,
        stride: int = 8,
        layer_norm: bool = False,
        layer_norm_fn: Optional[LayerNormFactory] = None,
        device: Optional[Device] = None,
        dtype: Optional[DataType] = None,
    ) -> None:
        """
        :param block:
            The Conformer block to use.
        :param kernel_size:
            The kernel size for 1D pooling convolutions.
        :param stride:
            The stride for 1D pooling convolutions.
        :param layer_norm:
            If ``True``, applies Layer Normalization to inputs before pooling.
        :param layer_norm_fn:
            The factory to use to construct the Layer Normalization modules.
        """
        super().__init__(block.model_dim)

        if layer_norm_fn is None:
            layer_norm_fn = create_default_layer_norm

        self.kernel_size = kernel_size
        self.stride = stride

        if layer_norm:
            self.layer_norm = layer_norm_fn(self.model_dim, device, dtype)
        else:
            self.register_module("layer_norm", None)

        # Conv outputs 2 * model_dim channels; the GLU halves them back.
        self.conv = Conv1d(
            self.model_dim,
            self.model_dim * 2,
            kernel_size,
            stride,
            padding=kernel_size // 2,
            device=device,
            dtype=dtype,
        )

        self.activation = GLU(dim=1)

        self.block = block

    @finaloverride
    def forward(
        self, seqs: Tensor, padding_mask: Optional[Tensor]
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Length-pool the input, recompute the padding mask, then run the
        Conformer block on the pooled sequence."""
        if self.layer_norm is not None:
            seqs = self.layer_norm(seqs)

        # Apply pooling before feeding to the Conformer block.
        # (N, S, M) -> (N, M, S)
        seqs = seqs.transpose(1, 2)

        seqs = self.conv(seqs)

        seqs = self.activation(seqs)

        # (N, M, S) -> (N, S, M)
        seqs = seqs.transpose(1, 2)

        padding_mask = _compute_new_padding_mask(
            seqs, padding_mask, self.kernel_size, self.stride
        )

        return self.block(seqs, padding_mask)  # type: ignore[no-any-return]

    def extra_repr(self) -> str:
        """:meta private:"""
        s = super().extra_repr()

        return s + f", kernel_size={self.kernel_size}, stride={self.stride}"


def _compute_new_padding_mask(
    seqs: Tensor, padding_mask: Optional[Tensor], kernel_size: int, stride: int
) -> Optional[Tensor]:
    """Derive the padding mask of a conv-pooled sequence from the original mask.

    Returns ``None`` unchanged when there is no mask.
    """
    if padding_mask is None:
        return padding_mask

    pad = kernel_size // 2

    # Recover per-sequence lengths from the float mask: padded positions are
    # presumably -inf (nan_to_num(neginf=1.0) turns each into 1, so the sum
    # counts padded steps) — TODO confirm against fairseq2's mask convention.
    seq_lens = padding_mask.size(1) - torch.nan_to_num(padding_mask, neginf=1.0).sum(1)

    # Standard Conv1d output-length formula: floor((L + 2p - k) / s) + 1.
    seq_lens = ((seq_lens + 2 * pad - kernel_size) / stride) + 1

    return to_padding_mask(seqs, seq_lens.floor())
mode 100644 index 00000000..bfcc1524 --- /dev/null +++ b/src/seamless_communication/models/unity/builder.py @@ -0,0 +1,626 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from dataclasses import dataclass +from typing import Optional + +from fairseq2.data import VocabularyInfo +from fairseq2.models.conformer import ConformerBlock, ConformerConvolution +from fairseq2.models.nllb import NllbBuilder, NllbConfig, nllb_archs +from fairseq2.models.transformer import ( + TransformerEmbeddingFrontend, + TransformerFrontend, +) +from seamless_communication.models.unity.adaptor_block import ( + UnitYConformerAdaptorLayer, + UnitYEncoderAdaptor, + UnitYTransformerAdaptorLayer, +) +from seamless_communication.models.unity.model import UnitYModel, UnitYT2UModel +from fairseq2.models.utils.arch_registry import ArchitectureRegistry +from fairseq2.models.w2vbert import w2vbert_archs +from fairseq2.models.wav2vec2 import Wav2Vec2EncoderBuilder, Wav2Vec2EncoderConfig +from fairseq2.nn.embedding import Embedding +from fairseq2.nn.position_encoder import SinusoidalPositionEncoder +from fairseq2.nn.projection import TiedProjection +from fairseq2.nn.transformer import ( + FeedForwardNetwork, + MultiheadAttention, + StandardFeedForwardNetwork, + StandardMultiheadAttention, + StandardTransformerDecoder, + StandardTransformerDecoderLayer, + StandardTransformerEncoder, + StandardTransformerEncoderLayer, + TransformerDecoder, + TransformerDecoderLayer, + TransformerEncoder, + TransformerEncoderLayer, + TransformerNormOrder, + create_default_sdpa, +) +from fairseq2.typing import DataType, Device + + +@dataclass +class UnitYConfig: + """Holds the configuration of a UnitY model as described in + :cite:t`https://doi.org/10.48550/arxiv.2212.08055`""" + + model_dim: int + """The dimensionality of the model.""" + + 
w2v2_encoder_config: Wav2Vec2EncoderConfig + """The configuration of the underlying wav2vec 2.0 encoder.""" + + nllb_config: NllbConfig + """The configuration of the underlying NLLB text encoder-decoder.""" + + t2u_config: Optional["UnitYT2UConfig"] + """The configuration of the UnitY T2U sub-model.""" + + use_text_encoder: bool + """If ``True``, uses an aligned NLLB encoder for the MT task.""" + + use_conformer_adaptor: bool + """If ``True``, uses a Conformer-based adaptor block.""" + + num_adaptor_layers: int + """The number of Transformer encoder layers in the adaptor block.""" + + adaptor_kernel_size: int + """The kernel size of 1D convolutions in the adaptor block.""" + + adaptor_stride: int + """The stride of 1D convolutions in the adaptor block.""" + + adaptor_layer_norm: bool + """If ``True``, applies Layer Normalization to outputs of the underlying + encoder in the adaptor block.""" + + adaptor_dropout_p: float + """The dropout probability in Transformer layers of the adaptor block.""" + + +unity_archs = ArchitectureRegistry[UnitYConfig]("unity") + + +unity_arch = unity_archs.marker + + +@unity_arch("base") +def _base() -> UnitYConfig: + w2vbert_config = w2vbert_archs.get_config("600m") + + nllb_config = nllb_archs.get_config("dense_1b") + + nllb_config.vocabulary_size = 256102 # NLLB-100 + + t2u_config = unity_t2u_archs.get_config("base") + + return UnitYConfig( + model_dim=1024, + w2v2_encoder_config=w2vbert_config.w2v2_config.encoder_config, + nllb_config=nllb_config, + t2u_config=t2u_config, + use_text_encoder=True, + use_conformer_adaptor=False, + num_adaptor_layers=1, + adaptor_kernel_size=8, + adaptor_stride=8, + adaptor_layer_norm=True, + adaptor_dropout_p=0.1, + ) + + +@unity_arch("medium") +def _medium() -> UnitYConfig: + w2vbert_config = w2vbert_archs.get_config("300m") + + nllb_config = nllb_archs.get_config("dense_600m") + + nllb_config.vocabulary_size = 256206 # NLLB-200 + + t2u_config = unity_t2u_archs.get_config("medium") + + return 
UnitYConfig( + model_dim=1024, + w2v2_encoder_config=w2vbert_config.w2v2_config.encoder_config, + nllb_config=nllb_config, + t2u_config=t2u_config, + use_text_encoder=True, + use_conformer_adaptor=False, + num_adaptor_layers=1, + adaptor_kernel_size=8, + adaptor_stride=8, + adaptor_layer_norm=True, + adaptor_dropout_p=0.1, + ) + + +class UnitYBuilder: + """Builds modules of a UnitY model. + + To tweak the architecture, you can derive from this class and override the + corresponding methods. + """ + + config: UnitYConfig + w2v2_encoder_builder: Wav2Vec2EncoderBuilder + nllb_builder: NllbBuilder + t2u_builder: Optional["UnitYT2UBuilder"] + device: Optional[Device] + dtype: Optional[DataType] + + def __init__( + self, + config: UnitYConfig, + w2v2_encoder_builder: Wav2Vec2EncoderBuilder, + nllb_builder: NllbBuilder, + t2u_builder: Optional["UnitYT2UBuilder"], + device: Optional[Device] = None, + dtype: Optional[DataType] = None, + ) -> None: + """ + :param config: + The configuration to use. + :param w2v2_encoder_builder: + The wav2vec 2.0 encoder builder. + :param nllb_builder: + The NLLB model builder. + :param t2u_builder: + The UnitY T2U model builder. + :param device: + The device on which to initialize modules. + :param dtype: + The data type of module parameters and buffers. + """ + if w2v2_encoder_builder.config.model_dim != config.model_dim: + raise ValueError( + f"`model_dim` and `model_dim` of `w2v2_encoder_builder.config` must be equal, but are {config.model_dim} and {w2v2_encoder_builder.config.model_dim} instead." + ) + + if nllb_builder.config.model_dim != config.model_dim: + raise ValueError( + f"`model_dim` and `model_dim` of `nllb_builder.config` must be equal, but are {config.model_dim} and {nllb_builder.config.model_dim} instead." 
+ ) + + if t2u_builder is not None and t2u_builder.config.model_dim != config.model_dim: + raise ValueError( + f"`model_dim` and `model_dim` of `t2u_builder.config` must be equal, but are {config.model_dim} and {t2u_builder.config.model_dim} instead." + ) + + self.config = config + self.w2v2_encoder_builder = w2v2_encoder_builder + self.nllb_builder = nllb_builder + self.t2u_builder = t2u_builder + self.device = device + self.dtype = dtype + + def build_model(self) -> UnitYModel: + """Build a model.""" + text_embed = self.nllb_builder.build_embedding() + + speech_encoder_frontend = self.w2v2_encoder_builder.build_frontend() + speech_encoder = self.build_speech_encoder() + + text_decoder_frontend = self.nllb_builder.build_frontend(text_embed) + text_decoder = self.nllb_builder.build_decoder() + + if self.config.use_text_encoder: + # We use shared embedding as in NLLB. + text_encoder_frontend = text_decoder_frontend + text_encoder = self.nllb_builder.build_encoder() + else: + text_encoder_frontend = None + text_encoder = None + + final_proj = TiedProjection(text_embed.weight) + + if self.t2u_builder is None: + t2u_model = None + else: + t2u_model = self.t2u_builder.build_model() + + return UnitYModel( + speech_encoder_frontend, + speech_encoder, + text_encoder_frontend, + text_encoder, + text_decoder_frontend, + text_decoder, + final_proj, + t2u_model, + self.config.nllb_config.pad_idx, + ) + + def build_speech_encoder(self) -> TransformerEncoder: + """Build a speech Transformer encoder.""" + w2v2_encoder = self.w2v2_encoder_builder.build_encoder() + + # For Conformer-based wav2vec 2.0 architectures (e.g. w2v-BERT), we + # typically use a special type of adaptor layer. 
+ if not self.config.use_conformer_adaptor: + build_adaptor_layer = self.build_adaptor_layer + else: + build_adaptor_layer = self.build_conformer_adaptor_layer + + num_layers = self.config.num_adaptor_layers + + layers = [build_adaptor_layer(i) for i in range(num_layers)] + + return UnitYEncoderAdaptor( + w2v2_encoder, + layers, + self.config.adaptor_layer_norm, + device=self.device, + dtype=self.dtype, + ) + + def build_adaptor_layer(self, idx: int) -> TransformerEncoderLayer: + """Build a Transformer-based encoder adaptor layer.""" + self_attn = self.build_adaptor_attention( + self.w2v2_encoder_builder.config.num_encoder_attn_heads + ) + + # Unlike wav2vec2, we use ReLU (i.e. standard FFN activation function) + # instead of GELU. + ffn = StandardFeedForwardNetwork( + self.config.model_dim, + self.w2v2_encoder_builder.config.ffn_inner_dim, + device=self.device, + dtype=self.dtype, + ) + + return UnitYTransformerAdaptorLayer( + self_attn, + ffn, + self.config.adaptor_kernel_size, + self.config.adaptor_stride, + self.config.adaptor_dropout_p, + device=self.device, + dtype=self.dtype, + ) + + def build_conformer_adaptor_layer(self, idx: int) -> TransformerEncoderLayer: + """Build a Conformer-based encoder adaptor layer.""" + ffn1 = self.w2v2_encoder_builder.build_ffn(use_swish=True) + + # Empirically shown that, in adaptor layers, vanilla MHA performs better + # than MHA with relative positional encoding. 
+ self_attn = self.build_adaptor_attention( + self.w2v2_encoder_builder.config.num_encoder_attn_heads + ) + + conv = ConformerConvolution( + self.w2v2_encoder_builder.config.model_dim, + self.w2v2_encoder_builder.config.depthwise_conv_kernel_size, + device=self.device, + dtype=self.dtype, + ) + + ffn2 = self.w2v2_encoder_builder.build_ffn(use_swish=True) + + block = ConformerBlock( + ffn1, + self_attn, + conv, + ffn2, + dropout_p=self.config.adaptor_dropout_p, + device=self.device, + dtype=self.dtype, + ) + + layer_norm = idx == 0 + + return UnitYConformerAdaptorLayer( + block, + self.config.adaptor_kernel_size, + self.config.adaptor_stride, + layer_norm, + device=self.device, + dtype=self.dtype, + ) + + def build_adaptor_attention(self, num_heads: int) -> MultiheadAttention: + """Build a Transformer multi-head attention layer in adaptor block.""" + sdpa = create_default_sdpa(attn_dropout_p=self.config.adaptor_dropout_p) + + return StandardMultiheadAttention( + self.config.model_dim, + num_heads, + sdpa=sdpa, + device=self.device, + dtype=self.dtype, + ) + + +def create_unity_model( + config: UnitYConfig, + device: Optional[Device] = None, + dtype: Optional[DataType] = None, +) -> UnitYModel: + """Create a UnitY model. + + :param config: + The configuration to use. + :param device: + The device on which to initialize modules. + :param dtype: + The data type of module parameters and buffers. 
+ """ + w2v2_encoder_builder = Wav2Vec2EncoderBuilder( + config.w2v2_encoder_config, device, dtype + ) + + nllb_builder = NllbBuilder(config.nllb_config, device, dtype) + + if config.t2u_config is None: + t2u_builder = None + else: + t2u_builder = UnitYT2UBuilder(config.t2u_config, device, dtype) + + unity_builder = UnitYBuilder( + config, w2v2_encoder_builder, nllb_builder, t2u_builder, device, dtype + ) + + return unity_builder.build_model() + + +@dataclass +class UnitYT2UConfig: + """Holds the configuration of a UnitY T2U model as described in + :cite:t`https://doi.org/10.48550/arxiv.2212.08055`""" + + model_dim: int + """The dimensionality of the model.""" + + unit_max_seq_len: int + """The expected maximum unit sequence length.""" + + unit_vocabulary_size: int + """The size of the unit vocabulary.""" + + unit_pad_idx: Optional[int] + """The index of the pad symbol in the unit vocabulary.""" + + num_encoder_layers: int + """The number of Transformer encoder layers.""" + + num_decoder_layers: int + """The number of Transformer decoder layers.""" + + num_encoder_attn_heads: int + """The number of attention heads in Transformer encoder layers.""" + + num_decoder_attn_heads: int + """The number of attention heads in Transformer decoder layers.""" + + ffn_inner_dim: int + """The inner dimensionality of Transformer feed-forward networks.""" + + dropout_p: float + """The dropout probability in Transformer layers.""" + + def update_unit_vocabulary(self, info: VocabularyInfo) -> None: + """Update unit vocabulary configuration from ``info``.""" + self.unit_vocabulary_size, self.unit_pad_idx = info.size, info.pad_idx + + +unity_t2u_archs = ArchitectureRegistry[UnitYT2UConfig]("unity_t2u") + + +unity_t2u_arch = unity_t2u_archs.marker + + +@unity_t2u_arch("base") +def _base_t2u() -> UnitYT2UConfig: + return UnitYT2UConfig( + model_dim=1024, + unit_max_seq_len=2048, + unit_vocabulary_size=10082, + unit_pad_idx=1, + num_encoder_layers=6, + num_decoder_layers=6, + 
num_encoder_attn_heads=16, + num_decoder_attn_heads=16, + ffn_inner_dim=1024 * 8, + dropout_p=0.1, + ) + + +@unity_t2u_arch("medium") +def _medium_t2u() -> UnitYT2UConfig: + return UnitYT2UConfig( + model_dim=1024, + unit_max_seq_len=2048, + unit_vocabulary_size=10082, + unit_pad_idx=1, + num_encoder_layers=4, + num_decoder_layers=4, + num_encoder_attn_heads=16, + num_decoder_attn_heads=16, + ffn_inner_dim=1024 * 8, + dropout_p=0.1, + ) + + +class UnitYT2UBuilder: + """Builds modules of a UnitY T2U model. + + To tweak the architecture, you can derive from this class and override the + corresponding methods. + """ + + config: UnitYT2UConfig + device: Optional[Device] + dtype: Optional[DataType] + + def __init__( + self, + config: UnitYT2UConfig, + device: Optional[Device] = None, + dtype: Optional[DataType] = None, + ) -> None: + """ + :param config: + The configuration to use. + :param device: + The device on which to initialize modules. + :param dtype: + The data type of module parameters and buffers. 
+ """ + self.config = config + self.device = device + self.dtype = dtype + + def build_model(self) -> UnitYT2UModel: + """Build a model.""" + embed = self.build_embedding() + + encoder = self.build_encoder() + + decoder_frontend = self.build_decoder_frontend(embed) + decoder = self.build_decoder() + + final_proj = TiedProjection(embed.weight) + + return UnitYT2UModel( + encoder, + decoder_frontend, + decoder, + final_proj, + self.config.unit_pad_idx, + ) + + def build_embedding(self) -> Embedding: + """Build a unit embedding table.""" + return Embedding( + num_embeddings=self.config.unit_vocabulary_size, + embedding_dim=self.config.model_dim, + pad_idx=self.config.unit_pad_idx, + scaled=True, + device=self.device, + dtype=self.dtype, + ) + + def build_encoder(self) -> Optional[TransformerEncoder]: + """Build a Transformer encoder.""" + num_layers = self.config.num_encoder_layers + if num_layers == 0: + return None + + layers = [self.build_encoder_layer() for _ in range(num_layers)] + + return StandardTransformerEncoder( + layers, + norm_order=TransformerNormOrder.PRE, + device=self.device, + dtype=self.dtype, + ) + + def build_encoder_layer(self) -> TransformerEncoderLayer: + """Build a Transformer encoder layer.""" + self_attn = self.build_attention(self.config.num_encoder_attn_heads) + + ffn = self.build_ffn() + + return StandardTransformerEncoderLayer( + self_attn, + ffn, + dropout_p=self.config.dropout_p, + norm_order=TransformerNormOrder.PRE, + device=self.device, + dtype=self.dtype, + ) + + def build_decoder_frontend(self, embed: Embedding) -> TransformerFrontend: + """Build a Transformer decoder front-end.""" + pos_encoder = SinusoidalPositionEncoder( + self.config.model_dim, + self.config.unit_max_seq_len, + _legacy_pad_idx=self.config.unit_pad_idx, + device=self.device, + dtype=self.dtype, + ) + + return TransformerEmbeddingFrontend( + embed, + pos_encoder, + dropout_p=self.config.dropout_p, + device=self.device, + dtype=self.dtype, + ) + + def 
build_decoder(self) -> TransformerDecoder: + """Build a Transformer decoder.""" + num_layers = self.config.num_decoder_layers + + layers = [self.build_decoder_layer() for _ in range(num_layers)] + + return StandardTransformerDecoder( + layers, + norm_order=TransformerNormOrder.PRE, + device=self.device, + dtype=self.dtype, + ) + + def build_decoder_layer(self) -> TransformerDecoderLayer: + """Build a Transformer decoder layer.""" + self_attn = self.build_attention(self.config.num_decoder_attn_heads) + + encoder_decoder_attn = self.build_attention(self.config.num_decoder_attn_heads) + + ffn = self.build_ffn() + + return StandardTransformerDecoderLayer( + self_attn, + encoder_decoder_attn, + ffn, + dropout_p=self.config.dropout_p, + norm_order=TransformerNormOrder.PRE, + device=self.device, + dtype=self.dtype, + ) + + def build_attention(self, num_heads: int) -> MultiheadAttention: + """Build a Transformer multi-head attention layer.""" + sdpa = create_default_sdpa(attn_dropout_p=self.config.dropout_p) + + return StandardMultiheadAttention( + self.config.model_dim, + num_heads, + sdpa=sdpa, + device=self.device, + dtype=self.dtype, + ) + + def build_ffn(self) -> FeedForwardNetwork: + """Build a Transformer feed-forward network.""" + return StandardFeedForwardNetwork( + self.config.model_dim, + self.config.ffn_inner_dim, + norm_order=TransformerNormOrder.PRE, + device=self.device, + dtype=self.dtype, + ) + + +def create_unity_t2u_model( + config: UnitYT2UConfig, + device: Optional[Device] = None, + dtype: Optional[DataType] = None, +) -> UnitYT2UModel: + """Create a UnitY T2U model. + + :param config: + The configuration to use. + :param device: + The device on which to initialize modules. + :param dtype: + The data type of module parameters and buffers. 
+ """ + return UnitYT2UBuilder(config, device, dtype).build_model() diff --git a/src/seamless_communication/models/unity/generator.py b/src/seamless_communication/models/unity/generator.py new file mode 100644 index 00000000..06c24381 --- /dev/null +++ b/src/seamless_communication/models/unity/generator.py @@ -0,0 +1,220 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from dataclasses import dataclass +from typing import Optional, Tuple + +import torch +from fairseq2.data.text import TextTokenizer +from fairseq2.generation import ( + Seq2SeqGenerator, + SequenceGeneratorOptions, + SequenceGeneratorOutput, + SequenceToTextGenerator, + SequenceToTextOutput, +) +from seamless_communication.models.unity.model import UnitYModel, UnitYX2TModel +from seamless_communication.models.unity.unit_tokenizer import ( + UnitTokenDecoder, + UnitTokenizer, +) +from fairseq2.nn.utils.module import infer_device +from torch import Tensor + + +class UnitYGenerator: + """Generates text translations and speech units from a UnitY model.""" + + model: UnitYModel + s2t_generator: SequenceToTextGenerator + t2t_generator: Optional[SequenceToTextGenerator] + unit_decoder: Optional[UnitTokenDecoder] + unit_generator: Optional[Seq2SeqGenerator] + + def __init__( + self, + model: UnitYModel, + text_tokenizer: TextTokenizer, + target_lang: str, + unit_tokenizer: Optional[UnitTokenizer] = None, + text_opts: Optional[SequenceGeneratorOptions] = None, + unit_opts: Optional[SequenceGeneratorOptions] = None, + ) -> None: + """ + :param model: + The UnitY model to use for generation. + :param text_tokenizer: + The text tokenizer to use. + :param unit_tokenizer: + The unit tokenizer to use. + :param target_lang: + The target language. + :param text_generator_opts: + The options to pass to the underlying text :class:`Seq2SeqGenerator`. 
+ :param unit_generator_opts: + The options to pass to the underlying unit :class:`Seq2SeqGenerator`. + """ + if model.t2u_model is None: + raise ValueError( + "`model` does not have a T2U sub-model. " + "For text generation only, " + "use `SequenceToTextGenerator` instead." + ) + + model.eval() + + self.model = model + + s2t_model = UnitYX2TModel( + encoder_frontend=model.speech_encoder_frontend, + encoder=model.speech_encoder, + decoder_frontend=model.text_decoder_frontend, + decoder=model.text_decoder, + final_proj=model.final_proj, + pad_idx=model.pad_idx, + ) + self.s2t_generator = SequenceToTextGenerator( + s2t_model, text_tokenizer, target_lang, text_opts + ) + + if model.text_encoder is None: + self.t2t_generator = None + else: + assert model.text_encoder_frontend is not None + assert model.text_encoder is not None + t2t_model = UnitYX2TModel( + encoder_frontend=model.text_encoder_frontend, + encoder=model.text_encoder, + decoder_frontend=model.text_decoder_frontend, + decoder=model.text_decoder, + final_proj=model.final_proj, + pad_idx=model.pad_idx, + ) + self.t2t_generator = SequenceToTextGenerator( + t2t_model, text_tokenizer, target_lang, text_opts + ) + + self.unit_generator = None + self.unit_decoder = None + # Set up unit generator. + if unit_tokenizer is not None: + self.unit_decoder = unit_tokenizer.create_decoder() + + unit_encoder = unit_tokenizer.create_encoder( + lang=target_lang, device=infer_device(model.t2u_model) + ) + + if unit_opts is None: + # Speech sequences are typically much longer than text sequences. 
+ unit_opts = SequenceGeneratorOptions( + soft_max_seq_len=(1, 50), hard_max_seq_len=5000 + ) + + self.unit_generator = Seq2SeqGenerator( + model.t2u_model, + unit_tokenizer.vocab_info, + unit_encoder.prefix_indices, + unit_opts, + ) + + @torch.inference_mode() + def __call__( + self, + source_seqs: Tensor, + source_seq_lens: Optional[Tensor], + input_modality: str = "speech", + output_modality: str = "speech", + ) -> Tuple[SequenceToTextOutput, Optional["SequenceToUnitOutput"]]: + """ + :param source_seqs: + The source sequences to use for generation. *Shape:* :math:`(N,S,*)`, + where :math:`N` is the batch size, :math:`S` is the sequence length, + and :math:`*` is any number of sequence-specific dimensions + including none. + :param source_seq_lens: + An array where each element represents the length of the sequence at + the same index in ``source_seqs``. *Shape:* :math:`(N)`, where + :math:`N` is the batch size. + :param input_modality: + The type of modality to encode. + :param output_modality: + The type of modality to decode. + + :returns: + - The output of the text generator. + - The output of the unit generator. + """ + + if input_modality == "speech": + text_output = self.s2t_generator.generate_ex(source_seqs, source_seq_lens) + elif input_modality == "text" and self.t2t_generator is not None: + text_output = self.t2t_generator.generate_ex(source_seqs, source_seq_lens) + elif input_modality == "text" and self.t2t_generator is None: + raise ValueError( + f"Please set use_text_encoder to True in your model config to encode text." + ) + else: + raise ValueError(f"Unsupported input_modality: {input_modality}") + + # We skip T2U when we only need to output text. + if output_modality == "text": + return text_output, None + + text_seqs, text_seq_lens = text_output.generator_output.collate() + + # Use the output of the text generator to compute the decoder output. 
+ decoder_output, decoder_padding_mask = self.model.decode( + text_seqs, + text_seq_lens, + text_output.encoder_output, + text_output.encoder_padding_mask, + ) + + assert self.model.t2u_model is not None + + t2u_encoder_output, t2u_encoder_padding_mask = self.model.t2u_model.encode( + decoder_output, decoder_padding_mask + ) + + assert self.unit_generator is not None + assert self.unit_decoder is not None + + unit_gen_output = self.unit_generator( + t2u_encoder_output, + t2u_encoder_padding_mask, + source_seq_len=source_seqs.size(1), + ) + + unit_seqs, _ = unit_gen_output.collate() + + # Convert to speech units. + units = self.unit_decoder(unit_seqs) + + unit_output = SequenceToUnitOutput( + units, unit_gen_output, t2u_encoder_output, t2u_encoder_padding_mask + ) + + return text_output, unit_output + + +@dataclass +class SequenceToUnitOutput: + units: Tensor + """The generated units.""" + + generator_output: SequenceGeneratorOutput + """The output of the underlying :class:`Seq2SeqGenerator`.""" + + t2u_encoder_output: Tensor + """The encoder output of the underlying UnitY T2U model used to generate the + units. *Shape:* :math:`(N,S_{enc},M)`, where :math:`N` is the batch size, + :math:`S_{enc}` is the encoder output sequence length, and :math:`M` is the + dimensionality of the model.""" + + t2u_encoder_padding_mask: Optional[Tensor] + """The float padding mask of :attr:`encoder_output`. *Shape:* + :math:`(N,S_{enc})`, where :math:`N` is the batch size and :math:`S_{enc}` + is the encoder output sequence length.""" diff --git a/src/seamless_communication/models/unity/loader.py b/src/seamless_communication/models/unity/loader.py new file mode 100644 index 00000000..c3ba86b3 --- /dev/null +++ b/src/seamless_communication/models/unity/loader.py @@ -0,0 +1,270 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, Dict, Mapping, Union, final + +import torch +from fairseq2.assets import AssetStore, download_manager +from fairseq2.assets.card import AssetCard +from fairseq2.models.nllb.loader import NllbTokenizerLoader +from seamless_communication.models.unity.builder import ( + UnitYConfig, + create_unity_model, + unity_archs, +) +from seamless_communication.models.unity.model import UnitYModel +from seamless_communication.models.unity.unit_tokenizer import UnitTokenizer +from fairseq2.models.utils.checkpoint_loader import upgrade_fairseq_checkpoint +from fairseq2.models.utils.model_loader import ModelConfigLoader, ModelLoader +from overrides import override as finaloverride + +from seamless_communication.assets import asset_store + + +@final +class UnitYLoader(ModelLoader[UnitYModel, UnitYConfig]): + """Loads UnitY models.""" + + @finaloverride + def _upgrade_checkpoint( + self, checkpoint: Mapping[str, Any], config: UnitYConfig + ) -> Mapping[str, Any]: + state_dict = checkpoint["model"] + + # Check if we have a fairseq2 checkpoint. + if "decoder_frontend.embed.weight" in state_dict: + return checkpoint + + key_map = self._fairseq_key_map(config) + + checkpoint = upgrade_fairseq_checkpoint(checkpoint, key_map) + + state_dict = checkpoint["model"] + + # Use the built-in version attribute of `torch.Module`. + del state_dict["target_letter_decoder.version"] + del state_dict["target_letter_decoder.embed_positions._float_tensor"] + + if config.use_text_encoder: + if "text_encoder.version" in state_dict: + del state_dict["text_encoder.version"] + if "text_encoder.embed_positions._float_tensor" in state_dict: + del state_dict["text_encoder.embed_positions._float_tensor"] + + # Remnant of wav2vec2 pretraining, not needed for eval or fine-tuning. 
+ del state_dict["encoder.w2v_encoder.w2v_model.mask_emb"] + + embeds = state_dict["final_proj.weight"] + + # fairseq had a bug that accidentally introduced a dummy token in the + # embedding table of NLLB-100. We just discard it. + if embeds.size(0) == 256103: # means NLLB-100 + embeds = embeds[:-1] + + state_dict["final_proj.weight"] = embeds + + # fairseq checkpoints have duplicate embedding weights. Ensure that we + # use a single embedding table in fairseq2. + state_dict["text_decoder_frontend.embed.weight"] = embeds + + if config.use_text_encoder: + state_dict["text_encoder_frontend.embed.weight"] = embeds + + # The embedding positions of the control symbols in fairseq's dict do + # not match the SentencePiece model of the tokenizer. + with torch.inference_mode(): + # (BOS, PAD, EOS, UNK) -> (PAD, UNK, BOS, EOS) + embeds[[0, 1, 2, 3]] = embeds[[1, 3, 0, 2]] + + if config.t2u_config is not None: + # fairseq checkpoints have duplicate embedding weights. Ensure that we + # use a single embedding table in fairseq2. 
+ embeds = state_dict["t2u_model.final_proj.weight"] + + state_dict["t2u_model.decoder_frontend.embed.weight"] = embeds + + return checkpoint + + @staticmethod + def _fairseq_key_map(config: UnitYConfig) -> Dict[str, str]: + key_map = { + # fmt: off + + # Speech Encoder + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.pos_conv\.0\.": r"speech_encoder_frontend.pos_encoder.conv.", + r"^encoder\.w2v_encoder\.w2v_model\.layer_norm\.": r"speech_encoder_frontend.post_extract_layer_norm.", + r"^encoder\.w2v_encoder\.w2v_model\.post_extract_proj\.": r"speech_encoder_frontend.model_dim_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.conv_module\.batch_norm\.": r"speech_encoder.inner.layers.\1.conv.batch_norm.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.conv_module\.depthwise_conv\.": r"speech_encoder.inner.layers.\1.conv.depthwise_conv.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.conv_module\.layer_norm\.": r"speech_encoder.inner.layers.\1.conv_layer_norm.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.conv_module\.pointwise_conv1\.": r"speech_encoder.inner.layers.\1.conv.pointwise_conv1.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.conv_module\.pointwise_conv2\.": r"speech_encoder.inner.layers.\1.conv.pointwise_conv2.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.ffn(1|2)\.layer_norm\.": r"speech_encoder.inner.layers.\1.ffn\2_layer_norm.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.ffn(1|2)\.w_1\.": r"speech_encoder.inner.layers.\1.ffn\2.inner_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.ffn(1|2)\.w_2\.": r"speech_encoder.inner.layers.\1.ffn\2.output_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"speech_encoder.inner.layers.\1.self_attn_layer_norm.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.linear_q\.": 
r"speech_encoder.inner.layers.\1.self_attn.q_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.linear_k\.": r"speech_encoder.inner.layers.\1.self_attn.k_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.linear_v\.": r"speech_encoder.inner.layers.\1.self_attn.v_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.linear_out\.": r"speech_encoder.inner.layers.\1.self_attn.output_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.linear_pos\.": r"speech_encoder.inner.layers.\1.self_attn.sdpa.r_proj.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.pos_bias_u": r"speech_encoder.inner.layers.\1.self_attn.sdpa.u_bias", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.self_attn\.pos_bias_v": r"speech_encoder.inner.layers.\1.self_attn.sdpa.v_bias", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layers\.([0-9]+)\.final_layer_norm\.": r"speech_encoder.inner.layers.\1.layer_norm.", + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layer_norm\.": r"speech_encoder.inner.layer_norm.", + + # Speech Encoder Adaptor + r"^encoder\.adaptor\.proj\.0\.": r"speech_encoder.proj1.", + r"^encoder\.adaptor\.proj\.2\.": r"speech_encoder.proj2.", + r"^encoder\.adaptor\.out_ln\.": r"speech_encoder.layer_norm.", + + # Text Encoder + r"^text_encoder\.embed_tokens\.": r"text_encoder_frontend.embed.", + r"^text_encoder\.layers\.([0-9]+)\.self_attn\.out_proj\.": r"text_encoder.layers.\1.self_attn.output_proj.", + r"^text_encoder\.layers\.([0-9]+)\.self_attn\.": r"text_encoder.layers.\1.self_attn.", + r"^text_encoder\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"text_encoder.layers.\1.self_attn_layer_norm.", + r"^text_encoder\.layers\.([0-9]+)\.encoder_attn\.out_proj\.": r"text_encoder.layers.\1.encoder_decoder_attn.output_proj.", + r"^text_encoder\.layers\.([0-9]+)\.encoder_attn\.": 
r"text_encoder.layers.\1.encoder_decoder_attn.", + r"^text_encoder\.layers\.([0-9]+)\.encoder_attn_layer_norm\.": r"text_encoder.layers.\1.encoder_decoder_attn_layer_norm.", + r"^text_encoder\.layers\.([0-9]+)\.fc1\.": r"text_encoder.layers.\1.ffn.inner_proj.", + r"^text_encoder\.layers\.([0-9]+)\.fc2\.": r"text_encoder.layers.\1.ffn.output_proj.", + r"^text_encoder\.layers\.([0-9]+)\.final_layer_norm\.": r"text_encoder.layers.\1.ffn_layer_norm.", + r"^text_encoder\.layer_norm\.": r"text_encoder.layer_norm.", + + # Text Decoder + r"^target_letter_decoder\.embed_tokens\.": r"text_decoder_frontend.embed.", + r"^target_letter_decoder\.layers\.([0-9]+)\.self_attn\.out_proj\.": r"text_decoder.layers.\1.self_attn.output_proj.", + r"^target_letter_decoder\.layers\.([0-9]+)\.self_attn\.": r"text_decoder.layers.\1.self_attn.", + r"^target_letter_decoder\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"text_decoder.layers.\1.self_attn_layer_norm.", + r"^target_letter_decoder\.layers\.([0-9]+)\.encoder_attn\.out_proj\.": r"text_decoder.layers.\1.encoder_decoder_attn.output_proj.", + r"^target_letter_decoder\.layers\.([0-9]+)\.encoder_attn\.": r"text_decoder.layers.\1.encoder_decoder_attn.", + r"^target_letter_decoder\.layers\.([0-9]+)\.encoder_attn_layer_norm\.": r"text_decoder.layers.\1.encoder_decoder_attn_layer_norm.", + r"^target_letter_decoder\.layers\.([0-9]+)\.fc1\.": r"text_decoder.layers.\1.ffn.inner_proj.", + r"^target_letter_decoder\.layers\.([0-9]+)\.fc2\.": r"text_decoder.layers.\1.ffn.output_proj.", + r"^target_letter_decoder\.layers\.([0-9]+)\.final_layer_norm\.": r"text_decoder.layers.\1.ffn_layer_norm.", + r"^target_letter_decoder\.layer_norm\.": r"text_decoder.layer_norm.", + r"^target_letter_decoder\.output_projection\.": r"final_proj.", + + # T2U Encoder + r"^synthesizer_encoder\.layers\.([0-9]+)\.self_attn\.out_proj\.": r"t2u_model.encoder.layers.\1.self_attn.output_proj.", + r"^synthesizer_encoder\.layers\.([0-9]+)\.self_attn\.": 
r"t2u_model.encoder.layers.\1.self_attn.", + r"^synthesizer_encoder\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"t2u_model.encoder.layers.\1.self_attn_layer_norm.", + r"^synthesizer_encoder\.layers\.([0-9]+)\.fc1\.": r"t2u_model.encoder.layers.\1.ffn.inner_proj.", + r"^synthesizer_encoder\.layers\.([0-9]+)\.fc2\.": r"t2u_model.encoder.layers.\1.ffn.output_proj.", + r"^synthesizer_encoder\.layers\.([0-9]+)\.final_layer_norm\.": r"t2u_model.encoder.layers.\1.ffn_layer_norm.", + r"^synthesizer_encoder\.layer_norm\.": r"t2u_model.encoder.layer_norm.", + + # T2U Decoder + r"^decoder\.embed_tokens\.": r"t2u_model.decoder_frontend.embed.", + r"^decoder\.layers\.([0-9]+)\.self_attn\.out_proj\.": r"t2u_model.decoder.layers.\1.self_attn.output_proj.", + r"^decoder\.layers\.([0-9]+)\.self_attn\.": r"t2u_model.decoder.layers.\1.self_attn.", + r"^decoder\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"t2u_model.decoder.layers.\1.self_attn_layer_norm.", + r"^decoder\.layers\.([0-9]+)\.encoder_attn\.out_proj\.": r"t2u_model.decoder.layers.\1.encoder_decoder_attn.output_proj.", + r"^decoder\.layers\.([0-9]+)\.encoder_attn\.": r"t2u_model.decoder.layers.\1.encoder_decoder_attn.", + r"^decoder\.layers\.([0-9]+)\.encoder_attn_layer_norm\.": r"t2u_model.decoder.layers.\1.encoder_decoder_attn_layer_norm.", + r"^decoder\.layers\.([0-9]+)\.fc1\.": r"t2u_model.decoder.layers.\1.ffn.inner_proj.", + r"^decoder\.layers\.([0-9]+)\.fc2\.": r"t2u_model.decoder.layers.\1.ffn.output_proj.", + r"^decoder\.layers\.([0-9]+)\.final_layer_norm\.": r"t2u_model.decoder.layers.\1.ffn_layer_norm.", + r"^decoder\.layer_norm\.": r"t2u_model.decoder.layer_norm.", + r"^decoder\.output_projection\.": r"t2u_model.final_proj.", + # fmt: on + } + + # In normal circumstances, we should never encounter a `LayerNorm` when + # `use_conformer` is `True`. 
Unfortunately, the w2v-BERT pretraining in + # fairseq was accidentally run with a pre-LN encoder, and ended up with + # a redundant `LayerNorm` right after the Conformer blocks. We mitigate + # that issue here by moving that `LayerNorm` to the adaptor block. + if config.w2v2_encoder_config.use_conformer: + key_map.update( + { + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layer_norm\.": r"speech_encoder.inner_layer_norm." + } + ) + else: + key_map.update( + { + r"^encoder\.w2v_encoder\.w2v_model\.encoder\.layer_norm\.": r"speech_encoder.inner.layer_norm." + } + ) + + # fmt: off + if config.use_conformer_adaptor: + key_map.update( + { + r"^encoder\.adaptor\.layers\.([0-9]+)\.self_attn\.out_proj\.": r"speech_encoder.adaptor_layers.\1.block.self_attn.output_proj.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.self_attn\.": r"speech_encoder.adaptor_layers.\1.block.self_attn.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"speech_encoder.adaptor_layers.\1.block.self_attn_layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.ffn(1|2)\.layer_norm\.": r"speech_encoder.adaptor_layers.\1.block.ffn\2_layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.ffn(1|2)\.w_1\.": r"speech_encoder.adaptor_layers.\1.block.ffn\2.inner_proj.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.ffn(1|2)\.w_2\.": r"speech_encoder.adaptor_layers.\1.block.ffn\2.output_proj.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_module\.batch_norm\.": r"speech_encoder.adaptor_layers.\1.block.conv.batch_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_module\.depthwise_conv\.": r"speech_encoder.adaptor_layers.\1.block.conv.depthwise_conv.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_module\.layer_norm\.": r"speech_encoder.adaptor_layers.\1.block.conv_layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_module\.pointwise_conv1\.": r"speech_encoder.adaptor_layers.\1.block.conv.pointwise_conv1.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_module\.pointwise_conv2\.": 
r"speech_encoder.adaptor_layers.\1.block.conv.pointwise_conv2.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.final_layer_norm\.": r"speech_encoder.adaptor_layers.\1.block.layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_ln\.": r"speech_encoder.adaptor_layers.\1.layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.conv_pool\.1\.": r"speech_encoder.adaptor_layers.\1.conv.", + } + ) + else: + key_map.update( + { + r"^encoder\.adaptor\.layers\.([0-9]+)\.residual_layer_norm\.": r"speech_encoder.adaptor_layers.\1.residual_layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.residual_pool\.1\.": r"speech_encoder.adaptor_layers.\1.residual_conv.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.attn_pool\.1\.": r"speech_encoder.adaptor_layers.\1.self_attn_conv.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.self_attn\.out_proj\.": r"speech_encoder.adaptor_layers.\1.self_attn.output_proj.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.self_attn\.": r"speech_encoder.adaptor_layers.\1.self_attn.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.self_attn_layer_norm\.": r"speech_encoder.adaptor_layers.\1.self_attn_layer_norm.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.fc1\.": r"speech_encoder.adaptor_layers.\1.ffn.inner_proj.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.fc2\.": r"speech_encoder.adaptor_layers.\1.ffn.output_proj.", + r"^encoder\.adaptor\.layers\.([0-9]+)\.final_layer_norm\.": r"speech_encoder.adaptor_layers.\1.ffn_layer_norm.", + } + ) + # fmt: on + + return key_map + + +load_unity_model = UnitYLoader( + asset_store, download_manager, create_unity_model, unity_archs +) + + +load_unity_config = ModelConfigLoader[UnitYConfig](asset_store, unity_archs) + + +load_unity_text_tokenizer = NllbTokenizerLoader(asset_store, download_manager) + + +class UnitYUnitTokenizerLoader: + """Loads speech unit tokenizers of UnitY models.""" + + def __init__(self, asset_store: AssetStore) -> None: + """ + :param asset_store: + The asset store to retrieve the model information. 
+ """ + self.asset_store = asset_store + + def __call__(self, model_name_or_card: Union[str, AssetCard]) -> UnitTokenizer: + """ + :param model_name_or_card: + The name of the model or an already loaded AssetCard + """ + + if isinstance(model_name_or_card, AssetCard): + card = model_name_or_card + else: + card = self.asset_store.retrieve_card(model_name_or_card) + + return UnitTokenizer( + card.field("num_units").as_(int), card.field("unit_langs").as_list(str) + ) + + +load_unity_unit_tokenizer = UnitYUnitTokenizerLoader(asset_store) diff --git a/src/seamless_communication/models/unity/model.py b/src/seamless_communication/models/unity/model.py new file mode 100644 index 00000000..1e8aa370 --- /dev/null +++ b/src/seamless_communication/models/unity/model.py @@ -0,0 +1,326 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from dataclasses import dataclass +from typing import Optional, Tuple, final + +from fairseq2.models.encoder_decoder import EncoderDecoderModel, Seq2SeqDecoder +from fairseq2.models.seq2seq import Seq2SeqBatch +from fairseq2.models.sequence import SequenceModelOutput +from fairseq2.models.transformer.frontend import TransformerFrontend +from fairseq2.nn.incremental_state import IncrementalStateBag +from fairseq2.nn.projection import Projection +from fairseq2.nn.transformer import TransformerDecoder, TransformerEncoder +from fairseq2.nn.utils.module import check_model_dim +from overrides import final as finaloverride +from torch import Tensor +from torch.nn import Module + + +@final +class UnitYModel(EncoderDecoderModel): + """Represents a UnitY model as described in + :cite:t`https://doi.org/10.48550/arxiv.2212.08055`. + + Note that this implementation is augmented with a text encoder to enable + translating from text. 
+ """ + + model_dim: int + input_modality: str + speech_encoder_frontend: TransformerFrontend + speech_encoder: TransformerEncoder + text_encoder_frontend: Optional[TransformerFrontend] + text_encoder: Optional[TransformerEncoder] + text_decoder_frontend: TransformerFrontend + text_decoder: TransformerDecoder + final_proj: Projection + t2u_model: Optional["UnitYT2UModel"] + pad_idx: Optional[int] + + def __init__( + self, + speech_encoder_frontend: TransformerFrontend, + speech_encoder: TransformerEncoder, + text_encoder_frontend: Optional[TransformerFrontend], + text_encoder: Optional[TransformerEncoder], + text_decoder_frontend: TransformerFrontend, + text_decoder: TransformerDecoder, + final_proj: Projection, + t2u_model: Optional["UnitYT2UModel"], + pad_idx: Optional[int], + input_modality: str = "speech", + ) -> None: + model_dim = speech_encoder.model_dim + + super().__init__(model_dim) + + self.input_modality = input_modality + + self.speech_encoder_frontend = speech_encoder_frontend + self.speech_encoder = speech_encoder + + if text_encoder is not None: + if text_encoder_frontend is None: + raise ValueError( + "Both `text_encoder` and `text_encoder_frontend` must be specified, but `text_encoder_frontend` is `None`." + ) + + self.text_encoder_frontend = text_encoder_frontend + self.text_encoder = text_encoder + else: + if text_encoder_frontend is not None: + raise ValueError( + "Both `text_encoder` and `text_encoder_frontend` must be specified, but `text_encoder` is `None`." 
+ ) + + self.register_module("text_encoder_frontend", None) + self.register_module("text_encoder", None) + + self.text_decoder_frontend = text_decoder_frontend + self.text_decoder = text_decoder + + self.final_proj = final_proj + + if t2u_model is not None: + self.t2u_model = t2u_model + else: + self.register_module("t2u_model", None) + + self.pad_idx = pad_idx + + check_model_dim(self) + + @finaloverride + def encode( + self, seqs: Tensor, seq_lens: Optional[Tensor] + ) -> Tuple[Tensor, Optional[Tensor]]: + if self.input_modality == "speech": + return self.encode_speech(seqs, seq_lens) + + if self.input_modality == "text": + return self.encode_text(seqs, seq_lens) + + raise RuntimeError( + f"`input_modality` must be 'speech' or 'text', but is '{self.input_modality}' instead." + ) + + def encode_speech( + self, seqs: Tensor, seq_lens: Optional[Tensor] + ) -> Tuple[Tensor, Optional[Tensor]]: + seqs, padding_mask = self.speech_encoder_frontend(seqs, seq_lens) + + return self.speech_encoder(seqs, padding_mask) # type: ignore[no-any-return] + + def encode_text( + self, seqs: Tensor, seq_lens: Optional[Tensor] + ) -> Tuple[Tensor, Optional[Tensor]]: + if self.text_encoder is None: + raise ValueError( + "`encode_text()` requires a text encoder, but the current UnitY model does not have one." 
+ ) + + assert self.text_encoder_frontend is not None + + seqs, padding_mask = self.text_encoder_frontend(seqs, seq_lens) + + return self.text_encoder(seqs, padding_mask) # type: ignore[no-any-return] + + @finaloverride + def decode( + self, + seqs: Tensor, + seq_lens: Optional[Tensor], + encoder_output: Tensor, + encoder_padding_mask: Optional[Tensor], + state_bag: Optional[IncrementalStateBag] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + seqs, padding_mask = self.text_decoder_frontend(seqs, seq_lens, state_bag) + + return self.text_decoder( # type: ignore[no-any-return] + seqs, padding_mask, encoder_output, encoder_padding_mask, state_bag + ) + + @finaloverride + def project( + self, decoder_output: Tensor, decoder_padding_mask: Optional[Tensor] + ) -> SequenceModelOutput: + logits = self.final_proj(decoder_output) + + return SequenceModelOutput(logits, self.pad_idx) + + +@final +class UnitYX2TModel(EncoderDecoderModel): + model_dim: int + encoder_frontend: TransformerFrontend + encoder: TransformerEncoder + decoder_frontend: TransformerFrontend + decoder: TransformerDecoder + final_proj: Projection + pad_idx: Optional[int] + + def __init__( + self, + encoder_frontend: TransformerFrontend, + encoder: TransformerEncoder, + decoder_frontend: TransformerFrontend, + decoder: TransformerDecoder, + final_proj: Projection, + pad_idx: Optional[int], + ) -> None: + model_dim = encoder.model_dim + super().__init__(model_dim) + + self.encoder_frontend = encoder_frontend + self.encoder = encoder + self.decoder_frontend = decoder_frontend + self.decoder = decoder + self.final_proj = final_proj + self.pad_idx = pad_idx + check_model_dim(self) + + @finaloverride + def encode( + self, seqs: Tensor, seq_lens: Optional[Tensor] + ) -> Tuple[Tensor, Optional[Tensor]]: + seqs, padding_mask = self.encoder_frontend(seqs, seq_lens) + return self.encoder(seqs, padding_mask) # type: ignore[no-any-return] + + @finaloverride + def decode( + self, + seqs: Tensor, + seq_lens: 
Optional[Tensor], + encoder_output: Tensor, + encoder_padding_mask: Optional[Tensor], + state_bag: Optional[IncrementalStateBag] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + seqs, padding_mask = self.decoder_frontend(seqs, seq_lens, state_bag) + + return self.decoder( # type: ignore[no-any-return] + seqs, padding_mask, encoder_output, encoder_padding_mask, state_bag + ) + + @finaloverride + def project( + self, decoder_output: Tensor, decoder_padding_mask: Optional[Tensor] + ) -> SequenceModelOutput: + logits = self.final_proj(decoder_output) + + return SequenceModelOutput(logits, self.pad_idx) + + +@final +class UnitYT2UModel(Module, Seq2SeqDecoder): + """Represents a UnitY T2U model as described in + :cite:t`https://doi.org/10.48550/arxiv.2212.08055`.""" + + model_dim: int + encoder: Optional[TransformerEncoder] + decoder_frontend: TransformerFrontend + decoder: TransformerDecoder + final_proj: Projection + pad_idx: Optional[int] + + def __init__( + self, + encoder: Optional[TransformerEncoder], + decoder_frontend: TransformerFrontend, + decoder: TransformerDecoder, + final_proj: Projection, + pad_idx: Optional[int], + ) -> None: + super().__init__() + + self.model_dim = decoder.model_dim + + if encoder is not None: + if encoder.model_dim != self.model_dim: + raise ValueError( + f"`model_dim` of `encoder` and `model_dim` of `decoder` must be equal, but are {encoder.model_dim} and {self.model_dim} instead." + ) + + self.encoder = encoder + else: + self.register_module("encoder", None) + + if decoder_frontend.model_dim != self.model_dim: + raise ValueError( + f"`model_dim` of `decoder_frontend` and `model_dim` of `decoder` must be equal, but are {decoder_frontend.model_dim} and {self.model_dim} instead." 
+ ) + + self.decoder_frontend = decoder_frontend + self.decoder = decoder + + self.final_proj = final_proj + + self.pad_idx = pad_idx + + def forward(self, batch: Seq2SeqBatch) -> SequenceModelOutput: + encoder_output, encoder_padding_mask = self.encode( + batch.source_seqs, batch.source_seq_lens + ) + + decoder_output, decoder_padding_mask = self.decode( + batch.target_seqs, + batch.target_seq_lens, + encoder_output, + encoder_padding_mask, + ) + + return self.project(decoder_output, decoder_padding_mask) + + def encode( + self, + text_decoder_output: Tensor, + text_decoder_padding_mask: Optional[Tensor], + ) -> Tuple[Tensor, Optional[Tensor]]: + if self.encoder is None: + return text_decoder_output, text_decoder_padding_mask + + return self.encoder(text_decoder_output, text_decoder_padding_mask) # type: ignore[no-any-return] + + def decode( + self, + seqs: Tensor, + seq_lens: Optional[Tensor], + encoder_output: Tensor, + encoder_padding_mask: Optional[Tensor], + state_bag: Optional[IncrementalStateBag] = None, + ) -> Tuple[Tensor, Optional[Tensor]]: + seqs, padding_mask = self.decoder_frontend(seqs, seq_lens, state_bag) + + return self.decoder( # type: ignore[no-any-return] + seqs, padding_mask, encoder_output, encoder_padding_mask, state_bag + ) + + def project( + self, decoder_output: Tensor, decoder_padding_mask: Optional[Tensor] + ) -> SequenceModelOutput: + logits = self.final_proj(decoder_output) + + return SequenceModelOutput(logits, self.pad_idx) + + +@dataclass +class UnitYOutput: + """Holds the output of a UnitY model.""" + + s2t_output: SequenceModelOutput + """The S2T output of the multitask model.""" + + mt_output: SequenceModelOutput + """The MT output of the multitask model.""" + + t2u_output: SequenceModelOutput + """The output of the T2U model.""" + + def compute_loss( + self, targets: Tensor, ignore_prefix_size: int = 0, label_smoothing: float = 0.0 + ) -> None: + # TODO: Implement R-Drop based loss + pass diff --git 
a/src/seamless_communication/models/unity/unit_tokenizer.py b/src/seamless_communication/models/unity/unit_tokenizer.py new file mode 100644 index 00000000..40fabcfd --- /dev/null +++ b/src/seamless_communication/models/unity/unit_tokenizer.py @@ -0,0 +1,199 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Dict, Optional, Sequence + +import torch +from fairseq2.data import VocabularyInfo +from fairseq2.typing import Device +from torch import Tensor + + +class UnitTokenizer: + """Represents a tokenizer to encode and decode UnitY speech units.""" + + num_units: int + langs: Sequence[str] + lang_map: Dict[str, int] + + def __init__(self, num_units: int, langs: Sequence[str]) -> None: + """ + :param num_units: + The number of speech units. + :param langs: + The list of supported languages. + """ + self.num_units = num_units + + self.langs = langs + + self.lang_map = {lang: idx for idx, lang in enumerate(langs)} + + # For legacy reasons, we have to repeat the language symbols twice, + # along with a placeholder `` token. + vocab_size = num_units + (2 * (len(langs) + 1)) + 4 + + # We use fairseq's control symbol order. + self.vocab_info = VocabularyInfo( + size=vocab_size, bos_idx=0, pad_idx=1, eos_idx=2, unk_idx=3 + ) + + def lang_to_index(self, lang: str) -> int: + """Return the symbol index of the specified language.""" + # +4 for PAD/EOS/BOS/UNK, and +1 for the `` token. + try: + return self.num_units + len(self.langs) + self.lang_map[lang] + 5 + except KeyError: + langs = ", ".join(self.langs) + + raise ValueError( + f"`lang` must be one of the supported languages, but is '{lang}' instead. 
Supported languages: {langs}" + ) + + def index_to_lang(self, idx: int) -> str: + """Return the language of the specified language symbol index.""" + relative_idx = idx - self.num_units - len(self.langs) - 5 + + if relative_idx < 0 or relative_idx >= len(self.langs): + raise ValueError( + f"`idx` must correspond to one of the supported language symbol indices (0 to {len(self.langs) - 1}), but is {idx} instead." + ) + + return self.langs[relative_idx] + + def create_encoder( + self, lang: str, device: Optional[Device] = None + ) -> "UnitTokenEncoder": + """Create a token encoder. + + :param lang: + The language of generated token indices. + """ + return UnitTokenEncoder(self, lang, device) + + def create_decoder(self) -> "UnitTokenDecoder": + """Create a token decoder.""" + return UnitTokenDecoder(self) + + +class UnitTokenEncoder: + """Encodes speech units into token indices.""" + + tokenizer: UnitTokenizer + eos_idx: int + unk_idx: int + lang_idx: int + prefix_indices: Tensor + + def __init__( + self, tokenizer: UnitTokenizer, lang: str, device: Optional[Device] = None + ) -> None: + """ + :param tokenizer: + The unit tokenizer to use. + :param lang: + The language of generated token indices. + """ + if not lang in tokenizer.lang_map: + langs = ", ".join(tokenizer.langs) + + raise ValueError( + f"`lang` must be one of the supported languages, but is '{lang}' instead. Supported languages: {langs}" + ) + + self.tokenizer = tokenizer + + assert tokenizer.vocab_info.eos_idx is not None + assert tokenizer.vocab_info.unk_idx is not None + + self.eos_idx = tokenizer.vocab_info.eos_idx + self.unk_idx = tokenizer.vocab_info.unk_idx + + self.lang_idx = tokenizer.lang_to_index(lang) + + if device is None: + device = Device("cpu") + + # We always start sequences with EOS, followed by the language token. 
+ self.prefix_indices = torch.tensor( + [self.eos_idx, self.lang_idx], device=device, dtype=torch.int64 + ) + + def __call__(self, units: Tensor) -> Tensor: + """Encode ``units`` to token indices. + + :param units: + The speech units to encode. *Shape:* :math:`(N,S)`, where :math:`N` + is the batch size and :math:`S` is the sequence length. + + :returns: + The token indices corresponding to ``units``. *Shape:* + :math:`(N,S_{tok})` ,where :math:`N` is the batch size and + :math`S_{tok}` is the sequence length of the token indices. + """ + batch_size = units.size(0) + + token_indices = torch.cat( + [self.prefix_indices.clone().expand(batch_size, -1), units.detach()], dim=1 + ) + + # Ensure that non-symbol indices larger than `num_units` are replaced + # with UNK. + seqs = token_indices[:, 2:] + + # Add offset for control symbols. + seqs += 4 + + seqs[seqs >= self.tokenizer.num_units + 4] = self.unk_idx + + return token_indices + + +class UnitTokenDecoder: + """Decodes speech units from token indices.""" + + eos_idx: int + pad_idx: int + + def __init__(self, tokenizer: UnitTokenizer) -> None: + """ + :param tokenizer: + The unit tokenizer to use. + """ + assert tokenizer.vocab_info.eos_idx is not None + assert tokenizer.vocab_info.pad_idx is not None + + self.eos_idx = tokenizer.vocab_info.eos_idx + self.pad_idx = tokenizer.vocab_info.pad_idx + + def __call__(self, token_indices: Tensor) -> Tensor: + """Decode ``token_indices`` to speech units. + + :param token_indices: + The token indices to decode. *Shape:* :math:`(N,S)`, where :math:`N` + is the batch size and :math:`S` is the sequence length. + + :returns: + The speech units corresponding to ``token_indices``. *Shape:* + :math:`(N,S_{unt})`, where :math:`N` is the batch size and + :math`S_{unt}` is the sequence length of the speech units. + """ + if token_indices.size(1) == 0: + return token_indices + + # Remove the prefix EOS symbol. The language symbol is still expected to + # be part of the decoded output. 
+ units = token_indices[:, 1:].clone().detach() + + # Also, replace EOS with PAD at sequence ends. + units[units == self.eos_idx] = self.pad_idx + + units[units == self.pad_idx] = self.pad_idx + 4 + + # Remove offset of control symbols (exclude language symbol). + units[:, 1:] -= 4 + + return units diff --git a/src/seamless_communication/models/vocoder/__init__.py b/src/seamless_communication/models/vocoder/__init__.py new file mode 100644 index 00000000..5aba9a1a --- /dev/null +++ b/src/seamless_communication/models/vocoder/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from seamless_communication.models.vocoder.builder import ( + VocoderBuilder as VocoderBuilder, +) +from seamless_communication.models.vocoder.builder import VocoderConfig as VocoderConfig +from seamless_communication.models.vocoder.codehifigan import ( + CodeGenerator as CodeGenerator, +) +from seamless_communication.models.vocoder.hifigan import Generator as Generator +from seamless_communication.models.vocoder.loader import VocoderLoader as VocoderLoader +from seamless_communication.models.vocoder.loader import ( + load_vocoder_model as load_vocoder_model, +) +from seamless_communication.models.vocoder.vocoder import Vocoder as Vocoder diff --git a/src/seamless_communication/models/vocoder/builder.py b/src/seamless_communication/models/vocoder/builder.py new file mode 100644 index 00000000..eeb2da76 --- /dev/null +++ b/src/seamless_communication/models/vocoder/builder.py @@ -0,0 +1,134 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +from dataclasses import dataclass +from typing import Dict, List, Optional + +from fairseq2.models.utils.arch_registry import ArchitectureRegistry +from fairseq2.typing import DataType, Device + +from seamless_communication.models.vocoder.codehifigan import CodeGenerator +from seamless_communication.models.vocoder.vocoder import Vocoder + + +@dataclass +class VocoderConfig: + """Holds the configuration of a Vocoder model.""" + + upsample_rates: List[int] + upsample_kernel_sizes: List[int] + upsample_initial_channel: int + resblock_kernel_sizes: List[int] + resblock_dilation_sizes: List[List[int]] + model_in_dim: int + num_embeddings: int + embedding_dim: int + dur_predictor_params: Dict[str, float] + lang_embedding_dim: int + num_langs: int + spkr_embedding_dim: int + num_spkrs: int + lang_spkr_idx_map: dict + + +vocoder_archs = ArchitectureRegistry[VocoderConfig]("vocoder_code_hifigan") + + +vocoder_arch = vocoder_archs.marker + + +@vocoder_arch("base") +def _base_vocoder() -> VocoderConfig: + return VocoderConfig( + upsample_rates=[5, 4, 4, 2, 2], + upsample_kernel_sizes=[11, 8, 8, 4, 4], + upsample_initial_channel=512, + resblock_kernel_sizes=[3, 7, 11], + resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]], + model_in_dim=1792, + num_embeddings=10000, + embedding_dim=1280, + dur_predictor_params={ + "encoder_embed_dim": 1280, + "var_pred_hidden_dim": 1280, + "var_pred_kernel_size": 3, + "var_pred_dropout": 0.5, + }, + lang_embedding_dim=256, + num_langs=36, + spkr_embedding_dim=256, + num_spkrs=200, + lang_spkr_idx_map={}, + ) + + +class VocoderBuilder: + """Builds modules of a vocoder model (Code Hifigan) as described in + :cite:t`https://github.com/facebookresearch/speech-resynthesis`. + + To tweak the architecture, you can derive from this class and override the + corresponding methods. 
+ """ + + config: VocoderConfig + device: Optional[Device] + dtype: Optional[DataType] + + def __init__( + self, + config: VocoderConfig, + device: Optional[Device] = None, + dtype: Optional[DataType] = None, + ) -> None: + """ + :param config: + The configuration to use. + :param device: + The device on which to initialize modules. + :param dtype: + The data type of module parameters and buffers. + """ + self.config = config + self.device = device + self.dtype = dtype + + def build_model(self) -> Vocoder: + """Build a model.""" + + code_generator = CodeGenerator( + self.config.upsample_rates, + self.config.upsample_kernel_sizes, + self.config.upsample_initial_channel, + self.config.resblock_kernel_sizes, + self.config.resblock_dilation_sizes, + self.config.model_in_dim, + self.config.num_embeddings, + self.config.embedding_dim, + self.config.dur_predictor_params, + self.config.lang_embedding_dim, + self.config.num_langs, + self.config.spkr_embedding_dim, + self.config.num_spkrs, + ) + return Vocoder(code_generator, self.config.lang_spkr_idx_map) + + +def create_vocoder_model( + config: VocoderConfig, + device: Optional[Device] = None, + dtype: Optional[DataType] = None, +) -> Vocoder: + """Create a Vocoder model. + + :param config: + The configuration to use. + :param device: + The device on which to initialize modules. + :param dtype: + The data type of module parameters and buffers. + """ + + return VocoderBuilder(config, device, dtype).build_model() diff --git a/src/seamless_communication/models/vocoder/codehifigan.py b/src/seamless_communication/models/vocoder/codehifigan.py new file mode 100644 index 00000000..72e5b3b2 --- /dev/null +++ b/src/seamless_communication/models/vocoder/codehifigan.py @@ -0,0 +1,137 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+from typing import Any, Dict, List, Optional + +import torch +import torch.nn as nn +from torch import Tensor +from torch.nn import Dropout + +from seamless_communication.models.vocoder.hifigan import Generator + + +class VariancePredictor(nn.Module): + def __init__( + self, + encoder_embed_dim: int, + var_pred_hidden_dim: int, + var_pred_kernel_size: int, + var_pred_dropout: float, + ): + super().__init__() + self.conv1 = nn.Sequential( + nn.Conv1d( + encoder_embed_dim, + var_pred_hidden_dim, + kernel_size=var_pred_kernel_size, + padding=(var_pred_kernel_size - 1) // 2, + ), + nn.ReLU(), + ) + self.ln1 = nn.LayerNorm(var_pred_hidden_dim) + self.dropout_module = Dropout(p=var_pred_dropout) + self.conv2 = nn.Sequential( + nn.Conv1d( + var_pred_hidden_dim, + var_pred_hidden_dim, + kernel_size=var_pred_kernel_size, + padding=1, + ), + nn.ReLU(), + ) + self.ln2 = nn.LayerNorm(var_pred_hidden_dim) + self.proj = nn.Linear(var_pred_hidden_dim, 1) + + def forward(self, x: Tensor) -> Any: + # Input: B x T x C; Output: B x T + x = self.conv1(x.transpose(1, 2)).transpose(1, 2) + x = self.dropout_module(self.ln1(x)) + x = self.conv2(x.transpose(1, 2)).transpose(1, 2) + x = self.dropout_module(self.ln2(x)) + return self.proj(x).squeeze(dim=2) + + +class CodeGenerator(Generator): + def __init__( + self, + upsample_rates: List[int], + upsample_kernel_sizes: List[int], + upsample_initial_channel: int, + resblock_kernel_sizes: List[int], + resblock_dilation_sizes: List[List[int]], + model_in_dim: Optional[int], + num_embeddings: int, + embedding_dim: int, + dur_predictor_params: Dict[str, Any], + lang_embedding_dim: int, + num_langs: int, + spkr_embedding_dim: int, + num_spkrs: int, + ): + super().__init__( + upsample_rates, + upsample_kernel_sizes, + upsample_initial_channel, + resblock_kernel_sizes, + resblock_dilation_sizes, + model_in_dim, + ) + self.dict = nn.Embedding(num_embeddings, embedding_dim) + self.spkr = nn.Embedding(num_spkrs, spkr_embedding_dim) + self.lang = 
nn.Embedding(num_langs, lang_embedding_dim) + + self.dur_predictor = None + if dur_predictor_params: + self.dur_predictor = VariancePredictor(**dur_predictor_params) + + self.num_spkrs = num_spkrs + self.num_langs = num_langs + + @staticmethod + def _upsample(signal: Tensor, max_frames: int) -> Tensor: + if signal.dim() == 3: + bsz, channels, cond_length = signal.size() + elif signal.dim() == 2: + signal = signal.unsqueeze(2) + bsz, channels, cond_length = signal.size() + else: + signal = signal.view(-1, 1, 1) + bsz, channels, cond_length = signal.size() + + signal = signal.unsqueeze(3).repeat(1, 1, 1, max_frames // cond_length) + + # pad zeros as needed (if signal's shape does not divide completely with max_frames) + reminder = (max_frames - signal.shape[2] * signal.shape[3]) // signal.shape[3] + if reminder > 0: + raise NotImplementedError( + "Padding condition signal - misalignment between condition features." + ) + + signal = signal.view(bsz, channels, max_frames) + return signal + + def forward(self, sample: Dict[str, Any], dur_prediction: bool) -> Tensor: # type: ignore + x = sample["code"].clone().to(device=self.dict.weight.device) + x = self.dict(x).transpose(1, 2) + + if self.dur_predictor and dur_prediction: + assert x.size(0) == 1, "only support single sample" + log_dur_pred = self.dur_predictor(x.transpose(1, 2)) + dur_out = torch.clamp( + torch.round((torch.exp(log_dur_pred) - 1)).long(), min=1 + ) + # B x C x T + x = torch.repeat_interleave(x, dur_out.view(-1), dim=2) + + spkr = self.spkr(sample["spkr"].to(self.spkr.weight.device)).transpose(1, 2) + spkr = self._upsample(spkr, x.shape[-1]) + x = torch.cat([x, spkr], dim=1) + + lang = self.lang(sample["lang"].to(self.lang.weight.device)).transpose(1, 2) + lang = self._upsample(lang, x.shape[-1]) + x = torch.cat([lang, x], dim=1) + + return super().forward(x) diff --git a/src/seamless_communication/models/vocoder/hifigan.py b/src/seamless_communication/models/vocoder/hifigan.py new file mode 100644 
index 00000000..7ebf1bbb --- /dev/null +++ b/src/seamless_communication/models/vocoder/hifigan.py @@ -0,0 +1,194 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.weight_norm import remove_weight_norm, weight_norm + +LRELU_SLOPE = 0.1 + + +def init_weights(m, mean: float = 0.0, std: float = 0.01) -> None: # type: ignore + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size: int, dilation: int = 1) -> int: + return (kernel_size * dilation - dilation) // 2 + + +class ResBlock(torch.nn.Module): + def __init__( + self, channels: int, kernel_size: int = 3, dilation: List[int] = [1, 3, 5] + ): + super(ResBlock, self).__init__() + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ) + ), + ] + ) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + 
padding=get_padding(kernel_size, 1), + ) + ), + ] + ) + self.convs2.apply(init_weights) + + def forward(self, x: Tensor) -> Tensor: + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self) -> None: + for layer in self.convs1: + remove_weight_norm(layer) + for layer in self.convs2: + remove_weight_norm(layer) + + +class Generator(torch.nn.Module): + def __init__( + self, + upsample_rates: List[int], + upsample_kernel_sizes: List[int], + upsample_initial_channel: int, + resblock_kernel_sizes: List[int], + resblock_dilation_sizes: List[List[int]], + model_in_dim: Optional[int], + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = weight_norm( + Conv1d( + model_in_dim if model_in_dim is not None else 80, + upsample_initial_channel, + 7, + 1, + padding=3, + ) + ) + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2**i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for k, d in zip(resblock_kernel_sizes, resblock_dilation_sizes): + self.resblocks.append(ResBlock(ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x: Tensor) -> Tensor: + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + 
j](x) + x = xs / self.num_kernels # type: ignore + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self) -> None: + print("Removing weight norm...") + for layer in self.ups: + remove_weight_norm(layer) + for layer in self.resblocks: + layer.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) diff --git a/src/seamless_communication/models/vocoder/loader.py b/src/seamless_communication/models/vocoder/loader.py new file mode 100644 index 00000000..950fa3dc --- /dev/null +++ b/src/seamless_communication/models/vocoder/loader.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import Any, Mapping, final + +from fairseq2.models.utils.model_loader import ModelLoader +from overrides import override as finaloverride + +from seamless_communication.assets import asset_store, download_manager +from seamless_communication.models.vocoder.builder import ( + VocoderConfig, + create_vocoder_model, + vocoder_archs, +) +from seamless_communication.models.vocoder.vocoder import Vocoder + + +@final +class VocoderLoader(ModelLoader[Vocoder, VocoderConfig]): + """Loads Vocoder models.""" + + @finaloverride + def _upgrade_checkpoint( + self, checkpoint: Mapping[str, Any], config: VocoderConfig + ) -> Mapping[str, Any]: + old_state_dict = checkpoint["generator"] + new_state_dict = {} + for key in old_state_dict: + new_key = f"code_generator.{key}" + new_state_dict[new_key] = old_state_dict[key] + checkpoint["model"] = new_state_dict + del checkpoint["generator"] # type: ignore + return checkpoint + + +load_vocoder_model = VocoderLoader( + asset_store, download_manager, create_vocoder_model, vocoder_archs +) diff --git a/src/seamless_communication/models/vocoder/vocoder.py 
b/src/seamless_communication/models/vocoder/vocoder.py new file mode 100644 index 00000000..33441683 --- /dev/null +++ b/src/seamless_communication/models/vocoder/vocoder.py @@ -0,0 +1,39 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +from typing import List, Optional + +import torch +import torch.nn as nn +from fairseq2.typing import Device + +from seamless_communication.models.vocoder.codehifigan import CodeGenerator + + +class Vocoder(nn.Module): + def __init__(self, code_generator: CodeGenerator, lang_spkr_idx_map: dict): + super(Vocoder, self).__init__() + self.code_generator = code_generator + self.lang_spkr_idx_map = lang_spkr_idx_map + + def forward( + self, + code: List[int], + lang: str, + spkr: Optional[int] = -1, + dur_prediction: bool = True, + ): + x = { + "code": torch.LongTensor(code).view(1, -1), + } + lang_idx = self.lang_spkr_idx_map["multilingual"][lang] + spkr_list = self.lang_spkr_idx_map["multispkr"][lang] + if not spkr: + spkr = -1 + spkr = spkr_list[0] if spkr == -1 else spkr + x["spkr"] = torch.tensor([[spkr]]) + x["lang"] = torch.tensor([[lang_idx]]) + return self.code_generator(x, dur_prediction)